Commit 54e4db92 authored by czb5793's avatar czb5793
Browse files

Small changes

parent d79b50ce
......@@ -145,12 +145,12 @@ control:
# If robot is closer than this distance to the goal, it is considered as reached
goal_reached_distance: 0.05
# If a sensor measures a distance smaller than the danger distance, the robot immediately starts moving into the opposite direction
danger_distance: 0.06 # 0.06
danger_distance: 0.06
# If a sensor measures a distance smaller than the caution distance, the robot will follow the wall of the obstacle
# Set to danger_distance to disable wall following, since the map contains small, circle-like objects, where wall following can lead to looping around an object
caution_distance: 0.15 # a. 0.06; b. 0.15
caution_distance: 0.15
# Criterion for stopping the following of a wall
progress_epsilon: 0.2 # a. 0.05; b. 0.2
progress_epsilon: 0.2
slam:
# The amount of variables that describe the robot's state
......@@ -190,7 +190,7 @@ slam:
# The mahalanobis distance threshold used in data association
distance_threshold: 0.125
# The number of used particles
n_particles: 150 # 80
n_particles: 150
# Configures the motion noise. The values are currently empirically chosen.
motion_noise:
# Standard deviation of the motion command's translational velocity in m/s.
......@@ -217,10 +217,10 @@ slam:
# number of fixed vertexes while the graph optimization
num_fixed_vertexes: 20
# draw trajectory on the frame
draw_trajectory: false
draw_trajectory: true
# sparse solver: cholesky or spsolve.
# For the faster sparse solver cholesky, users have to install the scikit-sparse library.
solver: "cholesky"
solver: "spsolve"
# Configures the motion noise. The values are currently empirically chosen.
motion_noise:
# Standard deviation of the robots x-coordinate in meters after executing a motion command.
......@@ -240,13 +240,17 @@ slam:
# The raw data will be recorded during a simulation. After clicking the
# button Plot Slam Evaluation, raw data will be stored in
# the folder \scripts as two csv files, which can be analysed through a Jupyter notebook.
enabled: false
enabled: true
# Determines the interval of when the accuracy of the generated maps is calculated
# A low interval (for example 1) causes performance problems
# if associate_id == true, a low interval (for example 1) causes performance problems
interval: 18
# Determines the method of data association.
# if true, landmark identifiers will be associated, otherwise by minimum distance
associate_id: true
# Determines whether the data is saved in csv files when the user
# clicks the `Plot Slam Evaluation` button.
# The csv files are usually large which can be found in the folder ./scripts
save_csv_data: false
# Configures the 2D occupancy grid mapping algorithm
mapping:
......
......@@ -144,12 +144,12 @@ control:
# If robot is closer than this distance to the goal, it is considered as reached
goal_reached_distance: 0.05
# If a sensor measures a distance smaller than the danger distance, the robot immediately starts moving into the opposite direction
danger_distance: 0.1 # 0.06
danger_distance: 0.1
# If a sensor measures a distance smaller than the caution distance, the robot will follow the wall of the obstacle
# Set to danger_distance to disable wall following, since the map contains small, circle-like objects, where wall following can lead to looping around an object
caution_distance: 0.1 # 0.06
caution_distance: 0.1
# Criterion for stopping the following of a wall
progress_epsilon: 0.05 # 0.05
progress_epsilon: 0.05
# Configures the SLAM system
slam:
......@@ -190,7 +190,7 @@ slam:
# The mahalanobis distance threshold used in data association
distance_threshold: 0.125
# The number of used particles
n_particles: 200 # 80
n_particles: 200
# Configures the motion noise. The values are currently empirically chosen.
motion_noise:
# Standard deviation of the motion command's translational velocity in m/s. # 0.02,0.015
......@@ -221,7 +221,7 @@ slam:
draw_trajectory: false
# sparse solver: cholesky or spsolve
# For the faster sparse solver cholesky, users have to install the scikit-sparse library.
solver: "cholesky"
solver: "spsolve"
# Configures the motion noise. The values are currently empirically chosen.
motion_noise:
# Standard deviation of the robots x-coordinate in meters after executing a motion command.
......@@ -241,13 +241,17 @@ slam:
# The raw data will be recorded during a simulation. After clicking the
# button Plot Slam Evaluation, raw data will be stored in
# the folder \scripts as two csv files, which can be analysed through a Jupyter notebook.
enabled: false
enabled: true
# Determines the interval of when the accuracy of the generated maps is calculated
# A low interval (for example 1) causes performance problems
interval: 18
# Determines the method of data association.
# if true, data will be associated by landmark identifiers, otherwise by minimum distance
associate_id: true
# Determines whether the data is saved in csv files when the user
# clicks the `Plot Slam Evaluation` button.
# The csv files are usually large which can be found in the folder ./scripts
save_csv_data: false
# Configures the 2D occupancy grid mapping algorithm
mapping:
......
......@@ -219,7 +219,7 @@ slam:
draw_trajectory: false
# sparse solver: cholesky or spsolve.
# For the faster sparse solver cholesky, users have to install the scikit-sparse library.
solver: "cholesky"
solver: "spsolve"
# Configures the motion noise. The values are currently empirically chosen.
motion_noise:
# Standard deviation of the robots x-coordinate in meters after executing a motion command.
......@@ -246,6 +246,10 @@ slam:
# Determines the method of data association.
# if true, landmark identifiers will be associated, otherwise by minimum distance
associate_id: true
# Determines whether the data is saved in csv files when the user
# clicks the `Plot Slam Evaluation` button.
# The csv files are usually large. The csv-files can be found in the folder ./scripts
save_csv_data: false
# Configures the 2D occupancy grid mapping algorithm
mapping:
......
......@@ -24,7 +24,7 @@ import gi
from gi.repository import GLib
from plotters.SlamPlotter import *
from supervisor.slam.SlamEvaluation import SlamEvaluation2
from supervisor.slam.SlamEvaluation import SlamEvaluation
from plotters.MappingPlotter import MappingPlotter
......@@ -153,8 +153,8 @@ class Simulator:
self.slam_evaluation = None
if self.cfg["slam"]["evaluation"]["enabled"]:
list_slam = [self.world.supervisors[0].ekfslam, self.world.supervisors[0].fastslam, self.world.supervisors[0].graphbasedslam]
self.slam_evaluation = SlamEvaluation2(list_slam,
self.cfg["slam"]["evaluation"], self.world.robots[0])
self.slam_evaluation = SlamEvaluation(list_slam,
self.cfg["slam"]["evaluation"], self.world.robots[0])
for slam in list_slam:
if slam is not None:
slam.callback = self.slam_evaluation.time_per_step # set the callback function
......@@ -297,8 +297,6 @@ class Simulator:
if __name__ == "__main__":
filename = "config.yaml" if len(sys.argv) == 1 else sys.argv[1]
#filename = "config01.yaml" if len(sys.argv) == 1 else sys.argv[1]
#filename = "original_config.yaml" if len(sys.argv) == 1 else sys.argv[1]
with open(filename, 'r') as ymlfile:
cfg = yaml.safe_load(ymlfile)
......
......@@ -160,7 +160,5 @@ if __name__ == "__main__":
#posegraph = read_data("input_INTEL_g2o.g2o")
#posegraph = read_data("input_MITb_g2o.g2o")
posegraph = read_data("input_M3500_g2o.g2o")
#posegraph.graph_optimization(max_iter=30, damp_factor=5)
#posegraph = read_data("input_M3500a_g2o.g2o")
posegraph.graph_optimization(max_iter=16, damp_factor=1)
posegraph.draw()
......@@ -19,7 +19,7 @@
"source": [
"## How to use?\n",
"1. Please load the configuration file **config01.yaml** while starting the simulator;\n",
"2. Make sure that the attribute of **evaluation** is enabled;\n",
"2. Make sure that the attribute of **evaluation** is enabled and **save_csv_data** is true;\n",
"3. Start the simulator and load one of the maps **slam_example_1** to **slam_example_8**;\n",
"4. Click the play button and let the simulator run for a while;\n",
"5. Click the button **Plot Slam Evaluation**, the estimation results will be shown in figures, and the raw data will be stored in the file **scripts/sobot_information1.csv** and **scripts/sobot_information2.csv** to analyse.\n",
......
......@@ -20,7 +20,7 @@
from supervisor.ControlState import *
from simulation.exceptions import GoalReachedException, GoalNotReachedException
from utils import linalg2_util as linalg
from supervisor.slam.Timer import Timer
from supervisor.Timer import Timer
class SupervisorStateMachine:
......
......@@ -41,6 +41,6 @@ class Timer:
return False
elif time.time() - self.start_time_best_distance_to_goal > T_NO_BEST_DIST_CHANGE: # no changes in time
self.start_time_to_goal = time.time()
print("Best distance dose not changed in time.")
#print("Best distance dose not changed in time.")
return True
return False
\ No newline at end of file
......@@ -27,21 +27,20 @@ class GraphBasedSLAM(Slam):
self.motion_noise = np.diag([slam_cfg["graph_based_slam"]["motion_noise"]["x"],
slam_cfg["graph_based_slam"]["motion_noise"]["y"],
np.deg2rad(slam_cfg["graph_based_slam"]["motion_noise"]["theta"])]) ** 2
# distance threshold for determining optimization
self.min_distance_threshold = slam_cfg["graph_based_slam"]["distance_threshold"]
self.frontend_pose_density = slam_cfg["graph_based_slam"]["frontend_pose_density"]
self.frontend_interval = slam_cfg["graph_based_slam"]['frontend_interval'] # the timestep interval of executing the frontend part.
self.num_fixed_vertices = slam_cfg["graph_based_slam"]["num_fixed_vertexes"]
# solver
self.solver = slam_cfg["graph_based_slam"]['solver'].lower()
# The estimated combined state vector, initially containing the robot pose at the origin and no landmarks
# the current robot pose and its uncertainty
self.mu = np.zeros((self.robot_state_size, 1))
self.Sigma = np.zeros((self.robot_state_size, self.robot_state_size)) # The state covariance, initially set to absolute certainty of the initial robot pose
self.step_counter = 0
self.graph = LMGraph()
self.flg_optim = False
self.callback = callback
self.flg_optim = False # determines whether the graph should be optimized
self.callback = callback # a callback function
# add the first node to the graph
vertex1 = PoseVertex(self.mu, np.eye(3))
......@@ -57,7 +56,7 @@ class GraphBasedSLAM(Slam):
return Pose(self.mu[0, 0], self.mu[1, 0], self.mu[2, 0])
def get_estimated_trajectory(self):
return [ (v.pose[0,0], v.pose[1,0]) \
return [(v.pose[0,0], v.pose[1,0]) \
for v in self.graph.get_estimated_pose_vertices()]
def get_hessian(self):
......@@ -79,7 +78,7 @@ class GraphBasedSLAM(Slam):
def plot_graph(self):
"""
plot the graph estimated by slam
plot the graph estimated by the Graph-based SLAM
"""
self.graph.draw()
......@@ -105,8 +104,6 @@ class GraphBasedSLAM(Slam):
self.__front_end(z)
num_vertices = len(self.graph.vertices) # #vertices = #poses + #landmarks
if num_vertices > 0 and self.flg_optim == True:
print (str(self.counter) + ": " + "Graph-based SLAM starts the backend-optimization with " +
str(num_vertices) + " vertices")
self.flg_optim = False
self.__back_end()
......@@ -173,18 +170,9 @@ class GraphBasedSLAM(Slam):
self.mu = np.copy(last_vertex.pose) # update current state
self.Sigma = np.copy(last_vertex.sigma)
def __calc_range_bearing_delta(self, x, lm, z):
delta = lm - x[0:2, :]
range = np.linalg.norm(delta, axis=0)
phi = np.arctan2(delta[1, :], delta[0, :])
ext_z = np.vstack((range, phi))
err = ext_z - np.array(z).reshape((2,1))
err[1, :] = np.arctan2(np.sin(err[1, :]), np.cos(err[1, :]))
return err
def __data_association(self, zi):
"""
Associates the measurement to a landmark using the landmark id.
Associates the measurement to a landmark using the landmark identifiers.
:param zi: (distance, angle, landmark_id)
return
vertex: a vertex object with the same id containing in zi
......@@ -198,13 +186,6 @@ class GraphBasedSLAM(Slam):
break
return vertex
def get_estimated_landmark_position(self):
lm_pos = []
vertices_lm = self.graph.get_estimated_landmark_vertices()
for v in vertices_lm:
lm_pos.append((v.pose[0, 0], v.pose[1, 0]))
return lm_pos, vertices_lm
@staticmethod
def jaco_motion_model(x, u, dt):
v, w = u[0, 0], u[1, 0]
......@@ -265,17 +246,5 @@ class GraphBasedSLAM(Slam):
return lm
@staticmethod
def diff_to_uni(v_l, v_r, width):
"""
:param v_l: Translational velocity of the left wheel
:param v_r: Translational velocity of the right wheel
:return v, w
Translational and angular velocities
"""
v = (v_r + v_l) * 0.5
w = (v_r - v_l) / width
return v, w
def __str__(self):
return "Graph-based SLAM"
\ No newline at end of file
......@@ -4,10 +4,10 @@ from matplotlib.ticker import MaxNLocator
from models.obstacles.FeaturePoint import FeaturePoint
import pandas as pd
class SlamEvaluation2:
class SlamEvaluation:
def __init__(self, list_slam, evaluation_cfg, robot):
"""
Initializes an object of the SlamEvaluation2 class
Initializes an object of the SlamEvaluation class
:param list_slam: a list of slam algorithms that will be evaluated
:param evaluation_cfg: The configurations for the class.
Currently only used to calculate number of simulation cycles
......@@ -20,6 +20,9 @@ class SlamEvaluation2:
self.robot = robot
self.reset_record()
self.sim_circle = 0
self.interval = self.cfg["interval"]
self.save_csv_data = self.cfg['save_csv_data']
self.associate_id = self.cfg['associate_id']
def reset_record(self):
self.head1 = ["sim_circle", "landmark_id", "estimated_landmark_position", "estimated_robot_pose",
......@@ -41,7 +44,7 @@ class SlamEvaluation2:
slam_obstacles = slam.get_landmarks()
distances = []
for i, slam_obstacle in enumerate(slam_obstacles):
if self.cfg["associate_id"] == True:
if self.associate_id == True:
dist = self.__find_distance_by_landmark_id(slam_obstacle, obstacles)
else:
dist = self.__find_min_distance(slam_obstacle, obstacles)
......@@ -56,6 +59,7 @@ class SlamEvaluation2:
"""
Record data in a list
:param sim_circle: simulation circle
:param obstacles: list of obstacle objects in the world.
"""
self.sim_circle = sim_circle
for j, slam in enumerate(self.list_slam):
......@@ -75,11 +79,8 @@ class SlamEvaluation2:
actual_landmark_position = obstacle.pose.sunpack()
break
self.data.append([int(sim_circle), int(landmark_id), estimated_landmark_position,
estimated_robot_position, actual_landmark_position, actual_robot_position, slam_name])
if sim_circle % 1000 == 0:
print ("sim_circle", sim_circle)
self.data.append([int(sim_circle), int(landmark_id), estimated_landmark_position, estimated_robot_position,
actual_landmark_position, actual_robot_position, slam_name])
def time_per_step(self, name, time):
"""
......@@ -118,7 +119,8 @@ class SlamEvaluation2:
Produces a plot of how the average distance changed over the course of the simulation.
Saves the plot in a png file.
"""
self.save_infomation("./scripts/sobot_information")
if self.save_csv_data:
self.save_infomation("./scripts/sobot_information")
fig, ax = plt.subplots(2, figsize=(9,8))
......@@ -130,8 +132,8 @@ class SlamEvaluation2:
name = str(slam)
# Calculates number of elapsed simulation cycles
sim_cycles = len(self.average_distances_lm[i]) * self.cfg["interval"]
ax[0].plot(range(0, sim_cycles, self.cfg["interval"]),
sim_cycles = len(self.average_distances_lm[i]) * self.interval
ax[0].plot(range(0, sim_cycles, self.interval),
self.average_distances_lm[i],
line_styles[i%len(self.list_slam)], label = name)
ax[0].xaxis.set_major_locator(MaxNLocator(integer=True))
......@@ -141,8 +143,6 @@ class SlamEvaluation2:
ax[0].grid()
plt.show()
#fig, ax = plt.subplots()
ax[1].grid()
for i, slam in enumerate(self.list_slam):
if slam is None:
......@@ -150,8 +150,8 @@ class SlamEvaluation2:
name = str(slam)
# Calculates number of elapsed simulation cycles
sim_cycles = len(self.average_distances_lm[i]) * self.cfg["interval"]
ax[1].plot(range(0, sim_cycles, self.cfg["interval"]), self.distances_robot[i], line_styles[i%len(self.list_slam)], label = name)
sim_cycles = len(self.average_distances_lm[i]) * self.interval
ax[1].plot(range(0, sim_cycles, self.interval), self.distances_robot[i], line_styles[i%len(self.list_slam)], label = name)
ax[1].legend()
ax[1].set(xlabel='Simulation cycles', ylabel='Distance to true robot position in meters', title='Evaluation of SLAM')
plt.savefig('slam_robot_position_evaluation.png')
......
......@@ -85,10 +85,10 @@ class Graph:
"""
Optimization of the graph
:param number_fix: fix the estimation of the initial step
:param damp_factor: how fasten you want to fix a vertex.
:param damp_factor: how much you want to fix a vertex.
:param max_iter: the maximum number of iterations
:param callback: a callback function, callback(vertices, edges, info)
:param difference for determining convergence
:param epsilon: difference for determining convergence
:return: epsilon global error after optimization
"""
global_error = np.inf
......@@ -128,7 +128,7 @@ class Graph:
def apply_covariance(self, H):
H = H.tocsr()
indices, hess_size = self.get_block_index_(self.vertices)
indices, hess_size = self.get_block_index(self.vertices)
for id, vertex in enumerate(self.vertices):
i1, i2 = indices[id]
Hii = H[i1:i2, i1:i2].toarray()
......@@ -159,7 +159,7 @@ class Graph:
H: the hessian matrix (information matrix)
b: information vector
"""
indices, hess_size = self.get_block_index_(vertices) # calculate indices for each block in the hessian matrix
indices, hess_size = self.get_block_index(vertices) # calculate indices for each block in the hessian matrix
b = np.zeros((hess_size, 1), dtype=np.float64) # initialize an information vector
Hessian_data = list() # a list to store the block matrices (flattened)
row_indices = list() # a list of row indices for the corresponding values in H matrix
......@@ -177,7 +177,6 @@ class Graph:
Hij = A.T @ omega @ B
Hji = B.T @ omega @ A
""" Compute the hessian matrix and vector """
i1, i2 = indices[edge.vertex1.id]
j1, j2 = indices[edge.vertex2.id]
......@@ -242,14 +241,14 @@ class Graph:
A apply the increment dx on all vertices of the graph
:param dx: increment
"""
indices, _ = self.get_block_index_(self.vertices)
indices, _ = self.get_block_index(self.vertices)
for i, v in enumerate(self.vertices):
i1, i2 = indices[i]
v.pose += dx[i1:i2]
self.normalize_angles(self.vertices)
@staticmethod
def get_block_index_(vertices):
def get_block_index(vertices):
"""
Calculate block indices in hessian matrix for each vertices of the graph
:param vertices: A list of vertices of the graph
......
......@@ -261,8 +261,6 @@ class LMGraph(Graph):
raise RuntimeError()
return pose, landmarks
def get_last_pose_vertex(self):
"""
Return the last vertex object of poses
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment