Robustify real-world ball and cup env (#160)
* init commit with prompt hacking

* some progress, but not fully there yet (classifiers, etc. are jumpy...)

* good to go! subgoal accomplished!

* update metadata accordingly

* I think it's good to go?

* fix spot_env

* good for this PR

* should be good to go

---------

Co-authored-by: NishanthJKumar <[email protected]>
nkumar-bdai and NishanthJKumar authored Nov 27, 2023
1 parent 759c61f commit ce9df64
Showing 7 changed files with 87 additions and 30 deletions.
27 changes: 13 additions & 14 deletions predicators/envs/spot_env.py
@@ -676,10 +676,10 @@ def _generate_goal_description(self) -> GoalDescription:
###############################################################################

## Constants
HANDEMPTY_GRIPPER_THRESHOLD = 2.7 # made public for use in perceiver
HANDEMPTY_GRIPPER_THRESHOLD = 5.0 # made public for use in perceiver
_ONTOP_Z_THRESHOLD = 0.25
_INSIDE_Z_THRESHOLD = 0.25
_ONTOP_SURFACE_BUFFER = 0.1
_ONTOP_SURFACE_BUFFER = 0.48
_INSIDE_SURFACE_BUFFER = 0.1
_REACHABLE_THRESHOLD = 1.85
_REACHABLE_YAW_THRESHOLD = 0.95 # higher better
@@ -808,22 +808,24 @@ def _object_in_xy_classifier(state: State,
surface_geom = _object_to_top_down_geom(obj2, state, size_buffer=buffer)
center_x = state.get(obj1, "x")
center_y = state.get(obj1, "y")
ret_val = surface_geom.contains_point(center_x, center_y)

return surface_geom.contains_point(center_x, center_y)
return ret_val


def _on_classifier(state: State, objects: Sequence[Object]) -> bool:
obj_on, obj_surface = objects

if not _object_in_xy_classifier(
state, obj_on, obj_surface, buffer=_ONTOP_SURFACE_BUFFER):
return False

# Check that the bottom of the object is close to the top of the surface.
expect = state.get(obj_surface, "z") + state.get(obj_surface, "height") / 2
actual = state.get(obj_on, "z") - state.get(obj_on, "height") / 2

classification_val = abs(actual - expect) < _ONTOP_Z_THRESHOLD

# If so, check that the object is within the bounds of the surface.
if not _object_in_xy_classifier(
state, obj_on, obj_surface, buffer=_ONTOP_SURFACE_BUFFER):
return False

return classification_val


@@ -1088,10 +1090,7 @@ def _create_operators() -> Iterator[STRIPSOperator]:
obj = Variable("?object", _movable_object_type)
parameters = [robot, obj]
preconds = {LiftedAtom(_NotBlocked, [obj])}
add_effs = {
LiftedAtom(_InView, [robot, obj]),
LiftedAtom(_Reachable, [robot, obj])
}
add_effs = {LiftedAtom(_InView, [robot, obj])}
del_effs = set()
ignore_effs = {_Reachable, _InHandView, _InView}
yield STRIPSOperator("MoveToBodyViewObject", parameters, preconds,
@@ -1112,6 +1111,7 @@ def _create_operators() -> Iterator[STRIPSOperator]:
}
del_effs = {
LiftedAtom(_On, [obj, surface]),
LiftedAtom(_Inside, [obj, surface]),
LiftedAtom(_HandEmpty, [robot]),
LiftedAtom(_InHandView, [robot, obj])
}
@@ -1170,7 +1170,6 @@ def _create_operators() -> Iterator[STRIPSOperator]:
parameters = [robot, held, container, surface]
preconds = {
LiftedAtom(_Holding, [robot, held]),
LiftedAtom(_Reachable, [robot, container]),
LiftedAtom(_InView, [robot, container]),
LiftedAtom(_On, [container, surface]),
LiftedAtom(_IsPlaceable, [held]),
@@ -2054,7 +2053,7 @@ def _detection_id_to_obj(self) -> Dict[ObjectDetectionID, Object]:
detection_id_to_obj[ball_detection] = ball

cup = Object("cup", _container_type)
cup_detection = LanguageObjectDetectionID("large cup")
cup_detection = LanguageObjectDetectionID("ashtray/large black wheel")
detection_id_to_obj[cup_detection] = cup

for obj, pose in get_known_immovable_objects().items():
27 changes: 19 additions & 8 deletions predicators/ground_truth_models/spot_env/nsrts.py
@@ -61,8 +61,8 @@ def _move_to_body_view_object_sampler(state: State, goal: Set[GroundAtom],
# Parameters are relative distance, dyaw (to the object you're moving to).
del goal

min_dist = 1.5
max_dist = 1.85
min_dist = 1.7
max_dist = 1.95

robot_obj = objs[0]
obj_to_nav_to = objs[1]
@@ -94,7 +94,7 @@ def _move_to_reach_object_sampler(state: State, goal: Set[GroundAtom],

# NOTE: closer than move_to_view. Important for placing.
min_dist = 0.0
max_dist = 0.95
max_dist = 0.8

robot_obj = objs[0]
obj_to_nav_to = objs[1]
@@ -113,8 +113,17 @@ def _place_object_on_top_sampler(state: State, goal: Set[GroundAtom],
rng: np.random.Generator,
objs: Sequence[Object]) -> Array:
# Parameters are relative dx, dy, dz (to surface objects center).
del state, goal, objs, rng # randomization coming soon
return np.array([0.0, 0.0, 0.05])
del goal, rng, state # randomization coming soon
dx = 0.025
dz = 0.05

# If we're placing the cup, we want to place it further away from
# the robot so it's solidly on the table in front of it.
if len(objs) == 3 and objs[1].name == "cup":
dx = 0.2
dz = 0.0

return np.array([dx, 0.0, dz])


def _drop_object_inside_sampler(state: State, goal: Set[GroundAtom],
@@ -126,11 +135,13 @@ def _drop_object_inside_sampler(state: State, goal: Set[GroundAtom],

drop_height = 0.5
dx = 0.0
dy = 0.0
if len(objs) == 4 and objs[2].name == "cup":
drop_height = 0.15
dx = 0.08 # we benefit from dropping more forward in the x!
drop_height = 0.10
dx = 0.13 # we benefit from dropping more forward in the x!
dy = 0.0

return np.array([dx, 0.0, drop_height])
return np.array([dx, dy, drop_height])


def _drag_to_unblock_object_sampler(state: State, goal: Set[GroundAtom],
17 changes: 15 additions & 2 deletions predicators/ground_truth_models/spot_env/options.py
@@ -68,6 +68,8 @@ def _grasp_at_pixel_and_stow(robot: Robot, img: RGBDImageWithContext,

def _place_at_relative_position_and_stow(
robot: Robot, rel_pose: math_helpers.SE3Pose) -> None:
# NOTE: Might need to execute a move here if we are currently
# too far away...
# Place.
place_at_relative_position(robot, rel_pose)
# Now, move the arm back slightly. We do this because if we're
@@ -108,7 +110,7 @@ def _move_closer_and_drop_at_relative_position_and_look(
# First, check if we're too far away in distance or angle
# to place.
dist_to_object = np.sqrt(rel_pose.x * rel_pose.x + rel_pose.y * rel_pose.y)
if dist_to_object > 0.75:
if dist_to_object > 0.55:
pose_to_nav_to = math_helpers.SE2Pose(
rel_pose.x - (0.75 * rel_pose.x / dist_to_object),
rel_pose.y - (0.75 * rel_pose.y / dist_to_object), 0.0)
@@ -213,7 +215,7 @@ def _grasp_policy(name: str, target_obj_idx: int, state: State, memory: Dict,
hand_camera)

# Grasp from the top-down.
grasp_rot = math_helpers.Quat.from_pitch(np.pi / 2)
grasp_rot = None
# If the target object is reasonably large, don't try to stow!
target_obj_volume = state.get(target_obj, "height") * \
state.get(target_obj, "length") * state.get(target_obj, "width")
@@ -360,6 +362,7 @@ def _move_and_drop_object_inside_policy(state: State, memory: Dict,
name = "MoveAndDropObjectInside"
robot_obj_idx = 0
container_obj_idx = 2
ontop_surface_obj_idx = 3

robot, _, _ = get_robot()

@@ -370,6 +373,16 @@

container_obj = objects[container_obj_idx]
container_pose = utils.get_se3_pose_from_state(state, container_obj)

surface_obj = objects[ontop_surface_obj_idx]

# Special case: the robot is already on top of the surface (because it is
# probably the floor). When this happens, just drop the object.
surface_geom = _object_to_top_down_geom(surface_obj, state)
if surface_geom.contains_point(robot_pose.x, robot_pose.y):
return utils.create_spot_env_action(name, objects, _drop_and_stow,
(robot, ))

# The dz parameter is with respect to the top of the container.
container_half_height = state.get(container_obj, "height") / 2

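The NOTE added to `_place_at_relative_position_and_stow` suggests walking closer before placing when the target is out of comfortable reach, mirroring the guard already used in `_move_closer_and_drop_at_relative_position_and_look`. Below is a minimal sketch of such a guard; the import paths, the helper names (`navigate_to_relative_pose`, `place_at_relative_position`), and the 0.55 m threshold are assumptions for illustration, not part of this PR.

```python
import numpy as np
from bosdyn.client import math_helpers

# Assumed module paths and helper names; the real skills module may differ.
from predicators.spot_utils.skills.spot_navigation import navigate_to_relative_pose
from predicators.spot_utils.skills.spot_placement import place_at_relative_position


def _maybe_move_then_place(robot, rel_pose: math_helpers.SE3Pose,
                           max_place_dist: float = 0.55) -> None:
    """Walk closer first if the place target is too far away, then place."""
    dist_to_target = np.sqrt(rel_pose.x**2 + rel_pose.y**2)
    if dist_to_target > max_place_dist:
        # Step toward the target in the body frame, stopping max_place_dist
        # short of it, as in the drop-side logic shown in the diff above.
        pose_to_nav_to = math_helpers.SE2Pose(
            rel_pose.x - (max_place_dist * rel_pose.x / dist_to_target),
            rel_pose.y - (max_place_dist * rel_pose.y / dist_to_target), 0.0)
        navigate_to_relative_pose(robot, pose_to_nav_to)
    place_at_relative_position(robot, rel_pose)
```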
3 changes: 3 additions & 0 deletions predicators/perception/spot_perceiver.py
@@ -363,4 +363,7 @@ def render_mental_images(self, observation: Observation,
ax.set_ylim(y_lb, y_ub)
plt.tight_layout()
img = utils.fig2data(fig, CFG.render_state_dpi)
# Uncomment to output a top-down image of the state
# after every time step.
# plt.savefig("top-down-state-view.png")
return [img]
2 changes: 2 additions & 0 deletions predicators/planning.py
@@ -324,6 +324,8 @@ def task_plan(
if not goal.issubset(reachable_atoms):
logging.info(f"Detected goal unreachable. Goal: {goal}")
logging.info(f"Initial atoms: {init_atoms}")
logging.info(
f"Reachable atoms not in init: {reachable_atoms - init_atoms}")
raise PlanningFailure(f"Goal {goal} not dr-reachable")
dummy_task = Task(DefaultState, goal)
metrics: Metrics = defaultdict(float)
@@ -53,54 +53,65 @@ static-object-features:
height: 0.43
length: 0.51
width: 0.51
flat_top_surface: 1
drafting_table:
shape: 1 # cuboid
height: 0.50
length: 0.6
width: 0.4
height: 0.47
length: 0.4
width: 0.6
flat_top_surface: 1
cube:
shape: 1 # cuboid
height: 0.083
length: 0.083
width: 0.083
placeable: 1 # true, can be placed
plunger:
shape: 1 # cuboid
height: 0.1
length: 0.1
width: 0.1
placeable: 1 # true, can be placed
soda_can:
shape: 2
height: 0.114
length: 0.065
width: 0.065
placeable: 1 # true, can be placed
floor:
shape: 1
height: 0.0001
length: 10000000 # effectively infinite
width: 10000000
flat_top_surface: 0
white-table:
shape: 1 # cuboid
height: 0.74
length: 1.27
width: 0.51
flat_top_surface: 1
bucket:
shape: 2 # cylinder
height: 0.4
width: 0.23
length: 0.23
placeable: 1 # true, can be placed
chair:
shape: 2 # cylinder
height: 0.8
length: 0.5
width: 0.5
placeable: 0 # false, can't be placed
cup:
shape: 2 # cylinder
height: 0.1
height: 0.05
length: 0.15
width: 0.15
placeable: 1
ball:
shape: 2 # cylinder
height: 0.06
length: 0.06
width: 0.06
placeable: 1

22 changes: 20 additions & 2 deletions predicators/spot_utils/perception/object_detection.py
@@ -438,8 +438,26 @@ def get_object_center_pixel_from_artifacts(
seg_bb = detections[object_id][camera_name]
except KeyError:
raise ValueError(f"{object_id} not detected in {camera_name}")
x1, y1, x2, y2 = seg_bb.bounding_box
return int((x1 + x2) / 2), int((y1 + y2) / 2)

# Select center pixel of bounding box.
# x1, y1, x2, y2 = seg_bb.bounding_box
# pixel_tuple = int((x1 + x2) / 2), int((y1 + y2) / 2)

# Select the first ever pixel in the mask. We might want to
# select a random pixel instead.
mask = seg_bb.mask
pixels_in_mask = np.where(mask)
pixel_tuple = (pixels_in_mask[1][0], pixels_in_mask[0][0])
# Uncomment to plot the grasp pixel being selected!
# rgb_img = artifacts["language"]["rgbds"][camera_name].rgb
# _, axes = plt.subplots()
# axes.imshow(rgb_img)
# axes.add_patch(plt.Rectangle((pixel_tuple[0], pixel_tuple[1]),
# 5, 5, color='red'))
# plt.tight_layout()
# outdir = Path(CFG.spot_perception_outdir)
# plt.savefig(outdir / "grasp_pixel.png", dpi=300)
return pixel_tuple


def visualize_all_artifacts(artifacts: Dict[str,
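The new code in `get_object_center_pixel_from_artifacts` takes the first pixel of the segmentation mask and notes that a random pixel might be preferable. A minimal sketch of that alternative, assuming a boolean mask and a NumPy RNG; the function name and its `rng` argument are illustrative, not part of this PR.

```python
import numpy as np


def _sample_pixel_from_mask(mask: np.ndarray,
                            rng: np.random.Generator) -> tuple:
    """Pick a uniformly random (x, y) pixel inside a boolean mask."""
    rows, cols = np.where(mask)  # rows are y indices, cols are x indices
    idx = rng.integers(len(rows))
    # Return (x, y), matching the (col, row) ordering used in the diff.
    return int(cols[idx]), int(rows[idx])
```

Usage would look like `pixel_tuple = _sample_pixel_from_mask(seg_bb.mask, np.random.default_rng(0))` in place of the first-pixel selection.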
