diff --git a/README.md b/README.md
index 72a43deff..edaefbdd2 100644
--- a/README.md
+++ b/README.md
@@ -1,9 +1,16 @@
![splash](./docs/assets/splash.png)
-# OmniGibson
+# OmniGibson
###
+-------
+
+## Latest Updates
+- [04/07/22] **v0.0.6**: Significantly improved stability, performance, and ease of installation :wrench: [[release notes]](https://github.com/StanfordVL/OmniGibson/releases/tag/v0.0.6)
+
+-------
+
**`OmniGibson`** is a platform for accelerating Embodied AI research built upon NVIDIA's [Omniverse](https://www.nvidia.com/en-us/omniverse/) platform, featuring:
* 📸 Photorealistic Visuals and 📐 Physical Realism
diff --git a/docs/dist/css/style.css b/docs/dist/css/style.css
index 1bcb9d812..d6073271a 100644
--- a/docs/dist/css/style.css
+++ b/docs/dist/css/style.css
@@ -977,10 +977,9 @@ button,input,select,textarea,label{
top: 0;
left: 0;
width: 100%;
- height: 1200px;
+ height: 100vh;
background-image: url("../../assets/splash_no_logo.png"), linear-gradient(rgba(0, 0, 0, 0.5), rgba(0, 0, 0, 1));
- mask-image: linear-gradient(to right, rgba(0, 0, 0, 1.0) 50%, transparent 100%);
- background-size: contain;
+ background-size: 100%;
background-repeat: no-repeat;
}
diff --git a/omnigibson/__init__.py b/omnigibson/__init__.py
index ed33c80d8..aea51789f 100644
--- a/omnigibson/__init__.py
+++ b/omnigibson/__init__.py
@@ -83,6 +83,13 @@ def create_app():
enable_extension("omni.particle.system.bundle")
enable_extension("omni.kit.window.viewport") # This is needed for windows
+ # Globally suppress certain logging channels (unless we're in debug mode) since they produce spurious warnings
+ if not gm.DEBUG:
+ import omni.log
+ log = omni.log.get_log()
+ for channel in ["omni.hydra.scene_delegate.plugin", "omni.kit.manipulator.prim.model"]:
+ log.set_channel_enabled(channel, False, omni.log.SettingBehavior.OVERRIDE)
+
# Possibly hide windows if in debug mode
if gm.GUI_VIEWPORT_ONLY:
hide_window_names = ["Console", "Main ToolBar", "Stage", "Layer", "Property", "Render Settings", "Content",
diff --git a/omnigibson/prims/xform_prim.py b/omnigibson/prims/xform_prim.py
index ebe5ac7ce..d94cb9416 100644
--- a/omnigibson/prims/xform_prim.py
+++ b/omnigibson/prims/xform_prim.py
@@ -12,7 +12,7 @@
from omni.isaac.core.utils.stage import get_current_stage
from omnigibson.prims.prim_base import BasePrim
from omnigibson.prims.material_prim import MaterialPrim
-from omnigibson.utils.transform_utils import quat2mat, mat2euler
+from omnigibson.utils.transform_utils import quat2euler
from omnigibson.utils.usd_utils import BoundingBoxAPI
from scipy.spatial.transform import Rotation as R
@@ -235,7 +235,7 @@ def get_rpy(self):
Returns:
3-array: (roll, pitch, yaw) global euler orientation of this prim
"""
- return mat2euler(quat2mat(self.get_orientation()))
+ return quat2euler(self.get_orientation())
def get_local_pose(self):
"""
diff --git a/omnigibson/robots/manipulation_robot.py b/omnigibson/robots/manipulation_robot.py
index 9b1e7ec41..c2250d2b4 100644
--- a/omnigibson/robots/manipulation_robot.py
+++ b/omnigibson/robots/manipulation_robot.py
@@ -4,6 +4,7 @@
import omnigibson as og
from omnigibson.macros import gm, create_module_macros
+from omnigibson.object_states import ContactBodies
from omnigibson.utils.asset_utils import get_assisted_grasping_categories
import omnigibson.utils.transform_utils as T
from omnigibson.controllers import (
@@ -238,7 +239,7 @@ def is_grasping(self, arm="default", candidate_obj=None):
Args:
arm (str): specific arm to check for grasping. Default is "default" which corresponds to the first entry
in self.arm_names
- candidate_obj (EntityPrim or None): object to check if this robot is currently grasping. If None, then
+ candidate_obj (StatefulObject or None): object to check if this robot is currently grasping. If None, then
will be a general (object-agnostic) check for grasping.
Note: if self.grasping_mode is "physical", then @candidate_obj will be ignored completely
@@ -264,17 +265,8 @@ def is_grasping(self, arm="default", candidate_obj=None):
is_grasping = self._controllers["gripper_{}".format(arm)].is_grasping()
# If candidate obj is not None, we also check to see if our fingers are in contact with the object
if is_grasping and candidate_obj is not None:
- grasping_obj = False
- obj_links = {link.prim_path for link in candidate_obj.links.values()}
- finger_links = {link.prim_path for link in self.finger_links[arm]}
- for c in self.contact_list():
- c_set = {c.body0, c.body1}
- # Valid grasping of object if one of the set is a finger link and the other is the grasped object
- if len(c_set - finger_links) == 1 and len(c_set - obj_links) == 1:
- grasping_obj = True
- break
- # Update is_grasping
- is_grasping = grasping_obj
+ finger_links = set(self.finger_links[arm])
+ is_grasping = len(candidate_obj.states[ContactBodies].get_value().intersection(finger_links)) > 0
return is_grasping
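
The rewritten grasp check leans on the `ContactBodies` object state instead of scanning the raw contact list. A conceptual sketch of the set intersection, with stand-in strings in place of the actual `RigidPrim` handles (the real code intersects link objects, assuming `ContactBodies.get_value()` returns the links currently touching `candidate_obj`):

```python
# Stand-in link identifiers; the actual implementation compares RigidPrim objects
finger_links = {"gripper_left_finger", "gripper_right_finger"}
touching_links = {"gripper_left_finger", "table_top"}  # e.g. from ContactBodies.get_value()

# Grasping the candidate object only counts if at least one finger is in contact with it
is_grasping = len(touching_links & finger_links) > 0   # True here
```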
diff --git a/omnigibson/scenes/scene_base.py b/omnigibson/scenes/scene_base.py
index bba8fe724..d0337fc93 100644
--- a/omnigibson/scenes/scene_base.py
+++ b/omnigibson/scenes/scene_base.py
@@ -208,25 +208,6 @@ def _load_objects_from_scene_file(self):
self.disable_collisions_for_fixed_objects()
- def disable_collisions_for_fixed_objects(self):
- # disable collision between the fixed links of the fixed objects
- fixed_objs = self.object_registry("fixed_base", True, default_val=[])
- if len(fixed_objs) > 1:
- # We iterate over all pairwise combinations of fixed objects
- building_categories = {"walls", "floors", "ceilings"}
- for obj_a, obj_b in combinations(fixed_objs, 2):
- # TODO: Remove this hotfix once asset collision meshes are fixed!
- # Filter out collisions between walls / ceilings / floors and ALL links of the other object
- if obj_a.category in building_categories:
- for link in obj_b.links.values():
- obj_a.root_link.add_filtered_collision_pair(link)
- elif obj_b.category in building_categories:
- for link in obj_a.links.values():
- obj_b.root_link.add_filtered_collision_pair(link)
- else:
- # Only filter out root links
- obj_a.root_link.add_filtered_collision_pair(obj_b.root_link)
-
def _should_load_object(self, obj_info):
"""
Helper function to check whether we should load an object given its init_info. Useful for potentially filtering
@@ -419,6 +400,22 @@ def add_object(self, obj, register=True, _is_call_from_simulator=False):
# let scene._load() load the object when called later on.
prim = obj.load()
+ # If this object has a fixed base, filter out collisions between it and all other fixed-base objects in the scene
+ if obj.fixed_base:
+ # TODO: Remove building hotfix once asset collision meshes are fixed!!
+ building_categories = {"walls", "floors", "ceilings"}
+ for fixed_obj in self.fixed_objects.values():
+ # Filter out collisions between walls / ceilings / floors and ALL links of the other object
+ if obj.category in building_categories:
+ for link in fixed_obj.links.values():
+ obj.root_link.add_filtered_collision_pair(link)
+ elif fixed_obj.category in building_categories:
+ for link in obj.links.values():
+ fixed_obj.root_link.add_filtered_collision_pair(link)
+ else:
+ # Only filter out root links
+ obj.root_link.add_filtered_collision_pair(fixed_obj.root_link)
+
# Add this object to our registry based on its type, if we want to register it
if register:
self.object_registry.add(obj)
@@ -477,7 +474,7 @@ def fixed_objects(self):
dict: Keyword-mapped objects that are fixed in the scene. Maps object name to their object class instances
(DatasetObject)
"""
- return {obj.name: obj for obj in self.object_registry("fixed_base", True)}
+ return {obj.name: obj for obj in self.object_registry("fixed_base", True, default_val=[])}
def get_random_floor(self):
"""
diff --git a/omnigibson/tasks/behavior_task.py b/omnigibson/tasks/behavior_task.py
index 56a2cf0c7..c04f8c645 100644
--- a/omnigibson/tasks/behavior_task.py
+++ b/omnigibson/tasks/behavior_task.py
@@ -19,6 +19,7 @@
import omnigibson as og
from omnigibson.macros import gm
+from omnigibson.object_states import Pose
from omnigibson.objects.dataset_object import DatasetObject
from omnigibson.reward_functions.potential_reward import PotentialReward
from omnigibson.robots.robot_base import BaseRobot
@@ -35,6 +36,7 @@
WATER_SYNSETS,
SYSTEM_SYNSETS_TO_SYSTEM_NAMES,
)
+import omnigibson.utils.transform_utils as T
from omnigibson.utils.python_utils import classproperty, assert_valid_key
from omnigibson.systems import get_system
from omnigibson.utils.ui_utils import create_module_logger
@@ -155,8 +157,7 @@ def _load(self, env):
# Initialize the current activity
success, self.feedback = self.initialize_activity(env=env)
- if not success:
- print(f"Failed to initialize Behavior Activity. Feedback:\n{self.feedback}")
+ assert success, f"Failed to initialize Behavior Activity. Feedback:\n{self.feedback}"
# Highlight any task relevant objects if requested
if self.highlight_task_relevant_objs:
@@ -503,22 +504,22 @@ def check_scene(self, env):
"""
error_msg = self.parse_non_sampleable_object_room_assignment(env)
if error_msg:
- log.warning(error_msg)
+ log.error(error_msg)
return False, error_msg
error_msg = self.build_sampling_order(env)
if error_msg:
- log.warning(error_msg)
+ log.error(error_msg)
return False, error_msg
error_msg = self.build_non_sampleable_object_scope(env)
if error_msg:
- log.warning(error_msg)
+ log.error(error_msg)
return False, error_msg
error_msg = self.import_sampleable_objects(env)
if error_msg:
- log.warning(error_msg)
+ log.error(error_msg)
return False, error_msg
self.object_scope["agent.n.01_1"] = self.get_agent(env)
@@ -555,9 +556,9 @@ def assign_object_scope_with_cache(self, env):
elif self.object_instance_to_category[obj_inst] in SYSTEM_SYNSETS_TO_SYSTEM_NAMES:
matched_sim_obj = get_system(SYSTEM_SYNSETS_TO_SYSTEM_NAMES[self.object_instance_to_category[obj_inst]])
else:
- log.info(f"checking objects...")
+ log.debug(f"checking objects...")
for sim_obj in og.sim.scene.objects:
- log.info(f"checking bddl obj scope for obj: {sim_obj.name}")
+ log.debug(f"checking bddl obj scope for obj: {sim_obj.name}")
if hasattr(sim_obj, "bddl_object_scope") and sim_obj.bddl_object_scope == obj_inst:
matched_sim_obj = sim_obj
break
@@ -577,7 +578,7 @@ def process_single_condition(self, condition):
- bool: Whether this evaluated condition is positive or negative
"""
if not isinstance(condition.children[0], Negation) and not isinstance(condition.children[0], AtomicFormula):
- log.warning(("Skipping over sampling of predicate that is not a negation or an atomic formula"))
+ log.debug(("Skipping over sampling of predicate that is not a negation or an atomic formula"))
return None, None
if isinstance(condition.children[0], Negation):
@@ -679,7 +680,7 @@ def filter_object_scope(self, input_object_scope, conditions, condition_type):
str(success),
]
)
- log.warning(log_msg)
+ log.info(log_msg)
# If any condition fails for this candidate object, skip
if not success:
@@ -761,23 +762,23 @@ def maximum_bipartite_matching(self, filtered_object_scope, condition_type):
obj_inst_to_obj_per_room_inst[obj_inst] = filtered_object_scope[room_type][obj_inst][room_inst]
top_nodes = []
log_msg = "MBM for room instance [{}]".format(room_inst)
- log.warning((log_msg))
+ log.debug(log_msg)
for obj_inst in obj_inst_to_obj_per_room_inst:
for obj in obj_inst_to_obj_per_room_inst[obj_inst]:
# Create an edge between obj instance and each of the simulator obj that supports sampling
graph.add_edge(obj_inst, obj)
log_msg = "Adding edge: {} <-> {}".format(obj_inst, obj.name)
- log.warning((log_msg))
+ log.debug((log_msg))
top_nodes.append(obj_inst)
# Need to provide top_nodes that contain all nodes in one bipartite node set
# The matches will have two items for each match (e.g. A -> B, B -> A)
matches = nx.bipartite.maximum_matching(graph, top_nodes=top_nodes)
if len(matches) == 2 * len(obj_inst_to_obj_per_room_inst):
- log.warning(("Object scope finalized:"))
+ log.debug(("Object scope finalized:"))
for obj_inst, obj in matches.items():
if obj_inst in obj_inst_to_obj_per_room_inst:
self.object_scope[obj_inst] = obj
- log.warning((obj_inst, obj.name))
+ log.debug(f"{obj_inst}: {obj.name}")
success = True
break
if not success:
@@ -825,7 +826,7 @@ def sample_goal_conditions(self):
None or str: If successful, returns None. Otherwise, returns an error message
"""
np.random.shuffle(self.ground_goal_state_options)
- log.warning(("number of ground_goal_state_options", len(self.ground_goal_state_options)))
+ log.debug(("number of ground_goal_state_options", len(self.ground_goal_state_options)))
num_goal_condition_set_to_test = 10
goal_condition_success = False
@@ -922,23 +923,23 @@ def sample(self, env, validate_goal=False):
error_msg = self.group_initial_conditions()
if error_msg:
- log.warning(error_msg)
+ log.error(error_msg)
return False, error_msg
error_msg = self.sample_initial_conditions()
if error_msg:
- log.warning(error_msg)
+ log.error(error_msg)
return False, error_msg
if validate_goal:
error_msg = self.sample_goal_conditions()
if error_msg:
- log.warning(error_msg)
+ log.error(error_msg)
return False, error_msg
error_msg = self.sample_initial_conditions_final()
if error_msg:
- log.warning(error_msg)
+ log.error(error_msg)
return False, error_msg
env.scene.update_initial_state()
@@ -952,14 +953,17 @@ def _get_obs(self, env):
low_dim_obs["robot_ori_cos"] = np.cos(env.robots[0].get_rpy())
low_dim_obs["robot_ori_sin"] = np.sin(env.robots[0].get_rpy())
+ # Batch rpy calculations for much better efficiency
+ objs_rpy = T.quat2euler(np.array([v.states[Pose].get_value()[1] for v in self.object_scope.values()]))
+
i = 0
- for _, v in self.object_scope.items():
+ for idx, v in enumerate(self.object_scope.values()):
# TODO: May need to update checking here to USDObject? Or even baseobject?
if isinstance(v, DatasetObject):
low_dim_obs[f"obj_{i}_valid"] = np.array([1.0])
- low_dim_obs[f"obj_{i}_pos"] = v.get_position()
- low_dim_obs[f"obj_{i}_ori_cos"] = np.cos(v.get_rpy())
- low_dim_obs[f"obj_{i}_ori_sin"] = np.sin(v.get_rpy())
+ low_dim_obs[f"obj_{i}_pos"] = v.states[Pose].get_value()[0]
+ low_dim_obs[f"obj_{i}_ori_cos"] = np.cos(objs_rpy[idx])
+ low_dim_obs[f"obj_{i}_ori_sin"] = np.sin(objs_rpy[idx])
for arm in env.robots[0].arm_names:
grasping_object = env.robots[0].is_grasping(arm=arm, candidate_obj=v)
low_dim_obs[f"obj_{i}_pos_in_gripper_{arm}"] = np.array([float(grasping_object)])
diff --git a/omnigibson/tasks/point_navigation_task.py b/omnigibson/tasks/point_navigation_task.py
index e77174059..3365c5884 100644
--- a/omnigibson/tasks/point_navigation_task.py
+++ b/omnigibson/tasks/point_navigation_task.py
@@ -1,6 +1,7 @@
import numpy as np
import omnigibson as og
+from omnigibson.object_states import Pose
from omnigibson.objects.primitive_object import PrimitiveObject
from omnigibson.reward_functions.collision_reward import CollisionReward
from omnigibson.reward_functions.point_goal_reward import PointGoalReward
@@ -276,7 +277,7 @@ def _get_l2_potential(self, env):
Returns:
float: L2 distance to the target position
"""
- return T.l2_distance(env.robots[self._robot_idn].get_position()[:2], self._goal_pos[:2])
+ return T.l2_distance(env.robots[self._robot_idn].states[Pose].get_value()[0][:2], self._goal_pos[:2])
def get_potential(self, env):
"""
@@ -318,7 +319,7 @@ def _reset_agent(self, env):
# Notify user if we failed to reset a collision-free sampled pose
if not success:
- log.warning("WARNING: Failed to reset robot without collision")
+ log.warning("Failed to reset robot without collision")
# Land the robot
land_object(env.robots[self._robot_idn], initial_pos, initial_quat, env.initial_pos_z_offset)
@@ -363,8 +364,8 @@ def _global_pos_to_robot_frame(self, env, pos):
Returns:
3-array: (x,y,z) position in self._robot_idn agent's local frame
"""
- delta_pos_global = np.array(pos) - env.robots[self._robot_idn].get_position()
- return T.quat2mat(env.robots[self._robot_idn].get_orientation()).T @ delta_pos_global
+ robot_pos, robot_quat = env.robots[self._robot_idn].states[Pose].get_value()
+ delta_pos_global = np.array(pos) - robot_pos
+ return T.quat2mat(robot_quat).T @ delta_pos_global
def _get_obs(self, env):
# Get relative position of goal with respect to the current agent position
@@ -373,9 +374,9 @@ def _get_obs(self, env):
xy_pos_to_goal = np.array(T.cartesian_to_polar(*xy_pos_to_goal))
# linear velocity and angular velocity
- quat = env.robots[self._robot_idn].get_orientation()
- lin_vel = T.quat2mat(quat).T @ env.robots[self._robot_idn].get_linear_velocity()
- ang_vel = T.quat2mat(quat).T @ env.robots[self._robot_idn].get_angular_velocity()
+ ori_t = T.quat2mat(env.robots[self._robot_idn].states[Pose].get_value()[1]).T
+ lin_vel = ori_t @ env.robots[self._robot_idn].get_linear_velocity()
+ ang_vel = ori_t @ env.robots[self._robot_idn].get_angular_velocity()
# Compose observation dict
low_dim_obs = dict(
@@ -403,7 +404,7 @@ def get_current_pos(self, env):
Returns:
3-array: (x,y,z) global current position representing the robot
"""
- return env.robots[self._robot_idn].get_position()
+ return env.robots[self._robot_idn].states[Pose].get_value()[0]
def get_shortest_path_to_goal(self, env, start_xy_pos=None, entire_path=False):
"""
@@ -421,7 +422,7 @@ def get_shortest_path_to_goal(self, env, start_xy_pos=None, entire_path=False):
- list of 2-array: List of (x,y) waypoints representing the path # TODO: is this true?
- float: geodesic distance of the path to the goal position
"""
- start_xy_pos = env.robots[self._robot_idn].get_position()[:2] if start_xy_pos is None else start_xy_pos
+ start_xy_pos = env.robots[self._robot_idn].states[Pose].get_value()[0][:2] if start_xy_pos is None else start_xy_pos
return env.scene.get_shortest_path(self._floor, start_xy_pos, self._goal_pos[:2], entire_path=entire_path)
def _step_visualization(self, env):
@@ -450,7 +451,7 @@ def step(self, env, action):
self._step_visualization(env=env)
# Update other internal variables
- new_robot_pos = env.robots[self._robot_idn].get_position()
+ new_robot_pos = env.robots[self._robot_idn].states[Pose].get_value()[0]
self._path_length += T.l2_distance(self._current_robot_pos[:2], new_robot_pos[:2])
self._current_robot_pos = new_robot_pos
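
These observation changes read cached poses through the `Pose` object state and reuse a single transposed rotation matrix. For reference, the global-to-robot-frame transform works as sketched below with stand-in values (assuming (x, y, z, w) quaternion ordering, matching `T.quat2mat`):

```python
import numpy as np
from scipy.spatial.transform import Rotation as R

robot_pos = np.array([1.0, 2.0, 0.0])
robot_quat = R.from_euler("z", np.pi / 2).as_quat()   # robot yawed 90 degrees left
goal_pos = np.array([2.0, 2.0, 0.0])                  # one meter ahead along world +x

delta_global = goal_pos - robot_pos
rot = R.from_quat(robot_quat).as_matrix()             # plays the role of T.quat2mat
delta_local = rot.T @ delta_global                    # ≈ [0, -1, 0]: the goal sits to the robot's right
```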
diff --git a/omnigibson/utils/ui_utils.py b/omnigibson/utils/ui_utils.py
index bbba945be..c963f2e01 100644
--- a/omnigibson/utils/ui_utils.py
+++ b/omnigibson/utils/ui_utils.py
@@ -392,6 +392,10 @@ def record_trajectory_from_waypoints(self, waypoints, per_step_distance, fps, st
"""
# Create splines and their derivatives
n_waypoints = len(waypoints)
+ if n_waypoints < 3:
+ og.log.error("Cannot generate trajectory from waypoints with less than 3 waypoints!")
+ return
+
splines = [CubicSpline(range(n_waypoints), waypoints[:, i], bc_type='clamped') for i in range(3)]
dsplines = [spline.derivative() for spline in splines]
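
The new guard short-circuits trajectory recording when too few waypoints are given; the spline construction it protects looks like the sketch below, shown with a hypothetical 3-waypoint path:

```python
import numpy as np
from scipy.interpolate import CubicSpline

waypoints = np.array([[0.0, 0.0, 1.0],
                      [1.0, 0.5, 1.0],
                      [2.0, 0.0, 1.2]])   # hypothetical (N, 3) camera waypoints, N = 3
n_waypoints = len(waypoints)

# One clamped cubic spline per coordinate (x, y, z), plus derivatives for step spacing
splines = [CubicSpline(range(n_waypoints), waypoints[:, i], bc_type="clamped") for i in range(3)]
dsplines = [spline.derivative() for spline in splines]

pos = np.array([s(0.5) for s in splines])    # interpolated position halfway between waypoints 0 and 1
vel = np.array([d(0.5) for d in dsplines])   # tangent used to space steps by per-step distance
```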
diff --git a/setup.py b/setup.py
index e8712a9fc..3c7782614 100644
--- a/setup.py
+++ b/setup.py
@@ -37,6 +37,7 @@
"opencv-python",
"nest_asyncio",
"imageio",
+ "imageio-ffmpeg",
"termcolor",
"progressbar",
],
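
The new `imageio-ffmpeg` dependency supplies the ffmpeg backend that `imageio` needs for writing video files (e.g. the trajectory recordings above). A small usage sketch with dummy frames:

```python
import imageio
import numpy as np

# Writing an .mp4 goes through the ffmpeg plugin provided by imageio-ffmpeg
writer = imageio.get_writer("trajectory.mp4", fps=30)
for _ in range(60):
    frame = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)  # dummy RGB frame
    writer.append_data(frame)
writer.close()
```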