Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
The table of contents is too big for display.
Diff view
Diff view
  •  
  •  
  •  
4 changes: 4 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,10 @@ MANIFEST
*.manifest
*.spec

# pictures
*.png
*.jpg

# Installer logs
pip-log.txt
pip-delete-this-directory.txt
Expand Down
161 changes: 161 additions & 0 deletions data/datasets/source/process_data_demogen.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,161 @@
import argparse
import ast
import os
import pdb
import pickle
import shutil
from copy import deepcopy

import cv2
import h5py
import numpy as np
import yaml
import zarr


def load_hdf5(dataset_path):
    """Load one recorded episode from an HDF5 file.

    Parameters
    ----------
    dataset_path : str
        Path to an ``episode*.hdf5`` file.

    Returns
    -------
    tuple
        ``(left_gripper, left_arm, right_gripper, right_arm, vector,
        pointcloud)`` — the raw arrays stored under ``/joint_action/*``
        and ``/pointcloud``.

    Raises
    ------
    FileNotFoundError
        If ``dataset_path`` does not exist. (Previously this printed a
        message and called ``exit()``, which made the function unusable
        as a library and hid the failure from callers.)
    """
    if not os.path.isfile(dataset_path):
        raise FileNotFoundError(f"Dataset does not exist at \n{dataset_path}\n")

    with h5py.File(dataset_path, "r") as root:
        left_gripper = root["/joint_action/left_gripper"][()]
        left_arm = root["/joint_action/left_arm"][()]
        right_gripper = root["/joint_action/right_gripper"][()]
        right_arm = root["/joint_action/right_arm"][()]
        vector = root["/joint_action/vector"][()]
        pointcloud = root["/pointcloud"][()]

    return left_gripper, left_arm, right_gripper, right_arm, vector, pointcloud


def main():
    """Repackage selected RoboTwin HDF5 episodes into one zarr dataset.

    Positional CLI arguments:
        task_name        task name, e.g. ``beat_block_hammer``
        task_config      config name used in the data directory layout
        expert_data_num  number of episodes; must equal ``len(episode_indices)``
        episode_indices  Python-list literal of episode numbers, e.g. ``[0,2,5]``

    For each episode with T steps, step ``j`` (j < T-1) contributes its
    point cloud and joint-state vector as an observation, and the
    joint-state at step ``j+1`` becomes the corresponding action — i.e.
    each episode yields T-1 (obs, action) pairs.
    """
    parser = argparse.ArgumentParser(description="Process some episodes.")
    parser.add_argument(
        "task_name",
        type=str,
        help="The name of the task (e.g., beat_block_hammer)",
    )
    parser.add_argument("task_config", type=str)
    parser.add_argument(
        "expert_data_num",
        type=int,
        help="Number of episodes to process (e.g., 50)",
    )
    parser.add_argument(
        "episode_indices",
        type=str,
        help="Python list of episode indices to process, e.g. [0,2,5]",
    )
    args = parser.parse_args()

    task_name = args.task_name
    num = args.expert_data_num
    task_config = args.task_config

    # Parse the episode_indices argument. ast.literal_eval only accepts
    # literals, unlike eval(), which would execute arbitrary code from argv.
    # Explicit raises replace asserts, which are stripped under `python -O`.
    try:
        episode_indices = ast.literal_eval(args.episode_indices)
    except (ValueError, SyntaxError):
        print("Error parsing episode_indices, should be Python list like [0,2,5]")
        raise
    if not isinstance(episode_indices, list):
        raise TypeError("episode_indices must be a Python list like [0,2,5]")
    if len(episode_indices) != num:
        raise ValueError(
            f"episode_indices has {len(episode_indices)} entries, "
            f"but expert_data_num is {num}"
        )

    load_dir = "../../data/" + str(task_name) + "/" + str(task_config)

    total_count = 0

    # First episode index is used only to name the output store.
    first_idx = episode_indices[0]
    save_dir = f"./data/{task_name}-{task_config}-{num}-demogen-{first_idx}.zarr"

    # Start from a clean store so stale arrays never survive a rerun.
    if os.path.exists(save_dir):
        shutil.rmtree(save_dir)

    zarr_root = zarr.group(save_dir)
    zarr_data = zarr_root.create_group("data")
    zarr_meta = zarr_root.create_group("meta")

    point_cloud_arrays = []
    episode_ends_arrays = []
    state_arrays = []
    joint_action_arrays = []

    for i, ep_num in enumerate(episode_indices):
        print(f"processing episode: {i + 1} / {num} (episode{ep_num})", end="\r")
        load_path = os.path.join(load_dir, f"data/episode{ep_num}.hdf5")
        (
            left_gripper_all,
            left_arm_all,
            right_gripper_all,
            right_arm_all,
            vector_all,
            pointcloud_all,
        ) = load_hdf5(load_path)

        num_steps = left_gripper_all.shape[0]
        for j in range(num_steps):
            joint_state = vector_all[j]
            # Observation at step j (the last step has no following action).
            if j != num_steps - 1:
                point_cloud_arrays.append(pointcloud_all[j])
                state_arrays.append(joint_state)
            # The state at step j serves as the action for step j-1.
            if j != 0:
                joint_action_arrays.append(joint_state)

        total_count += num_steps - 1
        episode_ends_arrays.append(total_count)

    print()
    try:
        episode_ends_arrays = np.array(episode_ends_arrays)
        state_arrays = np.array(state_arrays)
        point_cloud_arrays = np.array(point_cloud_arrays)
        joint_action_arrays = np.array(joint_action_arrays)

        compressor = zarr.Blosc(cname="zstd", clevel=3, shuffle=1)
        # NOTE(review): chunk tuples use shape[1] only; for the (likely 3-D)
        # point-cloud array this relies on zarr normalizing a short chunk
        # spec — confirm against the zarr version in use.
        state_chunk_size = (100, state_arrays.shape[1])
        joint_chunk_size = (100, joint_action_arrays.shape[1])
        point_cloud_chunk_size = (100, point_cloud_arrays.shape[1])
        zarr_data.create_dataset(
            "point_cloud",
            data=point_cloud_arrays,
            chunks=point_cloud_chunk_size,
            overwrite=True,
            compressor=compressor,
        )
        zarr_data.create_dataset(
            "state",
            data=state_arrays,
            chunks=state_chunk_size,
            dtype="float32",
            overwrite=True,
            compressor=compressor,
        )
        zarr_data.create_dataset(
            "action",
            data=joint_action_arrays,
            chunks=joint_chunk_size,
            dtype="float32",
            overwrite=True,
            compressor=compressor,
        )
        zarr_meta.create_dataset(
            "episode_ends",
            data=episode_ends_arrays,
            dtype="int64",
            overwrite=True,
            compressor=compressor,
        )
    except ZeroDivisionError as e:
        # An empty dataset makes zarr divide by zero when normalizing chunks;
        # the usual cause is point clouds disabled in the task config.
        print("If you get a `ZeroDivisionError: division by zero`, check that `data/pointcloud` in the task config is set to true.")
        raise
    except Exception as e:
        print(f"An unexpected error occurred ({type(e).__name__}): {e}")
        raise


if __name__ == "__main__":
    main()
Binary file removed data/sam_mask/banana/0/banana.png
Binary file not shown.
Binary file removed data/sam_mask/banana/0/basket.png
Binary file not shown.
Binary file removed data/sam_mask/banana/0/source.jpg
Binary file not shown.
Binary file removed data/sam_mask/banana/1/banana.png
Binary file not shown.
Binary file removed data/sam_mask/banana/1/basket.png
Binary file not shown.
Binary file removed data/sam_mask/banana/1/source.jpg
Binary file not shown.
Binary file removed data/sam_mask/banana/2/banana.png
Binary file not shown.
Binary file removed data/sam_mask/banana/2/basket.png
Binary file not shown.
Binary file removed data/sam_mask/banana/2/source.jpg
Binary file not shown.
Binary file removed data/sam_mask/egg/0/plate.jpg
Binary file not shown.
Binary file removed data/sam_mask/egg/0/source.jpg
Binary file not shown.
Binary file removed data/sam_mask/egg/1/plate.jpg
Binary file not shown.
Binary file removed data/sam_mask/egg/1/source.jpg
Binary file not shown.
Binary file removed data/sam_mask/egg/2/plate.jpg
Binary file not shown.
Binary file removed data/sam_mask/egg/2/source.jpg
Binary file not shown.
Binary file removed data/sam_mask/egg/check/plate_0.jpg
Binary file not shown.
Binary file removed data/sam_mask/egg/check/plate_1.jpg
Binary file not shown.
Binary file removed data/sam_mask/egg/check/plate_2.jpg
Binary file not shown.
Binary file removed data/sam_mask/flower/0/flowers tied with twine.jpg
Binary file not shown.
Binary file removed data/sam_mask/flower/0/source.jpg
Binary file not shown.
Binary file removed data/sam_mask/flower/0/white vase.jpg
Binary file not shown.
Binary file not shown.
Binary file removed data/sam_mask/flower/1/source.jpg
Binary file not shown.
Binary file removed data/sam_mask/flower/1/white vase.jpg
Binary file not shown.
Binary file removed data/sam_mask/flower/2/flowers tied with twine.jpg
Binary file not shown.
Binary file removed data/sam_mask/flower/2/source.jpg
Diff not rendered.
Binary file removed data/sam_mask/flower/2/white vase.jpg
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Binary file removed data/sam_mask/flower/check/white vase_0.jpg
Diff not rendered.
Binary file removed data/sam_mask/flower/check/white vase_1.jpg
Diff not rendered.
Binary file removed data/sam_mask/flower/check/white vase_2.jpg
Diff not rendered.
6 changes: 3 additions & 3 deletions data/sam_mask/get_mask.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,12 +53,12 @@ def save_binary_mask(mask, output_path):


model = LangSAM()
image_pil = Image.open("./assets/source_0.jpg").convert("RGB")
text_prompt = "green basket in the middle."
image_pil = Image.open("./assets/source_0.jpg").convert("RGB")
text_prompt = "an iron hammer with a black handle" # 此处需修改
results = model.predict([image_pil], [text_prompt])

masks = results[0]["masks"]
print(masks.shape)
assert masks.shape[0] == 1

save_binary_mask(masks[0], f"assets/outputs/basket_0.png")
save_binary_mask(masks[0], f"assets/outputs/hammer_0.png") # 此处需修改
64 changes: 64 additions & 0 deletions data/sam_mask/get_mask_bk.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
# https://github.com/luca-medeiros/lang-segment-anything

from PIL import Image
from lang_sam import LangSAM
import numpy as np


# Function to apply transparent masks on the image
def save_mask_results(image, masks, output_path):
    """Overlay binary masks on *image* as translucent red and save as PNG.

    Parameters
    ----------
    image : PIL.Image.Image
        The base image to draw the overlays on.
    masks : iterable of np.ndarray
        Binary masks; each is resized to the image size before compositing.
    output_path : str
        Destination path; the result is written in PNG format.
    """
    # BUG FIX: the original read the module-level global `image_pil` here,
    # silently ignoring the `image` parameter. Use the parameter instead.
    image = image.convert("RGBA")

    for mask in masks:
        # Resize the mask to the image size; NEAREST keeps it binary.
        # NOTE(review): assumes `mask` has a dtype Image.fromarray accepts
        # (e.g. uint8); a bool mask would need casting first — confirm.
        mask_resized = Image.fromarray(mask).resize(image.size, Image.NEAREST).convert("L")

        # Expand the single-channel mask to RGBA so we can index channels.
        mask_rgba = mask_resized.convert("RGBA")
        mask_rgba = np.array(mask_rgba)
        # L -> RGBA replicates the channel, so channel 0 is the mask value;
        # it becomes the alpha (opacity) of the overlay.
        alpha_channel = mask_rgba[:, :, 0]

        # Solid red overlay whose transparency follows the mask.
        transparent_overlay = np.zeros((image.height, image.width, 4), dtype=np.uint8)
        transparent_overlay[:, :, 0] = 255  # red
        transparent_overlay[:, :, 1] = 0    # green
        transparent_overlay[:, :, 2] = 0    # blue
        transparent_overlay[:, :, 3] = alpha_channel

        overlay_image = Image.fromarray(transparent_overlay, "RGBA")

        # Composite this mask's overlay onto the accumulated image.
        image = Image.alpha_composite(image, overlay_image)

    image.save(output_path, "PNG")

# Function to save a binary mask as an image
def save_binary_mask(mask, output_path):
    """Write a {0, 1} binary mask to *output_path* as an 8-bit image."""
    # Scale 1 -> 255 so the mask renders as pure white on black.
    pixels = (mask * 255).astype(np.uint8)
    # Any format PIL infers from the extension works (PNG, JPEG, ...).
    Image.fromarray(pixels).save(output_path)


# Segment the prompted object in the source image and save its binary mask.
model = LangSAM()
image_pil = Image.open("./assets/source_0.jpg").convert("RGB")
text_prompt = "green basket in the middle."
results = model.predict([image_pil], [text_prompt])

# results[0]["masks"] holds one mask per detected instance.
masks = results[0]["masks"]
print(masks.shape)
# Expect exactly one instance for this prompt; fail loudly otherwise.
assert masks.shape[0] == 1

save_binary_mask(masks[0], f"assets/outputs/basket_0.png")
1 change: 1 addition & 0 deletions data/sam_mask/lang-segment-anything/.dockerignore
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
.venv
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
name: 🐛 Bugs
description: Report bugs
labels: ["bug"]
title: "Please read & provide the following"
body:
- type: markdown
attributes:
value: |
## Instructions To Reproduce the 🐛 Bug:

        1. Provide background context: what you were trying to do and in what setup.

- type: textarea
attributes:
label: "Full runnable code or full changes you made:"
description: Please provide the code or changes that led to the bug.
placeholder: |
```
```
validations:
required: true

- type: textarea
attributes:
label: "What exact command you ran:"
description: Describe the exact command you ran that triggered the bug.
validations:
required: true

- type: markdown
attributes:
value: |
1. Please simplify the steps as much as possible so they do not require additional resources to
run, such as a private dataset.

## Expected behavior:

If there are no obvious errors in "full logs" provided above,
please tell us the expected behavior.

- type: textarea
attributes:
label: Expected behavior
description: Describe the expected behavior if the bug had not occurred.
validations:
required: true

- type: checkboxes
attributes:
label: Environment
description: Indicate your environment details.
options:
- label: "I'm using the latest version!"
- label: "It's not a user-side mistake!"
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
# require an issue template to be chosen
blank_issues_enabled: false

# Unexpected behaviors & bugs are split to two templates.
# When they are one template, users think "it's not a bug" and don't choose the template.
#
# But the file name is still "unexpected-problems-bugs.md" so that old references
# to this issue template still works.
# It's ok since this template should be a superset of "bugs.md" (unexpected behaviors is a superset of bugs)
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
name: 📚 Documentation Issue
description: Report a problem about existing documentation or comments.
labels: ["documentation"]
body:
- type: markdown
attributes:
value: |
## 📚 Documentation Issue

This issue category is for problems about existing documentation, not for asking how-to questions.

- type: input
attributes:
label: Provide a link to existing documentation/comment
description: Paste the URL or path to the documentation or comment that has an issue.
placeholder: "https://example.com/docs/section"
validations:
required: true

- type: textarea
attributes:
label: How should the above documentation/comment improve?
description: Describe the changes or improvements that should be made to the documentation or comment.
placeholder: "Please describe the suggested improvements here."
validations:
required: true
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
name: 🚀 Feature Request
description: Suggest an improvement or new feature
labels: ["enhancement"]
body:
- type: markdown
attributes:
value: |
## 🚀 Feature

A clear and concise description of the feature proposal.

- type: textarea
attributes:
label: Motivation & Examples
description: |
Tell us why the feature is useful.

Describe what the feature would look like if it is implemented. Best demonstrated using **code examples** in addition to words.
placeholder: |
<put sample here>
validations:
required: true

- type: markdown
attributes:
value: |
## Note

We only consider adding new features if they are relevant to this library.
Consider if this new feature deserves to be here or should be a new library.
Loading