Upload folder using huggingface_hub
- README.md +12 -0
- config.json +61 -0
- meta.json +1 -0
- model.safetensors +3 -0
README.md
ADDED
@@ -0,0 +1,12 @@
+---
+library_name: lerobot
+tags:
+- diffusion-policy
+- model_hub_mixin
+- pytorch_model_hub_mixin
+- robotics
+---
+
+This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+- Library: https://github.com/huggingface/lerobot
+- Docs: [More Information Needed]
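The README above points to the PyTorchModelHubMixin integration, which is what serializes a policy's init arguments to `config.json` and its weights to `model.safetensors` before uploading them. The sketch below illustrates that workflow in general terms; the class name, sizes, and repo id are placeholders, not taken from this repo.

```python
# Hypothetical sketch of the PyTorchModelHubMixin workflow; the class and
# repo id are illustrative only, not the actual policy in this repository.
import torch
from huggingface_hub import PyTorchModelHubMixin


class TinyPolicy(torch.nn.Module, PyTorchModelHubMixin):
    def __init__(self, hidden_dim: int = 16):
        super().__init__()
        self.net = torch.nn.Linear(hidden_dim, hidden_dim)

    def forward(self, x):
        return self.net(x)


policy = TinyPolicy(hidden_dim=16)
# Serializes the init kwargs to config.json and the weights (typically
# model.safetensors), then uploads both to the Hub in a single commit.
policy.push_to_hub("your-username/your-policy-repo")

# Reloading later restores both the config and the weights.
restored = TinyPolicy.from_pretrained("your-username/your-policy-repo")
```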
config.json
ADDED
@@ -0,0 +1,61 @@
+{
+    "beta_end": 0.02,
+    "beta_schedule": "squaredcos_cap_v2",
+    "beta_start": 0.0001,
+    "clip_sample": true,
+    "clip_sample_range": 1.0,
+    "crop_is_random": true,
+    "crop_shape": [
+        140,
+        215
+    ],
+    "diffusion_step_embed_dim": 128,
+    "do_mask_loss_for_padding": false,
+    "down_dims": [
+        512,
+        1024,
+        2048
+    ],
+    "horizon": 8,
+    "input_normalization_modes": {
+        "observation.images.left": "mean_std",
+        "observation.images.right": "mean_std",
+        "observation.state": "mean_std"
+    },
+    "input_shapes": {
+        "observation.images.left": [
+            3,
+            160,
+            240
+        ],
+        "observation.images.right": [
+            3,
+            160,
+            240
+        ],
+        "observation.state": [
+            1
+        ]
+    },
+    "kernel_size": 5,
+    "n_action_steps": 8,
+    "n_groups": 8,
+    "n_obs_steps": 1,
+    "noise_scheduler_type": "DDIM",
+    "num_inference_steps": 10,
+    "num_train_timesteps": 100,
+    "output_normalization_modes": {
+        "action": "min_max"
+    },
+    "output_shapes": {
+        "action": [
+            7
+        ]
+    },
+    "prediction_type": "epsilon",
+    "pretrained_backbone_weights": "ResNet18_Weights.IMAGENET1K_V1",
+    "spatial_softmax_num_keypoints": 32,
+    "use_film_scale_modulation": true,
+    "use_group_norm": false,
+    "vision_backbone": "resnet18"
+}
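The scheduler-related fields in config.json (`noise_scheduler_type`, `num_train_timesteps`, the beta schedule, clipping, and `prediction_type`) describe a standard DDIM setup: 100 training timesteps but only 10 denoising steps at inference. A minimal sketch of how these fields map onto a `diffusers` DDIMScheduler, assuming the `diffusers` package is available (parameter names follow recent releases):

```python
# Sketch only: builds a DDIM noise scheduler from the scheduler fields above.
import json

from diffusers import DDIMScheduler

with open("config.json") as f:
    cfg = json.load(f)

scheduler = DDIMScheduler(
    num_train_timesteps=cfg["num_train_timesteps"],  # 100
    beta_start=cfg["beta_start"],                    # 0.0001
    beta_end=cfg["beta_end"],                        # 0.02
    beta_schedule=cfg["beta_schedule"],              # "squaredcos_cap_v2"
    clip_sample=cfg["clip_sample"],                  # True
    prediction_type=cfg["prediction_type"],          # "epsilon"
)

# At inference the policy denoises with only 10 steps instead of the
# 100 training timesteps.
scheduler.set_timesteps(cfg["num_inference_steps"])  # 10
```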
meta.json
ADDED
@@ -0,0 +1 @@
+{"loss": 0.010587829165160656, "time": 1729753983.222122, "epoch": 400.0, "step": 4000, "params": {"epochs": 600, "root": "episodes", "repo_id": "Frontier-Machines/move_to_duct_tape-20241023_161851", "frequency": 10, "img_width": 240, "img_height": 160, "gripper_dof": 0, "position_dof": 3, "rotation_dof": 4, "batch_size": 128, "log_freq": 50, "checkpoint_freq": 50, "observation_length": 1, "inference_steps": 10}}
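meta.json is a small training-metadata record: the final loss, a wall-clock timestamp, the epoch and step counters, and the hyperparameters used for the run. A quick way to inspect it, assuming the file sits in the current directory:

```python
# Reads the training metadata written alongside the model files.
import json

with open("meta.json") as f:
    meta = json.load(f)

print(f"loss={meta['loss']:.4f} at step {meta['step']} (epoch {meta['epoch']:.0f})")
print(f"batch_size={meta['params']['batch_size']}, "
      f"inference_steps={meta['params']['inference_steps']}")
```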
model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e1c9e38e604e0515d26a431cc28c780478c70dee89e5b55bf019291247260e32
+size 1050636108
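The model.safetensors entry is a Git LFS pointer, not the weights themselves; the actual payload is about 1.05 GB (1050636108 bytes). When fetching files through `huggingface_hub`, the pointer is resolved automatically. The repo id below is a placeholder, since the commit page does not show it:

```python
# Downloads the actual safetensors payload that the LFS pointer refers to.
# The repo id is a placeholder; substitute the real model repo.
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

weights_path = hf_hub_download(
    repo_id="your-username/your-policy-repo",
    filename="model.safetensors",
)
state_dict = load_file(weights_path)
print(f"{len(state_dict)} tensors loaded")
```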