Update README.md
README.md (CHANGED)
@@ -8,27 +8,27 @@ https://huggingface.co/facebook/sam-vit-base with ONNX weights to be compatible

## Usage (Transformers.js)

If you haven't already, you can install the [Transformers.js](https://huggingface.co/docs/transformers.js) JavaScript library from [NPM](https://www.npmjs.com/package/@huggingface/transformers) using:
```bash
npm i @huggingface/transformers
```

**Example:** Perform mask generation with `Xenova/sam-vit-base`.

```js
import { SamModel, AutoProcessor, RawImage } from "@huggingface/transformers";

// Load model and processor
const model = await SamModel.from_pretrained("Xenova/sam-vit-base");
const processor = await AutoProcessor.from_pretrained("Xenova/sam-vit-base");

// Prepare image and input points
const img_url = "https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/corgi.jpg";
const raw_image = await RawImage.read(img_url);
const input_points = [[[340, 250]]];

// Process inputs and perform mask generation
const inputs = await processor(raw_image, { input_points });
const outputs = await model(inputs);

// Post-process masks

@@ -48,9 +48,9 @@ console.log(scores);

// dims: [ 1, 1, 3 ],
// type: 'float32',
// data: Float32Array(3) [
//   0.9742214679718018,
//   1.002995491027832,
//   0.9613651037216187
// ],
// size: 3
// }
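
// The post-processing code between the two hunks is unchanged and therefore not
// shown above. For reference, a minimal sketch of that step follows; it assumes
// the Transformers.js SAM API (`processor.post_process_masks`, `outputs.pred_masks`,
// `outputs.iou_scores`), so the exact lines in the README may differ.
const masks = await processor.post_process_masks(
  outputs.pred_masks,           // candidate masks predicted by the model
  inputs.original_sizes,        // original image size recorded by the processor
  inputs.reshaped_input_sizes,  // resized input size used during preprocessing
);
console.log(masks);             // one boolean mask tensor per input image

const scores = outputs.iou_scores;  // predicted IoU for each of the three candidate masks
console.log(scores);                // prints the Tensor shown above; the highest-scoring
                                    // index selects the best candidate mask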