Update README.md
Browse files
README.md
CHANGED
|
@@ -23,11 +23,23 @@ npm i @huggingface/transformers
|
|
| 23 |
|
| 24 |
You can then use the model for portrait matting, as follows:
|
| 25 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 26 |
```js
|
| 27 |
import { AutoModel, AutoProcessor, RawImage } from '@huggingface/transformers';
|
| 28 |
|
| 29 |
// Load model and processor
|
| 30 |
-
const model = await AutoModel.from_pretrained('Xenova/modnet', { dtype: 'fp32' });
|
| 31 |
const processor = await AutoProcessor.from_pretrained('Xenova/modnet');
|
| 32 |
|
| 33 |
// Load image from URL
|
|
|
|
| 23 |
|
| 24 |
You can then use the model for portrait matting, as follows:
|
| 25 |
|
| 26 |
+
```js
|
| 27 |
+
import { pipeline } from '@huggingface/transformers';
|
| 28 |
+
|
| 29 |
+
const segmenter = await pipeline('background-removal', 'Xenova/modnet', { dtype: 'fp32' });
|
| 30 |
+
const url = 'https://images.pexels.com/photos/5965592/pexels-photo-5965592.jpeg?auto=compress&cs=tinysrgb&w=1024';
|
| 31 |
+
const output = await segmenter(url);
|
| 32 |
+
output[0].save('mask.png');
|
| 33 |
+
// You can also use `output[0].toCanvas()` or `await output[0].toBlob()` if you would like to access the output without saving.
|
| 34 |
+
```
|
| 35 |
+
|
| 36 |
+
Or with the `AutoModel` and `AutoProcessor` APIs:
|
| 37 |
+
|
| 38 |
```js
|
| 39 |
import { AutoModel, AutoProcessor, RawImage } from '@huggingface/transformers';
|
| 40 |
|
| 41 |
// Load model and processor
|
| 42 |
+
const model = await AutoModel.from_pretrained('Xenova/modnet', { dtype: 'fp32' });
|
| 43 |
const processor = await AutoProcessor.from_pretrained('Xenova/modnet');
|
| 44 |
|
| 45 |
// Load image from URL
|