gsarti committed on
Commit
d4ac83c
·
1 Parent(s): ed5dc9f

Update to LXT 2.1

Browse files
Files changed (3) hide show
  1. README.md +1 -1
  2. app.py +11 -8
  3. requirements.txt +2 -1
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 🌴
4
  colorFrom: blue
5
  colorTo: green
6
  sdk: gradio
7
- sdk_version: 5.35.0
8
  app_file: app.py
9
  pinned: true
10
  license: apache-2.0
 
4
  colorFrom: blue
5
  colorTo: green
6
  sdk: gradio
7
+ sdk_version: 6.0.1
8
  app_file: app.py
9
  pinned: true
10
  license: apache-2.0
app.py CHANGED
@@ -9,12 +9,15 @@ import spaces
9
  from bm25s.hf import BM25HF
10
  from citations import inseq_citation, inseq_xai_citation, lxt_citation, mirage_citation, pecore_citation
11
  from examples import examples
12
- from lxt.functional import add2, mul2, softmax
13
- from lxt.models.llama import LlamaForCausalLM, attnlrp
14
  from rerankers import Reranker
15
  from style import custom_css
16
  from tqdm import tqdm
17
  from transformers import AutoTokenizer
 
 
 
18
 
19
  from inseq import load_model, register_step_function
20
  from inseq.attr import StepFunctionArgs
@@ -37,10 +40,10 @@ model_registry = {}
37
  def get_model(model_size):
38
  model_id = f"HuggingFaceTB/SmolLM-{model_size}-Instruct"
39
  if model_id not in model_registry:
40
- hf_model = LlamaForCausalLM.from_pretrained(model_id)
41
- tokenizer = AutoTokenizer.from_pretrained(model_id)
42
- attnlrp.register(hf_model)
43
- model = load_model(hf_model, "saliency", tokenizer=tokenizer)
44
  model.bos_token = "<|endoftext|>"
45
  model.bos_token_id = 0
46
  model_registry[model_id] = model
@@ -207,7 +210,7 @@ register_step_function(
207
  )
208
 
209
 
210
- with gr.Blocks(css=custom_css) as demo:
211
  with gr.Row():
212
  with gr.Column(min_width=500):
213
  gr.HTML(
@@ -388,5 +391,5 @@ with gr.Blocks(css=custom_css) as demo:
388
  )
389
 
390
  demo.queue(api_open=False, max_size=20).launch(
391
- allowed_paths=["img/", "outputs/"], show_api=False
392
  )
 
9
  from bm25s.hf import BM25HF
10
  from citations import inseq_citation, inseq_xai_citation, lxt_citation, mirage_citation, pecore_citation
11
  from examples import examples
12
+ from lxt.efficient import monkey_patch
13
+ from lxt.explicit.functional import add2, mul2, softmax
14
  from rerankers import Reranker
15
  from style import custom_css
16
  from tqdm import tqdm
17
  from transformers import AutoTokenizer
18
+ from transformers.models.llama import modeling_llama
19
+
20
+ monkey_patch(modeling_llama, verbose=False)
21
 
22
  from inseq import load_model, register_step_function
23
  from inseq.attr import StepFunctionArgs
 
40
  def get_model(model_size):
41
  model_id = f"HuggingFaceTB/SmolLM-{model_size}-Instruct"
42
  if model_id not in model_registry:
43
+ model = load_model(model_id, "input_x_gradient")
44
+ # deactivate gradients on parameters to save memory
45
+ for param in model.parameters():
46
+ param.requires_grad = False
47
  model.bos_token = "<|endoftext|>"
48
  model.bos_token_id = 0
49
  model_registry[model_id] = model
 
210
  )
211
 
212
 
213
+ with gr.Blocks() as demo:
214
  with gr.Row():
215
  with gr.Column(min_width=500):
216
  gr.HTML(
 
391
  )
392
 
393
  demo.queue(api_open=False, max_size=20).launch(
394
+ css=custom_css, allowed_paths=["img/", "outputs/"]
395
  )
requirements.txt CHANGED
@@ -3,4 +3,5 @@ git+https://github.com/inseq-team/inseq.git@v07
3
  bm25s
4
  gradio-iframe
5
  rerankers[transformers]
6
- lxt==2.1
 
 
3
  bm25s
4
  gradio-iframe
5
  rerankers[transformers]
6
+ lxt==2.1
7
+ huggingface-hub==0.36.0