From 80ee91d4e27cc82fb7638bc6de622eebb53cfea6 Mon Sep 17 00:00:00 2001
From: Lev
Date: Thu, 22 Feb 2024 00:42:54 -0800
Subject: [PATCH 1/2] Support for google/pegasus-xsum

---
 .../src/bindings/transformers/transformers.py | 33 +++++++++++++++----
 1 file changed, 27 insertions(+), 6 deletions(-)

diff --git a/pgml-extension/src/bindings/transformers/transformers.py b/pgml-extension/src/bindings/transformers/transformers.py
index fadde8858..c609ffef1 100644
--- a/pgml-extension/src/bindings/transformers/transformers.py
+++ b/pgml-extension/src/bindings/transformers/transformers.py
@@ -41,7 +41,9 @@
     PegasusTokenizer,
     TrainingArguments,
     Trainer,
-    GPTQConfig
+    GPTQConfig,
+    PegasusForConditionalGeneration,
+    PegasusTokenizer,
 )
 
 import threading
@@ -241,7 +243,6 @@ def __next__(self):
         self.q.task_done()
         return v
 
-
 class StandardPipeline(object):
     def __init__(self, model_name, **kwargs):
         # the default pipeline constructor doesn't pass all the kwargs (particularly load_in_4bit)
@@ -254,6 +255,8 @@ def __init__(self, model_name, **kwargs):
         if "use_auth_token" in kwargs:
             kwargs["token"] = kwargs.pop("use_auth_token")
 
+        self.model_name = model_name
+
         if (
             "task" in kwargs
             and model_name is not None
@@ -278,7 +281,11 @@ def __init__(self, model_name, **kwargs):
                     model_name, **kwargs
                 )
             elif self.task == "summarization" or self.task == "translation":
-                self.model = AutoModelForSeq2SeqLM.from_pretrained(model_name, **kwargs)
+                if model_name == "google/pegasus-xsum":
+                    # HF auto model doesn't detect GPUs
+                    self.model = PegasusForConditionalGeneration.from_pretrained(model_name)
+                else:
+                    self.model = AutoModelForSeq2SeqLM.from_pretrained(model_name, **kwargs)
             elif self.task == "text-generation" or self.task == "conversational":
                 # See: https://huggingface.co/docs/transformers/main/quantization
                 if "quantization_config" in kwargs:
@@ -290,17 +297,31 @@ def __init__(self, model_name, **kwargs):
             else:
                 raise PgMLException(f"Unhandled task: {self.task}")
 
+            if model_name == "google/pegasus-xsum":
+                kwargs.pop("token", None)
+
             if "token" in kwargs:
                 self.tokenizer = AutoTokenizer.from_pretrained(
                     model_name, token=kwargs["token"]
                 )
             else:
-                self.tokenizer = AutoTokenizer.from_pretrained(model_name)
+                if model_name == "google/pegasus-xsum":
+                    self.tokenizer = PegasusTokenizer.from_pretrained(model_name)
+                else:
+                    self.tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+            pipe_kwargs = {
+                "model": self.model,
+                "tokenizer": self.tokenizer,
+            }
+
+            # https://huggingface.co/docs/transformers/en/model_doc/pegasus
+            if model_name == "google/pegasus-xsum":
+                pipe_kwargs["device"] = kwargs.get("device", "cpu")
 
             self.pipe = transformers.pipeline(
                 self.task,
-                model=self.model,
-                tokenizer=self.tokenizer,
+                **pipe_kwargs,
             )
         else:
             self.pipe = transformers.pipeline(**kwargs)

From b9a6eb0d4ca5f194c659ad2f621a038d242818ce Mon Sep 17 00:00:00 2001
From: Lev
Date: Thu, 22 Feb 2024 00:43:23 -0800
Subject: [PATCH 2/2] black

---
 .../src/bindings/transformers/transformers.py | 24 ++++++++++++-------
 1 file changed, 15 insertions(+), 9 deletions(-)

diff --git a/pgml-extension/src/bindings/transformers/transformers.py b/pgml-extension/src/bindings/transformers/transformers.py
index c609ffef1..9390cac44 100644
--- a/pgml-extension/src/bindings/transformers/transformers.py
+++ b/pgml-extension/src/bindings/transformers/transformers.py
@@ -243,6 +243,7 @@ def __next__(self):
         self.q.task_done()
         return v
 
+
 class StandardPipeline(object):
     def __init__(self, model_name, **kwargs):
         # the default pipeline constructor doesn't pass all the kwargs (particularly load_in_4bit)
@@ -283,17 +284,25 @@ def __init__(self, model_name, **kwargs):
             elif self.task == "summarization" or self.task == "translation":
                 if model_name == "google/pegasus-xsum":
                     # HF auto model doesn't detect GPUs
-                    self.model = PegasusForConditionalGeneration.from_pretrained(model_name)
+                    self.model = PegasusForConditionalGeneration.from_pretrained(
+                        model_name
+                    )
                 else:
-                    self.model = AutoModelForSeq2SeqLM.from_pretrained(model_name, **kwargs)
+                    self.model = AutoModelForSeq2SeqLM.from_pretrained(
+                        model_name, **kwargs
+                    )
             elif self.task == "text-generation" or self.task == "conversational":
                 # See: https://huggingface.co/docs/transformers/main/quantization
                 if "quantization_config" in kwargs:
                     quantization_config = kwargs.pop("quantization_config")
                     quantization_config = GPTQConfig(**quantization_config)
-                    self.model = AutoModelForCausalLM.from_pretrained(model_name, quantization_config=quantization_config, **kwargs)
+                    self.model = AutoModelForCausalLM.from_pretrained(
+                        model_name, quantization_config=quantization_config, **kwargs
+                    )
                 else:
-                    self.model = AutoModelForCausalLM.from_pretrained(model_name, **kwargs)
+                    self.model = AutoModelForCausalLM.from_pretrained(
+                        model_name, **kwargs
+                    )
             else:
                 raise PgMLException(f"Unhandled task: {self.task}")
 
@@ -341,7 +350,7 @@ def stream(self, input, timeout=None, **kwargs):
                 self.tokenizer,
                 timeout=timeout,
                 skip_prompt=True,
-                skip_special_tokens=True
+                skip_special_tokens=True,
             )
             if "chat_template" in kwargs:
                 input = self.tokenizer.apply_chat_template(
@@ -364,9 +373,7 @@ def stream(self, input, timeout=None, **kwargs):
             )
         else:
             streamer = TextIteratorStreamer(
-                self.tokenizer,
-                timeout=timeout,
-                skip_special_tokens=True
+                self.tokenizer, timeout=timeout, skip_special_tokens=True
             )
             input = self.tokenizer(input, return_tensors="pt", padding=True).to(
                 self.model.device
@@ -517,7 +524,6 @@ def embed(transformer, inputs, kwargs):
     return embed_using(model, transformer, inputs, kwargs)
 
 
-
 def clear_gpu_cache(memory_usage: None):
     if not torch.cuda.is_available():
         raise PgMLException(f"No GPU available")
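
For reviewers, a minimal standalone sketch of the code path these patches special-case for google/pegasus-xsum: loading the concrete Pegasus classes instead of the Auto* classes and passing an explicit device to the pipeline. This is illustrative only and not part of the patch; it assumes transformers, torch, and sentencepiece are installed, and the device selection below is an assumed example, not behavior from the patch.

    # Sketch (not part of the patch): mirrors the google/pegasus-xsum
    # branch added to StandardPipeline.__init__ in PATCH 1/2.
    import torch
    import transformers
    from transformers import PegasusForConditionalGeneration, PegasusTokenizer

    model_name = "google/pegasus-xsum"

    # The patch bypasses AutoModelForSeq2SeqLM for this checkpoint
    # ("HF auto model doesn't detect GPUs") and loads Pegasus directly.
    model = PegasusForConditionalGeneration.from_pretrained(model_name)
    tokenizer = PegasusTokenizer.from_pretrained(model_name)

    # The patch defaults the pipeline device to "cpu" unless a device kwarg
    # is supplied; "cuda:0" here is a hypothetical example value.
    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    pipe = transformers.pipeline(
        "summarization",
        model=model,
        tokenizer=tokenizer,
        device=device,
    )

    text = "PostgresML runs machine learning models inside Postgres."
    print(pipe(text)[0]["summary_text"])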
