From 24af21210353a0fb5370cb78e1d18b92c9d59c18 Mon Sep 17 00:00:00 2001
From: Zhao-Penghai
Date: Fri, 6 Sep 2024 00:56:17 +0800
Subject: [PATCH] 240906

---
 .gitignore     |  3 ++-
 .idea/misc.xml |  3 +++
 single_pred.py | 50 +++++++++++++++++++-------------------------------
 3 files changed, 24 insertions(+), 32 deletions(-)

diff --git a/.gitignore b/.gitignore
index fd0099d..4b168e2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -48,4 +48,5 @@ official_runs*
 *.pt
 *.pth
 /.idea
-prune.py
\ No newline at end of file
+prune.py
+/.idea

diff --git a/.idea/misc.xml b/.idea/misc.xml
index e8ba46f..edad29d 100644
--- a/.idea/misc.xml
+++ b/.idea/misc.xml
@@ -1,4 +1,7 @@
+
+
\ No newline at end of file

diff --git a/single_pred.py b/single_pred.py
index a1f81ac..e53a3bc 100644
--- a/single_pred.py
+++ b/single_pred.py
@@ -1,46 +1,34 @@
-from transformers import pipeline
-import torch
-from datetime import datetime
-import torch
-from torch.nn.functional import mse_loss
-from torch.utils.data import Dataset, DataLoader
-from transformers import RobertaTokenizer, AdamW, RobertaForSequenceClassification, AutoTokenizer, \
-    AutoModelForSequenceClassification, FlaxLlamaForCausalLM, LlamaForSequenceClassification
-import pandas as pd
-from torch.utils.tensorboard import SummaryWriter
-from torch.utils.data.dataset import random_split
-from tqdm import tqdm
-import os
-import argparse
-import json
-import torch
-from peft import PeftModel,PeftModelForTokenClassification
-import os
+
 import torch.nn as nn
-from tools.order_metrics import *
-import transformers.models.qwen2
-from peft import AutoPeftModelForCausalLM,AutoPeftModelForSequenceClassification,AutoPeftModelForTokenClassification
+
+from peft import AutoPeftModelForSequenceClassification
 from transformers import AutoTokenizer
-import torch
-from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training, TaskType
-title = '''xxxx'''
-abstract = ''' xxx '''
-model_pth = r"xxx"
+
+
+model_pth = r"xxx"  # Warning: you have to modify "base_model_name_or_path" in adapter_config.json; we will fix this after the rebuttal.
 model = AutoPeftModelForSequenceClassification.from_pretrained(model_pth,num_labels=1, load_in_8bit=True,)
 tokenizer = AutoTokenizer.from_pretrained(model_pth)
 model = model.to("cuda")
 model.eval()
-# Default Prompt Template
-text = f'''Given a certain paper, Title: {title}\n Abstract: {abstract}. \n Predict its normalized academic impact (between 0 and 1):'''
-inputs = tokenizer(text, return_tensors="pt")
-outputs = model(input_ids=inputs["input_ids"].to("cuda"))
-print(outputs['logits'])
+
+while True:
+    title = input("Enter a title: ")
+    abstract = input("Enter an abstract: ")
+    title = title.replace("\n", "").strip()
+    abstract = abstract.replace("\n", "").strip()
+    # Default Prompt Template
+    text = f'''Given a certain paper, Title: {title}\n Abstract: {abstract}. \n Predict its normalized academic impact (between 0 and 1):'''
+    inputs = tokenizer(text, return_tensors="pt")
+    outputs = model(input_ids=inputs["input_ids"].to("cuda"))
+    # If you haven't modified the LLaMA code:
+    print(nn.Sigmoid()(outputs['logits']))
+    # Otherwise: print(outputs['logits'])
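
Note on the revised single_pred.py: the interactive while-loop can be factored into a reusable scoring helper. The sketch below is illustrative only, not part of the patch; it assumes the same adapter checkpoint loaded with AutoPeftModelForSequenceClassification (num_labels=1) and that the unmodified LLaMA head returns a raw regression logit, so a sigmoid maps it into [0, 1]. The predict_impact name and the torch.no_grad() wrapper are our additions, and "xxx" still stands in for the real adapter path.

import torch
import torch.nn as nn
from peft import AutoPeftModelForSequenceClassification
from transformers import AutoTokenizer

model_pth = r"xxx"  # adapter directory; "base_model_name_or_path" in adapter_config.json must point at the base model
model = AutoPeftModelForSequenceClassification.from_pretrained(
    model_pth, num_labels=1, load_in_8bit=True,
)  # 8-bit loading places the weights on the GPU, so no explicit .to("cuda") for the model
tokenizer = AutoTokenizer.from_pretrained(model_pth)
model.eval()

def predict_impact(title: str, abstract: str) -> float:
    """Illustrative helper (not in the patch): score one paper with the default prompt template."""
    title = title.replace("\n", "").strip()
    abstract = abstract.replace("\n", "").strip()
    text = f'''Given a certain paper, Title: {title}\n Abstract: {abstract}. \n Predict its normalized academic impact (between 0 and 1):'''
    inputs = tokenizer(text, return_tensors="pt")
    with torch.no_grad():  # inference only, no gradient buffers
        logits = model(input_ids=inputs["input_ids"].to("cuda")).logits
    # Raw regression logit -> [0, 1]; drop the sigmoid if your modified LLaMA head already applies it
    return nn.Sigmoid()(logits).item()

print(predict_impact("Attention Is All You Need", "We propose the Transformer..."))

Wrapping the forward pass in torch.no_grad() avoids allocating gradient state during inference; the single sigmoid at the end mirrors the print(nn.Sigmoid()(outputs['logits'])) line added by the patch.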