sample_inference.py
from PIL import Image
import requests
import torch
from transformers import AutoModelForCausalLM, AutoProcessor

model_path = "./"

processor = AutoProcessor.from_pretrained(model_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    trust_remote_code=True,
    torch_dtype="auto",
    _attn_implementation='flash_attention_2',
).cuda()

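# Note: _attn_implementation='flash_attention_2' assumes the flash-attn package is
# installed and the GPU supports it; a minimal fallback sketch (same call, eager
# attention) if it is not:
#   model = AutoModelForCausalLM.from_pretrained(
#       model_path, trust_remote_code=True, torch_dtype="auto",
#       _attn_implementation='eager',
#   ).cuda()
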
user_prompt = '<|user|>\n'
assistant_prompt = '<|assistant|>\n'
prompt_suffix = "<|end|>\n"

#################################################### text-only ####################################################
prompt = f"{user_prompt}what is the answer for 1+1? Explain it.{prompt_suffix}{assistant_prompt}"
print(f">>> Prompt\n{prompt}")
inputs = processor(prompt, images=None, return_tensors="pt").to("cuda:0")
generate_ids = model.generate(
    **inputs,
    max_new_tokens=1000,
    eos_token_id=processor.tokenizer.eos_token_id,
)
generate_ids = generate_ids[:, inputs['input_ids'].shape[1]:]
response = processor.batch_decode(
    generate_ids,
    skip_special_tokens=True,
    clean_up_tokenization_spaces=False,
)[0]
print(f'>>> Response\n{response}')

#################################################### text-only 2 ####################################################
prompt = f"{user_prompt}Give me the code for solving the two-sum problem.{prompt_suffix}{assistant_prompt}"
print(f">>> Prompt\n{prompt}")
inputs = processor(prompt, images=None, return_tensors="pt").to("cuda:0")
generate_ids = model.generate(
    **inputs,
    max_new_tokens=1000,
    eos_token_id=processor.tokenizer.eos_token_id,
)
generate_ids = generate_ids[:, inputs['input_ids'].shape[1]:]
response = processor.batch_decode(
    generate_ids,
    skip_special_tokens=True,
    clean_up_tokenization_spaces=False,
)[0]
print(f'>>> Response\n{response}')

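# The generate-then-decode boilerplate above recurs throughout this script; a small
# helper (hypothetical, not part of the original sample) could factor it out:
#   def run(prompt, images=None, max_new_tokens=1000):
#       inputs = processor(prompt, images, return_tensors="pt").to("cuda:0")
#       out = model.generate(**inputs, max_new_tokens=max_new_tokens,
#                            eos_token_id=processor.tokenizer.eos_token_id)
#       out = out[:, inputs['input_ids'].shape[1]:]
#       return processor.batch_decode(out, skip_special_tokens=True,
#                                     clean_up_tokenization_spaces=False)[0]
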
#################################################### EXAMPLE 1 ####################################################
# single-image prompt
prompt = f"{user_prompt}<|image_1|>\nWhat is shown in this image?{prompt_suffix}{assistant_prompt}"
url = "https://www.ilankelman.org/stopsigns/australia.jpg"
print(f">>> Prompt\n{prompt}")
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(prompt, image, return_tensors="pt").to("cuda:0")
generate_ids = model.generate(
    **inputs,
    max_new_tokens=1000,
    eos_token_id=processor.tokenizer.eos_token_id,
)
generate_ids = generate_ids[:, inputs['input_ids'].shape[1]:]
response = processor.batch_decode(
    generate_ids,
    skip_special_tokens=True,
    clean_up_tokenization_spaces=False,
)[0]
print(f'>>> Response\n{response}')

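# The <|image_N|> placeholders are 1-indexed and must line up with the order of the
# images handed to the processor (here a single image fills <|image_1|>).
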
#################################################### EXAMPLE 2 ####################################################
# chat template
chat = [
    {"role": "user", "content": "<|image_1|>\nWhat is shown in this image?"},
    {"role": "assistant", "content": "The image depicts a street scene with a prominent red stop sign in the foreground. The background showcases a building with traditional Chinese architecture, characterized by its red roof and ornate decorations. There are also several statues of lions, which are common in Chinese culture, positioned in front of the building. The street is lined with various shops and businesses, and there's a car passing by."},
    {"role": "user", "content": "What is so special about this image?"},
]
url = "https://www.ilankelman.org/stopsigns/australia.jpg"
image = Image.open(requests.get(url, stream=True).raw)
prompt = processor.tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
# Remove the trailing <|endoftext|> if present: it is used for training, not
# inference. (For training, make sure to append <|endoftext|> at the end.)
# Note: str.rstrip strips a *set of characters*, not a suffix, so slicing is used here.
if prompt.endswith("<|endoftext|>"):
    prompt = prompt[: -len("<|endoftext|>")]

print(f">>> Prompt\n{prompt}")

inputs = processor(prompt, [image], return_tensors="pt").to("cuda:0")
generate_ids = model.generate(
    **inputs,
    max_new_tokens=1000,
    eos_token_id=processor.tokenizer.eos_token_id,
)
generate_ids = generate_ids[:, inputs['input_ids'].shape[1]:]
response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
print(f'>>> Response\n{response}')

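# For illustration only (an assumption about the training-time format, not part of
# this inference script): when building training text, keep the trailing
# <|endoftext|> that apply_chat_template may emit, instead of stripping it as above.
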
############################# to markdown #############################
# single-image prompt
prompt = f"{user_prompt}<|image_1|>\nCan you convert the table to markdown format?{prompt_suffix}{assistant_prompt}"
url = "https://support.content.office.net/en-us/media/3dd2b79b-9160-403d-9967-af893d17b580.png"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(prompt, image, return_tensors="pt").to("cuda:0")

print(f">>> Prompt\n{prompt}")
generate_ids = model.generate(
    **inputs,
    max_new_tokens=1000,
    eos_token_id=processor.tokenizer.eos_token_id,
)
generate_ids = generate_ids[:, inputs['input_ids'].shape[1]:]
response = processor.batch_decode(
    generate_ids,
    skip_special_tokens=False,  # unlike the other examples, special tokens such as <|end|> stay in the output
    clean_up_tokenization_spaces=False,
)[0]
print(f'>>> Response\n{response}')


########################### multi-frame ################################

images = []
placeholder = ""
for i in range(1, 20):
    url = f"https://image.slidesharecdn.com/azureintroduction-191206101932/75/Introduction-to-Microsoft-Azure-Cloud-{i}-2048.jpg"
    images.append(Image.open(requests.get(url, stream=True).raw))
    placeholder += f"<|image_{i}|>\n"

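# A hedged aside (optional hardening, not in the original sample): guard the 19
# downloads above with a timeout and a status check, e.g.
#   resp = requests.get(url, stream=True, timeout=30)
#   resp.raise_for_status()
#   images.append(Image.open(resp.raw))
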
messages = [
    {"role": "user", "content": placeholder + "Summarize the deck of slides."},
]

prompt = processor.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

inputs = processor(prompt, images, return_tensors="pt").to("cuda:0")

generation_args = {
    "max_new_tokens": 1000,
    "temperature": 0.0,  # ignored when do_sample=False; decoding is greedy
    "do_sample": False,
}

generate_ids = model.generate(**inputs, eos_token_id=processor.tokenizer.eos_token_id, **generation_args)

# remove input tokens
generate_ids = generate_ids[:, inputs['input_ids'].shape[1]:]
response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]

print(response)
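
# Optional sketch (assumes the stock transformers TextStreamer plays well with this
# model's tokenizer): stream tokens as they are generated instead of decoding at the end.
#   from transformers import TextStreamer
#   streamer = TextStreamer(processor.tokenizer, skip_prompt=True, skip_special_tokens=True)
#   model.generate(**inputs, streamer=streamer, max_new_tokens=1000,
#                  eos_token_id=processor.tokenizer.eos_token_id)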