roselee committed on
Commit
74f7bde
·
1 Parent(s): e542161

Upload tool

Browse files
Files changed (4) hide show
  1. app.py +4 -0
  2. code_generating.py +51 -0
  3. requirements.txt +1 -0
  4. tool_config.json +5 -0
app.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
"""Gradio entry point: expose CodeGeneratingTool as an interactive demo."""
from transformers import launch_gradio_demo

from code_generating import CodeGeneratingTool

# transformers builds the Gradio UI directly from the tool's declared
# inputs/outputs and serves it.
launch_gradio_demo(CodeGeneratingTool)
code_generating.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+ # coding=utf-8
3
+
4
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
from transformers import LlamaForCausalLM, CodeLlamaTokenizer
from transformers import PipelineTool


# Instruction template sent to CodeLlama. NOTE(review): the placeholders are
# named `example` and `prompt` — any .format() call on this template must use
# exactly those keyword names.
QA_PROMPT = """Here is an example of how I want my code to be: '''{example}'''.

Can you generate code for this prompt: '{prompt}'"""
26
class CodeGeneratingTool(PipelineTool):
    """Tool that generates code for a prompt, styled after an example snippet.

    Wraps CodeLlama-7b-Instruct behind the ``PipelineTool`` interface:
    ``encode`` builds and tokenizes the instruction prompt, ``forward`` runs
    ``generate``, and ``decode`` turns the generated ids back into text.
    """

    default_checkpoint = "codellama/CodeLlama-7b-Instruct-hf"
    description = (
        "This is a tool that generates codes related to a prompt. It takes two arguments named `example`, which is a template on how the user wants their code to be generated, and `prompt`, which is the prompt of the code, and returns the code to the prompt."
    )
    # NOTE(review): the name reads like a QA tool but this tool generates
    # code; kept as-is because tool_config.json registers it under "text_qa".
    name = "text_qa"
    pre_processor_class = CodeLlamaTokenizer
    model_class = LlamaForCausalLM

    # Two text inputs (example template, code prompt); one text output.
    inputs = ["text", "text"]
    outputs = ["text"]

    def encode(self, text: str, question: str):
        """Build the instruction prompt and tokenize it to model inputs.

        `text` is the example snippet and `question` is the code prompt.
        """
        # Bug fix: QA_PROMPT's placeholders are `example` and `prompt`, so the
        # original call `QA_PROMPT.format(text=..., question=...)` raised
        # KeyError on every invocation. Map the arguments onto the template's
        # actual field names.
        prompt = QA_PROMPT.format(example=text, prompt=question)
        return self.pre_processor(prompt, return_tensors="pt")

    def forward(self, inputs):
        """Run generation and return the token ids of the first sequence."""
        output_ids = self.model.generate(**inputs)

        # generate() returns (batch * num_return_sequences, seq_len); regroup
        # by input batch and keep the first sequence of the first input.
        in_b, _ = inputs["input_ids"].shape
        out_b = output_ids.shape[0]

        return output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])[0][0]

    def decode(self, outputs):
        """Detokenize generated ids into the final code string."""
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
requirements.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ transformers
tool_config.json ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ {
2
+ "description": "This is a tool that generates codes related to a prompt. It takes two arguments named `example`, which is a template on how the user wants their code to be generated, and `prompt`, which is the prompt of the code, and returns the code to the prompt.",
3
+ "name": "text_qa",
4
+ "tool_class": "code_generating.CodeGeneratingTool"
5
+ }