Xueqing Wu committed
Commit e20ef71 (initial commit, 0 parents)
(This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.)
.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,3 @@
+ __pycache__/
+ *.pyc
+ pretrained_models
Dockerfile ADDED
@@ -0,0 +1,62 @@
+ FROM nvidia/cuda:12.1.1-cudnn8-devel-ubuntu20.04
+
+ # These are all pre-defined
+ ENV DEBIAN_FRONTEND=noninteractive \
+     TZ=Europe/Paris
+ # Install some basic utilities
+ RUN rm -f /etc/apt/sources.list.d/*.list && \
+     apt-get update && apt-get install -y --no-install-recommends \
+     sudo \
+     git \
+     curl \
+     wget \
+     ffmpeg libsm6 libxext6 \
+     && rm -rf /var/lib/apt/lists/*
+ # Create a working directory
+ WORKDIR /app
+ # Create a non-root user and switch to it
+ RUN adduser --disabled-password --gecos '' --shell /bin/bash user \
+     && chown -R user:user /app \
+     && echo "user ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/90-user
+ USER user
+ # All users can use /home/user as their home directory
+ ENV HOME=/home/user \
+     CONDA_AUTO_UPDATE_CONDA=false
+ ENV PATH=$HOME/miniconda/bin:$PATH
+ RUN mkdir $HOME/.cache $HOME/.config \
+     && chmod -R 777 $HOME \
+     && curl -sLo ~/miniconda.sh https://repo.continuum.io/miniconda/Miniconda3-py310_24.5.0-0-Linux-x86_64.sh \
+     && chmod +x ~/miniconda.sh \
+     && ~/miniconda.sh -b -p ~/miniconda \
+     && rm ~/miniconda.sh \
+     && conda clean -ya
+
+ # Project-specific setup starts here
+
+ # Download models
+ RUN pip install --no-cache-dir gdown && \
+     mkdir -p ./pretrained_models/GLIP/checkpoints && \
+     mkdir -p ./pretrained_models/GLIP/configs && \
+     mkdir -p ./pretrained_models/xvlm && \
+     wget -nc -q -P ./pretrained_models/GLIP/checkpoints https://huggingface.co/GLIPModel/GLIP/resolve/main/glip_large_model.pth && \
+     wget -nc -q -P ./pretrained_models/GLIP/configs https://raw.githubusercontent.com/microsoft/GLIP/main/configs/pretrain/glip_Swin_L.yaml && \
+     gdown "https://drive.google.com/u/0/uc?id=1bv6_pZOsXW53EhlwU0ZgSk03uzFI61pN" -O ./pretrained_models/xvlm/retrieval_mscoco_checkpoint_9.pth
+
+ # Python packages
+ RUN --mount=target=requirements.txt,source=requirements.txt \
+     pip install --no-cache-dir torch torchvision && \
+     pip install --no-cache-dir git+https://github.com/openai/CLIP.git && \
+     pip install --no-cache-dir -r requirements.txt
+
+ RUN python -c "from transformers import AutoModel; _ = AutoModel.from_pretrained('codellama/CodeLlama-7b-Python-hf')"
+ RUN python -c "from transformers import AutoModel; _ = AutoModel.from_pretrained('VDebugger/VDebugger-critic-generalist-7B')"
+ RUN python -c "from transformers import AutoModel; _ = AutoModel.from_pretrained('VDebugger/VDebugger-refiner-generalist-7B')"
+
+ # Download the GLIP source; it is not installed here -- app.sh builds it at container startup
+ RUN git clone https://github.com/sachit-menon/GLIP
+
+ # Run gradio
+ COPY --link --chown=1000 ./ /app
+ EXPOSE 7860
+ ENV GRADIO_SERVER_NAME="0.0.0.0"
+ CMD ["bash", "app.sh"]
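Note: the three `RUN python -c "...from_pretrained(...)"` layers above warm the Hugging Face cache at build time, so the 7B checkpoints are baked into the image rather than downloaded on first request. A minimal sketch for confirming the cached repos inside the built container, using huggingface_hub's scan_cache_dir (huggingface_hub is already present as a transformers dependency):

    # Sketch: list the repos cached by the build-time from_pretrained() calls
    from huggingface_hub import scan_cache_dir

    for repo in scan_cache_dir().repos:
        print(f"{repo.repo_id}: {repo.size_on_disk / 1e9:.1f} GB")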
README.md ADDED
@@ -0,0 +1,15 @@
+ ---
+ title: VDebugger generalist for VQA
+ emoji: 💬
+ colorFrom: yellow
+ colorTo: purple
+ sdk: docker
+ sdk_version: 4.36.1
+ app_file: app.py
+ pinned: false
+ license: apache-2.0
+ models:
+ - codellama/CodeLlama-7b-Python-hf
+ - VDebugger/VDebugger-critic-generalist-7B
+ - VDebugger/VDebugger-refiner-generalist-7B
+ ---
app.py ADDED
@@ -0,0 +1,333 @@
+ import inspect
+ import json
+ import os
+ import random
+ from typing import Literal, cast
+
+ import gradio as gr
+ import torch
+ from PIL import Image
+ from gradio.data_classes import InterfaceTypes
+ from gradio.flagging import CSVLogger
+ from torchvision import transforms
+ from transformers import AutoTokenizer, LlamaForCausalLM
+
+ from trace_exec import run_program_with_trace, CompileTimeError
+ from vision_processes import load_models
+
+ print("-" * 10, "Loading models...")
+ load_models()
+
+ with open('joint.prompt') as f:
+     prompt_template = f.read().strip()
+
+ INPUT_TYPE = 'image'
+ OUTPUT_TYPE = 'str'
+ SIGNATURE = f'def execute_command({INPUT_TYPE}) -> {OUTPUT_TYPE}:'
+
+
+ def generate(model, input_text):
+     torch.cuda.empty_cache()
+     print("-" * 10, "Before loading LLM:")
+     print(torch.cuda.memory_summary())
+
+     dtype = os.environ.get("CODELLAMA_DTYPE")
+     assert dtype in ['bfloat16', '8bit', '4bit', ]
+     tokenizer = AutoTokenizer.from_pretrained(model)
+     model = LlamaForCausalLM.from_pretrained(
+         model,
+         device_map="auto",
+         load_in_8bit=dtype == "8bit",
+         load_in_4bit=dtype == "4bit",
+         torch_dtype=torch.bfloat16 if dtype == "bfloat16" else None,
+     )
+     print("-" * 10, "LLM loaded:")
+     print(model)
+     print(torch.cuda.memory_summary())
+
+     input_ids = tokenizer(input_text, return_tensors="pt").input_ids
+     generated_ids = model.generate(
+         input_ids.to('cuda'), max_new_tokens=256, stop_strings=["\n\n"], do_sample=False, tokenizer=tokenizer
+     )
+     generated_ids = generated_ids[0][input_ids.shape[1]:]
+     text = tokenizer.decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
+
+     del model  # free the LLM so the vision models can reuse the GPU memory
+     torch.cuda.empty_cache()
+     print("-" * 10, "After unloading LLM:")
+     print(torch.cuda.memory_summary())
+
+     return text
+
+
+ def to_custom_trace(result, error, traced):
+     if traced is None:
+         assert isinstance(error, CompileTimeError)
+         traced = 'Compile Error'
+     return "-> {}\n\n--- Trace\n\n{}".format(result, traced)
+
+
+ def answer_from_trace(x):
+     assert x.startswith("->")
+     return x[2:].splitlines()[0].strip()
+
+
+ def debug(image, question, code, traced_info):
+     # critic
+     prompt = f"# Given an image: {question}\n{code}\n\n{traced_info}\n\n# Program is"
+     print("--- For debug: critic prompt is ---")
+     print(prompt)
+     print("---\n")
+     critic_out = generate("VDebugger/VDebugger-critic-generalist-7B", prompt)
+     incorrect = critic_out.strip().startswith('wrong')
+     critic_out = "# Program is" + critic_out
+
+     if not incorrect:
+         yield code, traced_info, critic_out, "N/A", "N/A", answer_from_trace(traced_info)
+         return
+     else:
+         yield code, traced_info, critic_out, "RUNNING...", "", ""
+
+     # refiner
+     critic_code = ('def execute_command' + critic_out.split('def execute_command')[1]).strip()
+     if '# Program is' in code:
+         critic_code = critic_code.split("# Program is")[0].strip()  # awkward fix: drop the critic verdict carried over into the code
+     prompt = f"# Given an image: {question}\n{critic_code}\n\n{traced_info}\n\n# Correction"
+     print("--- For debug: refiner prompt is ---")
+     print(prompt)
+     print("---\n")
+     refiner_out = generate("VDebugger/VDebugger-refiner-generalist-7B", prompt).strip()
+     yield code, traced_info, critic_out, refiner_out, "RUNNING...", ""
+
+     # execute (again)
+     result, error, traced = run_program_with_trace(refiner_out, image, INPUT_TYPE, OUTPUT_TYPE)
+     traced_info_2 = to_custom_trace(result, error, traced)
+
+     yield code, traced_info, critic_out, refiner_out, traced_info_2, answer_from_trace(traced_info_2)
+
+
+ def predict(image, question):
+     if image is None:
+         gr.Warning("Please provide an image", duration=5)
+         return
+     image = transforms.Compose([transforms.ToTensor()])(image)
+
+     question = question.strip()
+     if question == "":
+         gr.Warning("Please provide a question", duration=5)
+         return
+
+     # codellama
+     prompt = prompt_template.replace("INSERT_QUERY_HERE", f"Given an image: {question}\n{SIGNATURE}")
+     code = generate("codellama/CodeLlama-7b-Python-hf", prompt)
+     code = (SIGNATURE + code).strip()
+     yield code, "RUNNING...", "", "", "", ""
+
+     # execute
+     result, error, traced = run_program_with_trace(code, image, INPUT_TYPE, OUTPUT_TYPE)
+     traced_info = to_custom_trace(result, error, traced)
+     yield code, traced_info, "RUNNING...", "", "", ""
+
+     for tup in debug(image, question, code, traced_info):
+         yield tup
+     return
+
+
+ def re_debug(image, question, code, traced_info):
+     if code is None or code == "" or traced_info is None or traced_info == "":
+         gr.Warning("No prior debugging round", duration=5)
+         return
+
+     yield code, traced_info, "RUNNING...", "", "", ""
+     for tup in debug(image, question, code, traced_info):
+         yield tup
+     return
+
+
+ DESCRIPTION = """# VDebugger
+
+ | [Paper](https://arxiv.org/abs/2406.13444) | [Project](https://shirley-wu.github.io/vdebugger/) | [Code](https://github.com/shirley-wu/vdebugger/) | [Models and Data](https://huggingface.co/VDebugger) |
+
+ **VDebugger** is a novel critic-refiner framework trained to localize and debug *visual programs* by tracking execution step by step. This demo shows the generated visual program, the outputs of both the critic and the refiner, and the final result.
+
+ **Warning:** Due to the resource limitations of Hugging Face Spaces, this demo runs Llama inference in 4-bit quantization and uses smaller foundation VLMs, so reduced speed and accuracy may be observed. For full capability, please use the original code."""
+
+
+ class MyInterface(gr.Interface):
+     def __init__(self):
+         super(gr.Interface, self).__init__(  # skip gr.Interface.__init__ and initialize the underlying gr.Blocks directly
+             title=None,
+             theme=None,
+             analytics_enabled=None,
+             mode="tabbed_interface",
+             css=None,
+             js=None,
+             head=None,
+         )
+         self.interface_type = InterfaceTypes.STANDARD
+         self.description = DESCRIPTION
+         self.cache_examples = None
+         self.examples_per_page = 5
+         self.example_labels = None
+         self.batch = False
+         self.live = False
+         self.api_name = "predict"
+         self.max_batch_size = 4
+         self.concurrency_limit = 'default'
+         self.show_progress = "full"
+         self.allow_flagging = 'auto'
+         self.flagging_options = [("Flag", ""), ]
+         self.flagging_callback = CSVLogger()
+         self.flagging_dir = 'flagged'
+
+         # Load examples
+         with open('examples/questions.json') as f:
+             example_questions = json.load(f)
+         self.examples = []
+         for question in example_questions:
+             self.examples.append([
+                 Image.open('examples/{}.jpg'.format(question['imageId'])), question['question'],
+             ])
+
+         def load_random_example():
+             image, question = random.choice(self.examples)
+             return image, question, "", "", "", "", "", ""
+
+         # Render the Gradio UI
+         with self:
+             self.render_title_description()
+
+             with gr.Row():
+                 image = gr.Image(label="Image", type="pil", width="30%", scale=1)
+                 question = gr.Textbox(label="Question", scale=2)
+
+             with gr.Row():
+                 _clear_btn = gr.ClearButton(value="Clear", variant="secondary")
+                 _random_eg_btn = gr.Button("Random Example Input")
+                 _submit_btn = gr.Button("Submit", variant="primary")
+                 if inspect.isgeneratorfunction(predict) or inspect.isasyncgenfunction(predict):
+                     _stop1_btn = gr.Button("Stop", variant="stop", visible=False)
+                 _redebug_btn = gr.Button("Debug for Another Round", variant="primary")
+                 if inspect.isgeneratorfunction(re_debug) or inspect.isasyncgenfunction(re_debug):
+                     _stop2_btn = gr.Button("Stop", variant="stop", visible=False)
+
+             with gr.Row():
+                 o1 = gr.Textbox(label="No debugging: program")
+                 o2 = gr.Textbox(label="No debugging: execution")
+
+             with gr.Row():
+                 o3 = gr.Textbox(label="VDebugger: critic")
+                 o4 = gr.Textbox(label="VDebugger: refiner")
+
+             with gr.Row():
+                 o5 = gr.Textbox(label="VDebugger: execution")
+                 o6 = gr.Textbox(label="VDebugger: final answer")
+
+             question.submit(fn=predict, inputs=[image, question], outputs=[o1, o2, o3, o4, o5, o6])
+             _random_eg_btn.click(fn=load_random_example, outputs=[image, question, o1, o2, o3, o4, o5, o6])
+
+             async def cleanup():
+                 return [gr.Button(visible=True), gr.Button(visible=False)]
+
+             # Setup redebug event
+             triggers = [_redebug_btn.click, ]
+             extra_output = [_redebug_btn, _stop2_btn]
+             predict_event = gr.on(
+                 triggers,
+                 gr.utils.async_lambda(
+                     lambda: (
+                         gr.Button(visible=False),
+                         gr.Button(visible=True),
+                     )
+                 ),
+                 inputs=None,
+                 outputs=[_redebug_btn, _stop2_btn],
+                 queue=False,
+                 show_api=False,
+             ).then(
+                 re_debug,
+                 [image, question, o4, o5],
+                 [o1, o2, o3, o4, o5, o6],
+                 api_name=self.api_name,
+                 scroll_to_output=False,
+                 preprocess=not (self.api_mode),
+                 postprocess=not (self.api_mode),
+                 batch=self.batch,
+                 max_batch_size=self.max_batch_size,
+                 concurrency_limit=self.concurrency_limit,
+                 show_progress=cast(
+                     Literal["full", "minimal", "hidden"], self.show_progress
+                 ),
+             )
+             redebug_event = predict_event.then(
+                 cleanup,
+                 inputs=None,
+                 outputs=extra_output,  # type: ignore
+                 queue=False,
+                 show_api=False,
+             )
+             _stop2_btn.click(
+                 cleanup,
+                 inputs=None,
+                 outputs=[_redebug_btn, _stop2_btn],
+                 cancels=predict_event,
+                 queue=False,
+                 show_api=False,
+             )
+
+             # Setup submit event
+             triggers = [_submit_btn.click, question.submit, ]
+             extra_output = [_submit_btn, _stop1_btn]
+             predict_event = gr.on(
+                 triggers,
+                 gr.utils.async_lambda(
+                     lambda: (
+                         gr.Button(visible=False),
+                         gr.Button(visible=True),
+                     )
+                 ),
+                 inputs=None,
+                 outputs=[_submit_btn, _stop1_btn],
+                 queue=False,
+                 show_api=False,
+             ).then(
+                 predict,
+                 [image, question],
+                 [o1, o2, o3, o4, o5, o6],
+                 api_name=self.api_name,
+                 scroll_to_output=False,
+                 preprocess=not (self.api_mode),
+                 postprocess=not (self.api_mode),
+                 batch=self.batch,
+                 max_batch_size=self.max_batch_size,
+                 concurrency_limit=self.concurrency_limit,
+                 show_progress=cast(
+                     Literal["full", "minimal", "hidden"], self.show_progress
+                 ),
+             )
+             submit_event = predict_event.then(
+                 cleanup,
+                 inputs=None,
+                 outputs=extra_output,  # type: ignore
+                 queue=False,
+                 show_api=False,
+             )
+             _stop1_btn.click(
+                 cleanup,
+                 inputs=None,
+                 outputs=[_submit_btn, _stop1_btn],
+                 cancels=predict_event,
+                 queue=False,
+                 show_api=False,
+             )
+
+             # Finally, borrow the remaining gr.Interface machinery
+             self.input_components = [image, question]
+             self.output_components = [o1, o2, o3, o4, o5, o6]
+             self.fn = predict
+             self.attach_clear_events(_clear_btn, None)
+             self.render_examples()
+
+
+ if __name__ == "__main__":
+     MyInterface().launch(share=os.environ.get("SHARE", '') != "")
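Both the submit and re-debug event chains register api_name="predict", so the Space also exposes a programmatic endpoint. A minimal sketch of querying it with gradio_client (the Space id and the question are hypothetical; depending on the client version a plain file path may be accepted in place of handle_file):

    # Sketch: query the demo remotely ("VDebugger/demo" is a hypothetical Space id)
    from gradio_client import Client, handle_file

    client = Client("VDebugger/demo")
    # predict() waits for the streamed generator to finish and returns the final
    # 6-tuple: (program, execution, critic, refiner, re-execution, final answer)
    outputs = client.predict(
        handle_file("examples/n111074.jpg"),
        "Is there a red car in the image?",  # hypothetical question
        api_name="/predict",
    )
    print(outputs[-1])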
app.sh ADDED
@@ -0,0 +1,5 @@
+ cd GLIP
+ python setup.py clean --all build develop --user
+ cd ../
+ python -c "import maskrcnn_benchmark"  # verify the build installed successfully
+ python app.py
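app.sh builds GLIP's maskrcnn_benchmark extension at container startup (presumably because it cannot be compiled in the Spaces build environment, per the Dockerfile comment) and only then launches the app. For local debugging one can also bypass Gradio and drive the pipeline directly; a minimal sketch, assuming the container environment and a made-up question:

    # Sketch: run one question through the pipeline without the UI
    from PIL import Image
    import app  # importing app.py loads the vision models, as the demo does

    img = Image.open("examples/n111074.jpg")
    answer = None
    # predict() is a generator that streams the six output fields as they fill in
    for code, trace, critic, refiner, trace2, answer in app.predict(img, "What is the person holding?"):
        pass
    print("Final answer:", answer)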
examples/n111074.jpg ADDED
examples/n113863.jpg ADDED
examples/n11399.jpg ADDED
examples/n115850.jpg ADDED
examples/n116797.jpg ADDED
examples/n116868.jpg ADDED
examples/n132998.jpg ADDED
examples/n137739.jpg ADDED
examples/n140477.jpg ADDED
examples/n14897.jpg ADDED
examples/n151233.jpg ADDED
examples/n154501.jpg ADDED
examples/n155638.jpg ADDED
examples/n168871.jpg ADDED
examples/n173361.jpg ADDED
examples/n173931.jpg ADDED
examples/n176076.jpg ADDED
examples/n177259.jpg ADDED
examples/n177566.jpg ADDED
examples/n178654.jpg ADDED
examples/n179572.jpg ADDED
examples/n183744.jpg ADDED
examples/n188669.jpg ADDED
examples/n193989.jpg ADDED
examples/n194711.jpg ADDED
examples/n196522.jpg ADDED
examples/n209769.jpg ADDED
examples/n210898.jpg ADDED
examples/n222443.jpg ADDED
examples/n2381.jpg ADDED
examples/n238886.jpg ADDED
examples/n241130.jpg ADDED
examples/n241451.jpg ADDED
examples/n241713.jpg ADDED
examples/n24680.jpg ADDED
examples/n249342.jpg ADDED
examples/n25398.jpg ADDED
examples/n256710.jpg ADDED
examples/n272929.jpg ADDED
examples/n278426.jpg ADDED
examples/n279408.jpg ADDED
examples/n282460.jpg ADDED
examples/n288083.jpg ADDED
examples/n291937.jpg ADDED