image (string, length 15) | id (string, length 8–12) | conversations (string, length 79–268) |
---|---|---|
00000002017.jpg | 2017_evjvqa | [{"from": "human", "value": "what items does the woman intend to buy?\n<image>"}, {"from": "gpt", "value": "vegetable"}] |
00000002017.jpg | 2017_evjvqa | [{"from": "human", "value": "which item is the woman looking at?\n<image>"}, {"from": "gpt", "value": "vegetable"}] |
00000002020.jpg | 2020_evjvqa | [{"from": "human", "value": "what means of transport does the boy go to the bookstore?\n<image>"}, {"from": "gpt", "value": "motorbike"}] |
00000002029.jpg | 2029_evjvqa | [{"from": "human", "value": "what are these women holding in their hands?\n<image>"}, {"from": "gpt", "value": "flower branches"}] |
00000002030.jpg | 2030_evjvqa | [{"from": "human", "value": "how many people are standing to speak?\n<image>"}, {"from": "gpt", "value": "one person is standing to speak"}] |
00000002030.jpg | 2030_evjvqa | [{"from": "human", "value": "how many people sit on the right side of the standing man?\n<image>"}, {"from": "gpt", "value": "two people are sitting on the right side of the standing man"}] |
00000002031.jpg | 2031_evjvqa | [{"from": "human", "value": "which counter is the woman standing at?\n<image>"}, {"from": "gpt", "value": "fresh meat counter"}] |
00000002031.jpg | 2031_evjvqa | [{"from": "human", "value": "who does the woman buying fresh meat go with?\n<image>"}, {"from": "gpt", "value": "goes alone"}] |
00000002031.jpg | 2031_evjvqa | [{"from": "human", "value": "what is the woman in black doing?\n<image>"}, {"from": "gpt", "value": "buying fresh meat"}] |
00000002034.jpg | 2034_evjvqa | [{"from": "human", "value": "where is the motorbike parked?\n<image>"}, {"from": "gpt", "value": "next to the bicycle"}] |
00000002034.jpg | 2034_evjvqa | [{"from": "human", "value": "where is the bicycle parked?\n<image>"}, {"from": "gpt", "value": "next to the motorbike"}] |
00000002036.jpg | 2036_evjvqa | [{"from": "human", "value": "how many cars are on the road?\n<image>"}, {"from": "gpt", "value": "one car is on the road"}] |
00000002036.jpg | 2036_evjvqa | [{"from": "human", "value": "which direction is the young man wearing the helmet moving?\n<image>"}, {"from": "gpt", "value": "moving in the opposite direction of the car"}] |
00000002038.jpg | 2038_evjvqa | [{"from": "human", "value": "how many vehicles are parked in front of the store?\n<image>"}, {"from": "gpt", "value": "one vehicle"}] |
00000002038.jpg | 2038_evjvqa | [{"from": "human", "value": "where is the vehicle parked?\n<image>"}, {"from": "gpt", "value": "in front of a fashion clothing store"}] |
00000002041.jpg | 2041_evjvqa | [{"from": "human", "value": "where is this building built?\n<image>"}, {"from": "gpt", "value": "beside a river"}] |
00000002043.jpg | 2043_evjvqa | [{"from": "human", "value": "what is the girl holding in her hand?\n<image>"}, {"from": "gpt", "value": "guitar"}] |
00000002043.jpg | 2043_evjvqa | [{"from": "human", "value": "where is the girl standing?\n<image>"}, {"from": "gpt", "value": "on the railway line"}] |
00000002043.jpg | 2043_evjvqa | [{"from": "human", "value": "how is the girl standing with the guitar dressed?\n<image>"}, {"from": "gpt", "value": "white ao dai"}] |
00000002046.jpg | 2046_evjvqa | [{"from": "human", "value": "what construction is this street along?\n<image>"}, {"from": "gpt", "value": "along the apartment buildings"}] |
00000002047.jpg | 2047_evjvqa | [{"from": "human", "value": "how many chairs are there in the restaurant behind the girl?\n<image>"}, {"from": "gpt", "value": "three chairs"}] |
00000002048.jpg | 2048_evjvqa | [{"from": "human", "value": "how many people are sitting in the pub?\n<image>"}, {"from": "gpt", "value": "two people"}] |
00000002049.jpg | 2049_evjvqa | [{"from": "human", "value": "where are the two men standing?\n<image>"}, {"from": "gpt", "value": "milk counter"}] |
00000002049.jpg | 2049_evjvqa | [{"from": "human", "value": "what item are the two men intending to buy?\n<image>"}, {"from": "gpt", "value": "buy milk"}] |
00000002049.jpg | 2049_evjvqa | [{"from": "human", "value": "how many people are buying milk?\n<image>"}, {"from": "gpt", "value": "two people"}] |
00000002056.jpg | 2056_evjvqa | [{"from": "human", "value": "what objects are displayed on the street?\n<image>"}, {"from": "gpt", "value": "painting"}] |
00000002056.jpg | 2056_evjvqa | [{"from": "human", "value": "where are the paintings displayed?\n<image>"}, {"from": "gpt", "value": "on the street"}] |
00000002056.jpg | 2056_evjvqa | [{"from": "human", "value": "what are the boy holding the camera and the girls doing?\n<image>"}, {"from": "gpt", "value": "reviewing the recently taken photo"}] |
00000002056.jpg | 2056_evjvqa | [{"from": "human", "value": "what works of art are mainly displayed on this road?\n<image>"}, {"from": "gpt", "value": "painting"}] |
00000002061.jpg | 2061_evjvqa | [{"from": "human", "value": "what artifacts does this land display?\n<image>"}, {"from": "gpt", "value": "weapon of war"}] |
00000002061.jpg | 2061_evjvqa | [{"from": "human", "value": "what artifact are the two girls standing looking at?\n<image>"}, {"from": "gpt", "value": "tank"}] |
00000002061.jpg | 2061_evjvqa | [{"from": "human", "value": "how many people are watching in the gallery?\n<image>"}, {"from": "gpt", "value": "three people"}] |
00000002062.jpg | 2062_evjvqa | [{"from": "human", "value": "where is the group of students going on tour?\n<image>"}, {"from": "gpt", "value": "historical site"}] |
00000002062.jpg | 2062_evjvqa | [{"from": "human", "value": "how many teachers does the group of students go on a tour with?\n<image>"}, {"from": "gpt", "value": "two teachers"}] |
00000002062.jpg | 2062_evjvqa | [{"from": "human", "value": "who do the two women go on a tour with?\n<image>"}, {"from": "gpt", "value": "with a group of students"}] |
00000002064.jpg | 2064_evjvqa | [{"from": "human", "value": "where does this bridge lead to?\n<image>"}, {"from": "gpt", "value": "to a historical site"}] |
00000002065.jpg | 2065_evjvqa | [{"from": "human", "value": "what area are people concentrating in?\n<image>"}, {"from": "gpt", "value": "billing area"}] |
00000002066.jpg | 2066_evjvqa | [{"from": "human", "value": "what item is the girl in the green shirt choosing?\n<image>"}, {"from": "gpt", "value": "vegetable"}] |
00000002066.jpg | 2066_evjvqa | [{"from": "human", "value": "which counter is the girl in the green shirt standing at?\n<image>"}, {"from": "gpt", "value": "vegetable counter"}] |
00000002066.jpg | 2066_evjvqa | [{"from": "human", "value": "how many people are standing on the same side of the vegetable counter with the girl?\n<image>"}, {"from": "gpt", "value": "three people"}] |
00000002068.jpg | 2068_evjvqa | [{"from": "human", "value": "where are these people standing?\n<image>"}, {"from": "gpt", "value": "at a stall in a fair"}] |
00000002069.jpg | 2069_evjvqa | [{"from": "human", "value": "what goods do these boats carry?\n<image>"}, {"from": "gpt", "value": "vegetables, fruits"}] |
00000002071.jpg | 2071_evjvqa | [{"from": "human", "value": "what are inside the glass box behind the girl?\n<image>"}, {"from": "gpt", "value": "newspapers"}] |
00000002071.jpg | 2071_evjvqa | [{"from": "human", "value": "who is the girl standing with?\n<image>"}, {"from": "gpt", "value": "the girl is standing alone"}] |
00000002071.jpg | 2071_evjvqa | [{"from": "human", "value": "what color shirt is the standing girl wearing?\n<image>"}, {"from": "gpt", "value": "black"}] |
00000002075.jpg | 2075_evjvqa | [{"from": "human", "value": "how many men are walking on the street?\n<image>"}, {"from": "gpt", "value": "two men"}] |
00000002075.jpg | 2075_evjvqa | [{"from": "human", "value": "how many people are walking on the street wearing conical hats?\n<image>"}, {"from": "gpt", "value": "one person"}] |
00000002076.jpg | 2076_evjvqa | [{"from": "human", "value": "what patterns are carved into the cup?\n<image>"}, {"from": "gpt", "value": "shape of a man"}] |
00000002076.jpg | 2076_evjvqa | [{"from": "human", "value": "where is the skull placed compared to the cup?\n<image>"}, {"from": "gpt", "value": "to the left of the cup"}] |
00000002078.jpg | 2078_evjvqa | [{"from": "human", "value": "how is the girl standing in the middle of the street dressed?\n<image>"}, {"from": "gpt", "value": "ao dai wearing conical hat"}] |
00000002078.jpg | 2078_evjvqa | [{"from": "human", "value": "how many people is sitting in front of the house on the corner?\n<image>"}, {"from": "gpt", "value": "one person"}] |
00000002078.jpg | 2078_evjvqa | [{"from": "human", "value": "who is the girl standing in the middle of the street with?\n<image>"}, {"from": "gpt", "value": "the girl is standing alone"}] |
00000002083.jpg | 2083_evjvqa | [{"from": "human", "value": "what do the women rowing boats wear on their heads?\n<image>"}, {"from": "gpt", "value": "conical hats"}] |
00000002083.jpg | 2083_evjvqa | [{"from": "human", "value": "how many women are sailing?\n<image>"}, {"from": "gpt", "value": "two women"}] |
00000002083.jpg | 2083_evjvqa | [{"from": "human", "value": "what goods do the boats carry?\n<image>"}, {"from": "gpt", "value": "fruit"}] |
00000002085.jpg | 2085_evjvqa | [{"from": "human", "value": "what is the man in the black mask doing?\n<image>"}, {"from": "gpt", "value": "buying rice"}] |
00000002085.jpg | 2085_evjvqa | [{"from": "human", "value": "what color shirt does the young man selling rice wear?\n<image>"}, {"from": "gpt", "value": "red"}] |
00000002085.jpg | 2085_evjvqa | [{"from": "human", "value": "what is the young man in the red shirt doing?\n<image>"}, {"from": "gpt", "value": "standing and selling rice"}] |
00000002085.jpg | 2085_evjvqa | [{"from": "human", "value": "how many people are standing and selling?\n<image>"}, {"from": "gpt", "value": "one person"}] |
00000002085.jpg | 2085_evjvqa | [{"from": "human", "value": "how many people are buying rice?\n<image>"}, {"from": "gpt", "value": "one person"}] |
00000002089.jpg | 2089_evjvqa | [{"from": "human", "value": "how many people are walking across the street?\n<image>"}, {"from": "gpt", "value": "a person is walking across the street"}] |
00000002089.jpg | 2089_evjvqa | [{"from": "human", "value": "what color shirt is the girl walking across the street wearing?\n<image>"}, {"from": "gpt", "value": "white shirt"}] |
00000002091.jpg | 2091_evjvqa | [{"from": "human", "value": "where is the woman walking?\n<image>"}, {"from": "gpt", "value": "on the main path leading to the citadel"}] |
00000002091.jpg | 2091_evjvqa | [{"from": "human", "value": "where does the woman visit?\n<image>"}, {"from": "gpt", "value": "at an old citadel"}] |
00000002091.jpg | 2091_evjvqa | [{"from": "human", "value": "where does the path where the woman is walking lead to?\n<image>"}, {"from": "gpt", "value": "to a citadel"}] |
00000002092.jpg | 2092_evjvqa | [{"from": "human", "value": "how many statues are placed in the room?\n<image>"}, {"from": "gpt", "value": "one statue is placed in the room"}] |
00000002092.jpg | 2092_evjvqa | [{"from": "human", "value": "what artifacts does this room display?\n<image>"}, {"from": "gpt", "value": "war artifacts"}] |
00000002092.jpg | 2092_evjvqa | [{"from": "human", "value": "where is the statue placed?\n<image>"}, {"from": "gpt", "value": "in a gallery in the museum"}] |
00000002093.jpg | 2093_evjvqa | [{"from": "human", "value": "where are these items displayed?\n<image>"}, {"from": "gpt", "value": "on the shelves in a grocery store"}] |
00000002094.jpg | 2094_evjvqa | [{"from": "human", "value": "where are the trolleys filled of goods?\n<image>"}, {"from": "gpt", "value": "in the supermarket"}] |
00000002094.jpg | 2094_evjvqa | [{"from": "human", "value": "how many staff are arranging goods from trolleys?\n<image>"}, {"from": "gpt", "value": "a staff is arranging goods from trolleys"}] |
00000002095.jpg | 2095_evjvqa | [{"from": "human", "value": "what do people decorate on both sides of the road?\n<image>"}, {"from": "gpt", "value": "walls with historical articles"}] |
00000002098.jpg | 2098_evjvqa | [{"from": "human", "value": "what does the counter that the young man in the blue shirt passing sell?\n<image>"}, {"from": "gpt", "value": "toilet paper"}] |
00000002098.jpg | 2098_evjvqa | [{"from": "human", "value": "what did the young man in the blue shirt put in the trolley?\n<image>"}, {"from": "gpt", "value": "toilet paper"}] |
00000002098.jpg | 2098_evjvqa | [{"from": "human", "value": "what did the young man in the blue shirt choose to buy?\n<image>"}, {"from": "gpt", "value": "toilet paper"}] |
00000010903.jpg | 10903_evjvqa | [{"from": "human", "value": "what scene is drawn in this picture?\n<image>"}, {"from": "gpt", "value": "there are the boats on the river"}] |
00000010907.jpg | 10907_evjvqa | [{"from": "human", "value": "what vehicle is moving on the water?\n<image>"}, {"from": "gpt", "value": "there is a ferry moving on the water"}] |
00000010909.jpg | 10909_evjvqa | [{"from": "human", "value": "who is sitting on the chair?\n<image>"}, {"from": "gpt", "value": "it is the girl in yellow t-shirt sitting on a chair"}] |
00000010910.jpg | 10910_evjvqa | [{"from": "human", "value": "what uniforms do these students have?\n<image>"}, {"from": "gpt", "value": "these students' uniforms consist of red trousers and a white t-shirt"}] |
00000010912.jpg | 10912_evjvqa | [{"from": "human", "value": "what items does this stall sell?\n<image>"}, {"from": "gpt", "value": "the tet decorations"}] |
00000010913.jpg | 10913_evjvqa | [{"from": "human", "value": "what color hat is the boy standing in front of the woman wearing a yellow shirt with flowers?\n<image>"}, {"from": "gpt", "value": "that boy is wearing a red hat"}] |
00000010915.jpg | 10915_evjvqa | [{"from": "human", "value": "what historical place are these two young people standing in front of?\n<image>"}, {"from": "gpt", "value": "they are standing in front of the hue citadel"}] |
00000010918.jpg | 10918_evjvqa | [{"from": "human", "value": "what is opposite the temple?\n<image>"}, {"from": "gpt", "value": "there is the ancient tree"}] |
00000010920.jpg | 10920_evjvqa | [{"from": "human", "value": "who is pulling the car?\n<image>"}, {"from": "gpt", "value": "it is the farmer in blue shirt pulling cart"}] |
00000010921.jpg | 10921_evjvqa | [{"from": "human", "value": "what is the object behind the man in the white shirt sitting?\n<image>"}, {"from": "gpt", "value": "the electric fan"}] |
00000010923.jpg | 10923_evjvqa | [{"from": "human", "value": "what color are the flowers on the branches?\n<image>"}, {"from": "gpt", "value": "yellow"}] |
00000010924.jpg | 10924_evjvqa | [{"from": "human", "value": "who is standing opposite the young man in a black coat?\n<image>"}, {"from": "gpt", "value": "it is the woman in blue"}] |
00000010925.jpg | 10925_evjvqa | [{"from": "human", "value": "how many people are behind the man sitting in front of the microphone?\n<image>"}, {"from": "gpt", "value": "three"}] |
00000010927.jpg | 10927_evjvqa | [{"from": "human", "value": "what are built next to these old houses?\n<image>"}, {"from": "gpt", "value": "the pond"}] |
00000010929.jpg | 10929_evjvqa | [{"from": "human", "value": "where is this young man standing?\n<image>"}, {"from": "gpt", "value": "this person is standing in the aisle next to a parking lot"}] |
00000010930.jpg | 10930_evjvqa | [{"from": "human", "value": "what is placed in the glass cage?\n<image>"}, {"from": "gpt", "value": "the antiquities"}] |
00000010931.jpg | 10931_evjvqa | [{"from": "human", "value": "what vehicle is moving into the doorway?\n<image>"}, {"from": "gpt", "value": "the cyclo"}] |
00000010933.jpg | 10933_evjvqa | [{"from": "human", "value": "what items are used to cover the stalls?\n<image>"}, {"from": "gpt", "value": "the umbrellas"}] |
00000010935.jpg | 10935_evjvqa | [{"from": "human", "value": "who is the hawker?\n<image>"}, {"from": "gpt", "value": "it is the man in red jacket"}] |
00000010936.jpg | 10936_evjvqa | [{"from": "human", "value": "how many pillars are there in front of the tower?\n<image>"}, {"from": "gpt", "value": "two"}] |
00000010937.jpg | 10937_evjvqa | [{"from": "human", "value": "where are the motorbikes being parked in?\n<image>"}, {"from": "gpt", "value": "close to the roadside"}] |
00000010939.jpg | 10939_evjvqa | [{"from": "human", "value": "what is the woman wearing pink mask doing?\n<image>"}, {"from": "gpt", "value": "she is putting tomatoes in her bag"}] |
00000010940.jpg | 10940_evjvqa | [{"from": "human", "value": "who is wearing the apron?\n<image>"}, {"from": "gpt", "value": "it is the woman on the left"}] |
00000010941.jpg | 10941_evjvqa | [{"from": "human", "value": "what device is this person holding?\n<image>"}, {"from": "gpt", "value": "the smartphone"}] |
00000010943.jpg | 10943_evjvqa | [{"from": "human", "value": "what store is the girl in the yellow jacket standing in?\n<image>"}, {"from": "gpt", "value": "the clothes shop "}] |
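
Each `conversations` cell is a JSON-encoded list of alternating `human`/`gpt` turns, with an `<image>` placeholder marking where the image is injected into the prompt (a LLaVA-style conversation format). Below is a minimal sketch of one way to split such a cell into question/answer pairs; it assumes the cell arrives as a raw JSON string, and the `row` literal is simply a copy of the first row above.

```python
import json

# One row from the table above, with "conversations" stored as a JSON string.
row = {
    "image": "00000002017.jpg",
    "id": "2017_evjvqa",
    "conversations": '[{"from": "human", "value": "what items does the woman intend to buy?\\n<image>"}, {"from": "gpt", "value": "vegetable"}]',
}

# Parse the JSON-encoded turns and pair each human question with the gpt answer
# that follows it, dropping the <image> placeholder from the question text.
turns = json.loads(row["conversations"])
pairs = [
    (turns[i]["value"].replace("<image>", "").strip(), turns[i + 1]["value"])
    for i in range(0, len(turns) - 1, 2)
    if turns[i]["from"] == "human" and turns[i + 1]["from"] == "gpt"
]

for question, answer in pairs:
    print(f"Q: {question}")
    print(f"A: {answer}")
```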