Dataset columns: instruction (string, 1 distinct value); output (string, 64 to 69.4k characters); input (string, 205 to 32.4k characters).
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code private String getUrlContents(String urlString) throws Exception { System.setProperty ("jsse.enableSNIExtension", "false"); URL url = new URL(urlString); URLConnection urlc = url.openConnection(); urlc.setRequestProperty("Accept", "application/json, */*"); urlc.connect(); StringBuilder contents = new StringBuilder(); InputStream in = urlc.getInputStream(); for(int i = 0;i!= -1;i= in.read()){ char c = (char)i; if(!Character.isISOControl(c)) contents.append((char)i); } in.close(); return contents.toString(); }
#vulnerable code private String getUrlContents(String urlString) throws Exception { System.setProperty ("jsse.enableSNIExtension", "false"); URL url = new URL(urlString); BufferedReader in = new BufferedReader( new InputStreamReader(url.openStream())); String inputLine; StringBuilder contents = new StringBuilder(); while ((inputLine = in.readLine()) != null) contents.append(inputLine); in.close(); return contents.toString(); } #location 12 #vulnerability type RESOURCE_LEAK
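The patch above switches to URLConnection and filters control characters, but it still closes the InputStream only on the success path. A minimal sketch of the same read written with try-with-resources (illustrative only, not part of the dataset) closes the stream even when read() throws:

    import java.io.InputStream;
    import java.net.URL;
    import java.net.URLConnection;

    // Illustrative sketch: same Accept header and control-character filter as
    // the fixed code above, with the stream closed on every exit path.
    private static String getUrlContents(String urlString) throws Exception {
        URLConnection urlc = new URL(urlString).openConnection();
        urlc.setRequestProperty("Accept", "application/json, */*");
        urlc.connect();
        StringBuilder contents = new StringBuilder();
        try (InputStream in = urlc.getInputStream()) {
            for (int i = in.read(); i != -1; i = in.read()) {
                char c = (char) i;
                if (!Character.isISOControl(c)) {
                    contents.append(c);
                }
            }
        }
        return contents.toString();
    }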
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void testParsingErrorPositionLargeInput() throws IOException { // 2048 is the buffer size, this will allow us to test position // information for large input that needs to be buffered char[] in = new char[2048 + 7]; in[0] = '['; for (int i = 1; i < 2046; i++) in[i] = '1'; in[2046] = ','; in[2047] = '\n'; in[2048] = '3'; in[2049] = '3'; in[2050] = ','; in[2051] = '\n'; in[2052] = '5'; in[2053] = 'x'; in[2054] = ']'; /* looks like : * [11111.....111, * 3, * 5x] */ @SuppressWarnings("resource") JsonReader reader = new JsonReader(new CharArrayReader(in), strictDoubleParse, readMetadata); try { for (reader.beginArray(); reader.hasNext();) { reader.next(); reader.valueAsDouble(); } reader.endArray(); fail(); } catch (JsonStreamException e) { assertEquals(2, e.getRow()); assertEquals(1, e.getColumn()); } }
#vulnerable code @Test public void testParsingErrorPositionLargeInput() throws IOException { // 2048 is the buffer size, this will allow us to test position // information for large input that needs to be buffered char[] in = new char[2048 + 7]; in[0] = '['; for (int i = 1; i < 2046; i++) in[i] = '1'; in[2046] = ','; in[2047] = '\n'; in[2048] = '3'; in[2049] = '3'; in[2050] = ','; in[2051] = '\n'; in[2052] = '5'; in[2053] = 'x'; in[2054] = ']'; /* looks like : * [11111.....111, * 3, * 5x] */ @SuppressWarnings("resource") JsonReader reader = new JsonReader(new CharArrayReader(in), strictDoubleParse, readMetadata); try { System.out.println(in); for (reader.beginArray(); reader.hasNext();) { reader.next(); reader.valueAsDouble(); } System.out.println(reader.valueAsInt()); fail(); } catch (JsonStreamException e) { e.printStackTrace(); assertEquals(2, e.getRow()); assertEquals(1, e.getColumn()); } } #location 31 #vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void testMultipleCallsTonextObjectMetadata() throws IOException { String src = "{\"@class\" : \"theclass\"" + ", \"@author\":\"me\"" + ", \"@comment\":\"no comment\"}"; JsonReader reader = new JsonReader(new StringReader(src)); assertEquals("theclass", reader.nextObjectMetadata().nextObjectMetadata().metadata("class")); assertEquals("theclass", reader.nextObjectMetadata().metadata("class")); assertEquals("no comment", reader.metadata("comment")); assertEquals("no comment", reader.nextObjectMetadata().metadata("comment")); assertEquals("me", reader.beginObject().metadata("author")); reader.endObject(); reader.close(); }
#vulnerable code @Test public void testMultipleCallsTonextObjectMetadata() throws IOException { String src = "{\"@class\" : \"theclass\"" + ", \"@author\":\"me\"" + ", \"@comment\":\"no comment\"}"; JsonReader reader = new JsonReader(new StringReader(src)); assertEquals("theclass", reader.nextObjectMetadata().nextObjectMetadata().metadata("class")); assertEquals("theclass", reader.nextObjectMetadata().metadata("class")); assertEquals("no comment", reader.metadata("comment")); assertEquals("no comment", reader.nextObjectMetadata().metadata("comment")); assertEquals("me", reader.beginObject().metadata("author")); reader.endObject(); } #location 11 #vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code public <T> String serialize(T o, GenericType<T> type) throws TransformationException, IOException { StringWriter sw = new StringWriter(); ObjectWriter writer = createWriter(sw); if (o == null) nullConverter.serialize(null, writer, null); else serialize(o, type.getType(), writer, new Context(this)); writer.flush(); return sw.toString(); }
#vulnerable code public <T> String serialize(T o, GenericType<T> type) throws TransformationException, IOException { JsonWriter writer = new JsonWriter(new StringWriter(), skipNull, htmlSafe); if (o == null) nullConverter.serialize(null, writer, null); else serialize(o, type.getType(), writer, new Context(this)); writer.flush(); return writer.unwrap().toString(); } #location 9 #vulnerability type RESOURCE_LEAK
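The fix keeps a reference to the StringWriter and reads the result from it instead of reaching back through the wrapping writer with unwrap(). The general shape of that pattern, sketched with plain java.io types rather than Genson's ObjectWriter/createWriter (so the names below are stand-ins, not the library's API):

    import java.io.BufferedWriter;
    import java.io.IOException;
    import java.io.StringWriter;
    import java.io.Writer;

    // Illustrative sketch: hold the underlying StringWriter, let
    // try-with-resources flush and close the wrapping writer, then read the
    // buffered result from the StringWriter you still hold.
    static String writeToString(String payload) throws IOException {
        StringWriter sw = new StringWriter();
        try (Writer out = new BufferedWriter(sw)) {
            out.write(payload);
        } // close() flushes the buffer into sw
        return sw.toString();
    }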
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void testReadMalformedJson() throws IOException { String src = ""; JsonReader reader = new JsonReader(new StringReader(src), strictDoubleParse, readMetadata); try { reader.beginObject(); fail(); } catch (JsonStreamException ise) { } reader.close(); }
#vulnerable code @Test public void testReadMalformedJson() throws IOException { String src = ""; JsonReader reader = new JsonReader(new StringReader(src), strictDoubleParse, readMetadata); try { reader.beginObject(); fail(); } catch (IllegalStateException ise) { } reader.close(); } #location 9 #vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code public <T> String serialize(T o) throws TransformationException, IOException { StringWriter sw = new StringWriter(); ObjectWriter writer = createWriter(sw); if (o == null) nullConverter.serialize(null, writer, null); else serialize(o, o.getClass(), writer, new Context(this)); writer.flush(); return sw.toString(); }
#vulnerable code public <T> String serialize(T o) throws TransformationException, IOException { JsonWriter writer = new JsonWriter(new StringWriter(), skipNull, htmlSafe); if (o == null) nullConverter.serialize(null, writer, null); else serialize(o, o.getClass(), writer, new Context(this)); writer.flush(); return writer.unwrap().toString(); } #location 8 #vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code public boolean valueAsBoolean() throws IOException { if (BOOLEAN == valueType) { return _booleanValue; } if (STRING == valueType) return Boolean.parseBoolean(_stringValue); if (NULL == valueType) return false; throw new IllegalStateException("Readen value is not of type boolean"); }
#vulnerable code public boolean valueAsBoolean() throws IOException { if (BOOLEAN == valueType) { return _booleanValue; } if (STRING == valueType) return "".equals(_stringValue) ? null : Boolean.valueOf(_stringValue); if (NULL == valueType) return false; throw new IllegalStateException("Readen value is not of type boolean"); } #location 6 #vulnerability type NULL_DEREFERENCE
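The vulnerable branch can return null from a method whose declared return type is the primitive boolean, so the Boolean produced by the conditional expression is auto-unboxed at the return statement and throws a NullPointerException. A self-contained sketch of exactly that pitfall (class and method names are hypothetical):

    // parseFlag is declared to return the primitive boolean, but the conditional
    // expression has type Boolean; returning null forces auto-unboxing and NPEs.
    public final class UnboxingNpeDemo {
        static boolean parseFlag(String s) {
            return "".equals(s) ? null : Boolean.valueOf(s);
        }

        public static void main(String[] args) {
            System.out.println(parseFlag("true")); // prints: true
            System.out.println(parseFlag(""));     // throws NullPointerException
        }
    }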
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code private final void newMisplacedTokenException(int cursor) { if (_buflen < 0) throw new IllegalStateException( "Incomplete data or malformed json : encoutered end of stream."); if (cursor < 0) cursor = 0; int pos = _position - (_buflen - cursor); if (pos < 0) pos = 0; throw new IllegalStateException("Encountred misplaced character '" + _buffer[cursor] + "' around position " + pos); }
#vulnerable code private final void newMisplacedTokenException(int cursor) { if (_buflen < 0) throw new IllegalStateException( "Incomplete data or malformed json : encoutered end of stream."); if (cursor < 0) cursor = 0; int pos = (_position - valueAsString().length() - _buflen + cursor); if (pos < 0) pos = 0; throw new IllegalStateException("Encountred misplaced character '" + _buffer[cursor] + "' around position " + pos); } #location 7 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code private final void newWrongTokenException(String awaited, int cursor) { // otherwise it fails when an error occurs on first character if (cursor < 0) cursor = 0; int pos = _position - (_buflen - cursor); if (pos < 0) pos = 0; if (_buflen < 0) throw new IllegalStateException( "Incomplete data or malformed json : encoutered end of stream but expected " + awaited); else throw new IllegalStateException("Illegal character at position " + pos + " expected " + awaited + " but read '" + _buffer[cursor] + "' !"); }
#vulnerable code private final void newWrongTokenException(String awaited, int cursor) { // otherwise it fails when an error occurs on first character if (cursor < 0) cursor = 0; int pos = (_position - valueAsString().length() - _buflen + cursor); if (pos < 0) pos = 0; if (_buflen < 0) throw new IllegalStateException( "Incomplete data or malformed json : encoutered end of stream but expected " + awaited); else throw new IllegalStateException("Illegal character at position " + pos + " expected " + awaited + " but read '" + _buffer[cursor] + "' !"); } #location 4 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void testIncompleteSource() throws IOException { String src = "[1,"; JsonReader reader = new JsonReader(new StringReader(src)); try { reader.beginArray(); reader.next(); reader.next(); fail(); } catch (IOException ioe) {} reader.close(); }
#vulnerable code @Test public void testIncompleteSource() throws IOException { String src = "[1,"; JsonReader reader = new JsonReader(new StringReader(src)); try { reader.beginArray(); reader.next(); reader.next(); fail(); } catch (IOException ioe) {} } #location 6 #vulnerability type RESOURCE_LEAK
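The JsonReader test patches above all add a reader.close() after the try/catch block, but that close is skipped whenever fail() actually fires, because AssertionError is not caught. Assuming JsonReader implements Closeable (it exposes close() in the rows above), the same test can be written with try-with-resources so the reader is closed on every path:

    import java.io.IOException;
    import java.io.StringReader;
    import org.junit.Test;
    import static org.junit.Assert.fail;

    // Illustrative rewrite of the incomplete-source test: try-with-resources
    // closes the reader whether or not the expected IOException is thrown.
    @Test
    public void testIncompleteSource() throws IOException {
        try (JsonReader reader = new JsonReader(new StringReader("[1,"))) {
            reader.beginArray();
            reader.next();
            reader.next();
            fail();
        } catch (IOException expected) {
            // the truncated input is supposed to end here
        }
    }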
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void run(Bootstrap bootstrap, Namespace args) { // read and initialize arguments: GraphHopperConfig graphHopperConfiguration = new GraphHopperConfig(); graphHopperConfiguration.setProfiles(Collections.singletonList(new ProfileConfig("fast_car").setVehicle("car").setWeighting("fastest"))); graphHopperConfiguration.putObject("graph.location", "graph-cache"); seed = args.getLong("seed"); count = args.getInt("count"); GraphHopper graphHopper = new GraphHopperOSM(); graphHopper.init(graphHopperConfiguration).forDesktop(); graphHopper.importOrLoad(); // and map-matching stuff GraphHopperStorage graph = graphHopper.getGraphHopperStorage(); bbox = graph.getBounds(); LocationIndexTree locationIndex = (LocationIndexTree) graphHopper.getLocationIndex(); MapMatching mapMatching = new MapMatching(graphHopper, new HintsMap().putObject("profile", "fast_car")); // start tests: StopWatch sw = new StopWatch().start(); try { printLocationIndexMatchQuery(locationIndex); printTimeOfMapMatchQuery(graphHopper, mapMatching); System.gc(); } catch (Exception ex) { logger.error("Problem while measuring", ex); properties.put("error", "" + ex.toString()); } finally { properties.put("measurement.count", "" + count); properties.put("measurement.seed", "" + seed); properties.put("measurement.time", "" + sw.stop().getMillis()); System.gc(); properties.put("measurement.totalMB", "" + Helper.getTotalMB()); properties.put("measurement.usedMB", "" + Helper.getUsedMB()); try { FileWriter fileWriter = new FileWriter(args.<File>get("outfile")); for (Entry<String, String> e : properties.entrySet()) { fileWriter.append(e.getKey()); fileWriter.append("="); fileWriter.append(e.getValue()); fileWriter.append("\n"); } fileWriter.flush(); } catch (IOException ex) { logger.error( "Problem while writing measurements", ex); } } }
#vulnerable code @Override public void run(Bootstrap bootstrap, Namespace args) { // read and initialize arguments: GraphHopperConfig graphHopperConfiguration = new GraphHopperConfig(); graphHopperConfiguration.putObject("graph.location", "graph-cache"); seed = args.getLong("seed"); count = args.getInt("count"); GraphHopper graphHopper = new GraphHopperOSM(); graphHopper.init(graphHopperConfiguration).forDesktop(); graphHopper.importOrLoad(); // and map-matching stuff GraphHopperStorage graph = graphHopper.getGraphHopperStorage(); bbox = graph.getBounds(); LocationIndexTree locationIndex = (LocationIndexTree) graphHopper.getLocationIndex(); MapMatching mapMatching = new MapMatching(graphHopper, new HintsMap()); // start tests: StopWatch sw = new StopWatch().start(); try { printLocationIndexMatchQuery(locationIndex); printTimeOfMapMatchQuery(graphHopper, mapMatching); System.gc(); } catch (Exception ex) { logger.error("Problem while measuring", ex); properties.put("error", "" + ex.toString()); } finally { properties.put("measurement.count", "" + count); properties.put("measurement.seed", "" + seed); properties.put("measurement.time", "" + sw.stop().getMillis()); System.gc(); properties.put("measurement.totalMB", "" + Helper.getTotalMB()); properties.put("measurement.usedMB", "" + Helper.getUsedMB()); try { FileWriter fileWriter = new FileWriter(args.<File>get("outfile")); for (Entry<String, String> e : properties.entrySet()) { fileWriter.append(e.getKey()); fileWriter.append("="); fileWriter.append(e.getValue()); fileWriter.append("\n"); } fileWriter.flush(); } catch (IOException ex) { logger.error( "Problem while writing measurements", ex); } } } #location 43 #vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void run(Bootstrap bootstrap, Namespace args) { // read and initialize arguments: GraphHopperConfig graphHopperConfiguration = new GraphHopperConfig(); graphHopperConfiguration.setProfiles(Collections.singletonList(new ProfileConfig("fast_car").setVehicle("car").setWeighting("fastest"))); graphHopperConfiguration.putObject("graph.location", "graph-cache"); seed = args.getLong("seed"); count = args.getInt("count"); GraphHopper graphHopper = new GraphHopperOSM(); graphHopper.init(graphHopperConfiguration).forDesktop(); graphHopper.importOrLoad(); // and map-matching stuff GraphHopperStorage graph = graphHopper.getGraphHopperStorage(); bbox = graph.getBounds(); LocationIndexTree locationIndex = (LocationIndexTree) graphHopper.getLocationIndex(); MapMatching mapMatching = new MapMatching(graphHopper, new HintsMap().putObject("profile", "fast_car")); // start tests: StopWatch sw = new StopWatch().start(); try { printLocationIndexMatchQuery(locationIndex); printTimeOfMapMatchQuery(graphHopper, mapMatching); System.gc(); } catch (Exception ex) { logger.error("Problem while measuring", ex); properties.put("error", "" + ex.toString()); } finally { properties.put("measurement.count", "" + count); properties.put("measurement.seed", "" + seed); properties.put("measurement.time", "" + sw.stop().getMillis()); System.gc(); properties.put("measurement.totalMB", "" + Helper.getTotalMB()); properties.put("measurement.usedMB", "" + Helper.getUsedMB()); try { FileWriter fileWriter = new FileWriter(args.<File>get("outfile")); for (Entry<String, String> e : properties.entrySet()) { fileWriter.append(e.getKey()); fileWriter.append("="); fileWriter.append(e.getValue()); fileWriter.append("\n"); } fileWriter.flush(); } catch (IOException ex) { logger.error( "Problem while writing measurements", ex); } } }
#vulnerable code @Override public void run(Bootstrap bootstrap, Namespace args) { // read and initialize arguments: GraphHopperConfig graphHopperConfiguration = new GraphHopperConfig(); graphHopperConfiguration.putObject("graph.location", "graph-cache"); seed = args.getLong("seed"); count = args.getInt("count"); GraphHopper graphHopper = new GraphHopperOSM(); graphHopper.init(graphHopperConfiguration).forDesktop(); graphHopper.importOrLoad(); // and map-matching stuff GraphHopperStorage graph = graphHopper.getGraphHopperStorage(); bbox = graph.getBounds(); LocationIndexTree locationIndex = (LocationIndexTree) graphHopper.getLocationIndex(); MapMatching mapMatching = new MapMatching(graphHopper, new HintsMap()); // start tests: StopWatch sw = new StopWatch().start(); try { printLocationIndexMatchQuery(locationIndex); printTimeOfMapMatchQuery(graphHopper, mapMatching); System.gc(); } catch (Exception ex) { logger.error("Problem while measuring", ex); properties.put("error", "" + ex.toString()); } finally { properties.put("measurement.count", "" + count); properties.put("measurement.seed", "" + seed); properties.put("measurement.time", "" + sw.stop().getMillis()); System.gc(); properties.put("measurement.totalMB", "" + Helper.getTotalMB()); properties.put("measurement.usedMB", "" + Helper.getUsedMB()); try { FileWriter fileWriter = new FileWriter(args.<File>get("outfile")); for (Entry<String, String> e : properties.entrySet()) { fileWriter.append(e.getKey()); fileWriter.append("="); fileWriter.append(e.getValue()); fileWriter.append("\n"); } fileWriter.flush(); } catch (IOException ex) { logger.error( "Problem while writing measurements", ex); } } } #location 44 #vulnerability type RESOURCE_LEAK
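In both the fixed and vulnerable versions of this Measurement command the FileWriter that dumps the properties map is only flushed, never closed, which appears to be what the RESOURCE_LEAK finding at that location points at. A small illustrative helper (not the project's code) that writes the same key=value lines with try-with-resources:

    import java.io.File;
    import java.io.FileWriter;
    import java.io.IOException;
    import java.util.Map;

    // Illustrative sketch: close() implies flush(), so the explicit flush()
    // from the rows above becomes unnecessary.
    static void writeProperties(File outfile, Map<String, String> properties) {
        try (FileWriter fileWriter = new FileWriter(outfile)) {
            for (Map.Entry<String, String> e : properties.entrySet()) {
                fileWriter.append(e.getKey()).append('=').append(e.getValue()).append('\n');
            }
        } catch (IOException ex) {
            System.err.println("Problem while writing measurements: " + ex);
        }
    }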
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void testEvaluate() throws IOException { { List<String> ftvec1 = Arrays.asList("bbb:1.4", "aaa:0.9", "ccc"); Assert.assertEquals(1.f, CosineSimilarityUDF.cosineSimilarity(ftvec1, ftvec1), 0.0); } Assert.assertEquals(0.f, CosineSimilarityUDF.cosineSimilarity(Arrays.asList("a", "b", "c"), Arrays.asList("d", "e")), 0.0); Assert.assertEquals(0.f, CosineSimilarityUDF.cosineSimilarity(Arrays.asList("a", "b", "c"), Arrays.asList("d", "e")), 0.0); Assert.assertEquals(1.f, CosineSimilarityUDF.cosineSimilarity(Arrays.asList("a", "b"), Arrays.asList("a", "b")), 0.0); Assert.assertEquals(0.5f, CosineSimilarityUDF.cosineSimilarity(Arrays.asList("a", "b"), Arrays.asList("a", "c")), 0.0); Assert.assertEquals(-1.f, CosineSimilarityUDF.cosineSimilarity(Arrays.asList("a:1.0"), Arrays.asList("a:-1.0")), 0.0); Assert.assertTrue(CosineSimilarityUDF.cosineSimilarity(Arrays.asList("apple", "orange"), Arrays.asList("banana", "apple")) > 0.f); Assert.assertTrue(CosineSimilarityUDF.cosineSimilarity(Arrays.asList("apple", "orange"), Arrays.asList("banana", "apple")) > 0.f); Assert.assertTrue((CosineSimilarityUDF.cosineSimilarity(Arrays.asList("apple", "orange"), Arrays.asList("banana", "orange", "apple"))) > (CosineSimilarityUDF.cosineSimilarity(Arrays.asList("apple", "orange"), Arrays.asList("banana", "orange")))); Assert.assertEquals(1.0f, CosineSimilarityUDF.cosineSimilarity(Arrays.asList("This is a sentence with seven tokens".split(" ")), Arrays.<String> asList("This is a sentence with seven tokens".split(" "))), 0.0); Assert.assertEquals(1.0f, CosineSimilarityUDF.cosineSimilarity(Arrays.asList("This is a sentence with seven tokens".split(" ")), Arrays.<String> asList("This is a sentence with seven tokens".split(" "))), 0.0); { List<String> tokens1 = Arrays.asList("1:1,2:1,3:1,4:1,5:0,6:1,7:1,8:1,9:0,10:1,11:1".split(",")); List<String> tokens2 = Arrays.asList("1:1,2:1,3:0,4:1,5:1,6:1,7:1,8:0,9:1,10:1,11:1".split(",")); Assert.assertEquals(0.77777f, CosineSimilarityUDF.cosineSimilarity(tokens1, tokens2), 0.00001f); } { List<String> tokens1 = Arrays.asList("1 2 3 4 6 7 8 10 11".split("\\s+")); List<String> tokens2 = Arrays.asList("1 2 4 5 6 7 9 10 11".split("\\s+")); double dotp = 1 + 1 + 0 + 1 + 0 + 1 + 1 + 0 + 0 + 1 + 1; double norm = Math.sqrt(tokens1.size()) * Math.sqrt(tokens2.size()); Assert.assertEquals(dotp / norm, CosineSimilarityUDF.cosineSimilarity(tokens1, tokens2), 0.00001f); Assert.assertEquals(dotp / norm, CosineSimilarityUDF.cosineSimilarity(tokens1, tokens2), 0.00001f); Assert.assertEquals(dotp / norm, CosineSimilarityUDF.cosineSimilarity(Arrays.asList("1", "2", "3", "4", "6", "7", "8", "10", "11"), Arrays.asList("1", "2", "4", "5", "6", "7", "9", "10", "11")), 0.00001f); } Assert.assertEquals(0.f, CosineSimilarityUDF.cosineSimilarity(Arrays.asList("1", "2", "3"), Arrays.asList("4", "5")), 0.0); Assert.assertEquals(1.f, CosineSimilarityUDF.cosineSimilarity(Arrays.asList("1", "2"), Arrays.asList("1", "2")), 0.0); }
#vulnerable code @Test public void testEvaluate() { CosineSimilarityUDF cosine = new CosineSimilarityUDF(); { List<String> ftvec1 = Arrays.asList("bbb:1.4", "aaa:0.9", "ccc"); Assert.assertEquals(1.f, cosine.evaluate(ftvec1, ftvec1).get(), 0.0); } Assert.assertEquals(0.f, cosine.evaluate(Arrays.asList("a", "b", "c"), Arrays.asList("d", "e")).get(), 0.0); Assert.assertEquals(0.f, cosine.evaluate(Arrays.asList("a", "b", "c"), Arrays.asList("d", "e")).get(), 0.0); Assert.assertEquals(1.f, cosine.evaluate(Arrays.asList("a", "b"), Arrays.asList("a", "b")).get(), 0.0); Assert.assertEquals(0.5f, cosine.evaluate(Arrays.asList("a", "b"), Arrays.asList("a", "c")).get(), 0.0); Assert.assertEquals(-1.f, cosine.evaluate(Arrays.asList("a:1.0"), Arrays.asList("a:-1.0")).get(), 0.0); Assert.assertTrue(cosine.evaluate(Arrays.asList("apple", "orange"), Arrays.asList("banana", "apple")).get() > 0.f); Assert.assertTrue(cosine.evaluate(Arrays.asList("apple", "orange"), Arrays.asList("banana", "apple")).get() > 0.f); Assert.assertTrue((cosine.evaluate(Arrays.asList("apple", "orange"), Arrays.asList("banana", "orange", "apple"))).get() > (cosine.evaluate(Arrays.asList("apple", "orange"), Arrays.asList("banana", "orange"))).get()); Assert.assertEquals(1.0f, cosine.evaluate(Arrays.asList("This is a sentence with seven tokens".split(" ")), Arrays.<String> asList("This is a sentence with seven tokens".split(" "))).get(), 0.0); Assert.assertEquals(1.0f, cosine.evaluate(Arrays.asList("This is a sentence with seven tokens".split(" ")), Arrays.<String> asList("This is a sentence with seven tokens".split(" "))).get(), 0.0); { List<String> tokens1 = Arrays.asList("1:1,2:1,3:1,4:1,5:0,6:1,7:1,8:1,9:0,10:1,11:1".split(",")); List<String> tokens2 = Arrays.asList("1:1,2:1,3:0,4:1,5:1,6:1,7:1,8:0,9:1,10:1,11:1".split(",")); Assert.assertEquals(0.77777f, cosine.evaluate(tokens1, tokens2).get(), 0.00001f); } { List<String> tokens1 = Arrays.asList("1 2 3 4 6 7 8 10 11".split("\\s+")); List<String> tokens2 = Arrays.asList("1 2 4 5 6 7 9 10 11".split("\\s+")); double dotp = 1 + 1 + 0 + 1 + 0 + 1 + 1 + 0 + 0 + 1 + 1; double norm = Math.sqrt(tokens1.size()) * Math.sqrt(tokens2.size()); Assert.assertEquals(dotp / norm, cosine.evaluate(tokens1, tokens2).get(), 0.00001f); Assert.assertEquals(dotp / norm, cosine.evaluate(tokens1, tokens2).get(), 0.00001f); Assert.assertEquals(dotp / norm, cosine.evaluate(Arrays.asList("1", "2", "3", "4", "6", "7", "8", "10", "11"), Arrays.asList("1", "2", "4", "5", "6", "7", "9", "10", "11")).get(), 0.00001f); } Assert.assertEquals(0.f, cosine.evaluate(Arrays.asList("1", "2", "3"), Arrays.asList("4", "5")).get(), 0.0); Assert.assertEquals(1.f, cosine.evaluate(Arrays.asList("1", "2"), Arrays.asList("1", "2")).get(), 0.0); } #location 45 #vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code private static int evalPredict(DecisionTree tree, double[] x) throws HiveException, IOException { String script = tree.predictCodegen(); System.out.println(script); TreePredictByJavascriptUDF udf = new TreePredictByJavascriptUDF(); udf.initialize(new ObjectInspector[] { PrimitiveObjectInspectorFactory.javaStringObjectInspector, ObjectInspectorFactory.getStandardListObjectInspector(PrimitiveObjectInspectorFactory.javaDoubleObjectInspector) }); IntWritable result = (IntWritable) udf.evaluate(script, x, true); result = (IntWritable) udf.evaluate(script, x, true); udf.close(); return result.get(); }
#vulnerable code private static int evalPredict(DecisionTree tree, double[] x) throws HiveException, IOException { String script = tree.predictCodegen(); System.out.println(script); TreePredictTrustedUDF udf = new TreePredictTrustedUDF(); udf.initialize(new ObjectInspector[] { PrimitiveObjectInspectorFactory.javaStringObjectInspector, ObjectInspectorFactory.getStandardListObjectInspector(PrimitiveObjectInspectorFactory.javaDoubleObjectInspector) }); IntWritable result = (IntWritable) udf.evaluate(script, x, true); result = (IntWritable) udf.evaluate(script, x, true); udf.close(); return result.get(); } #location 11 #vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code private static long loadPredictionModel(PredictionModel model, File file, PrimitiveObjectInspector featureOI, WritableFloatObjectInspector weightOI, WritableFloatObjectInspector covarOI) throws IOException, SerDeException { long count = 0L; if(!file.exists()) { return count; } if(!file.getName().endsWith(".crc")) { if(file.isDirectory()) { for(File f : file.listFiles()) { count += loadPredictionModel(model, f, featureOI, weightOI, covarOI); } } else { LazySimpleSerDe serde = HiveUtils.getLineSerde(featureOI, weightOI, covarOI); StructObjectInspector lineOI = (StructObjectInspector) serde.getObjectInspector(); StructField c1ref = lineOI.getStructFieldRef("c1"); StructField c2ref = lineOI.getStructFieldRef("c2"); StructField c3ref = lineOI.getStructFieldRef("c3"); PrimitiveObjectInspector c1oi = (PrimitiveObjectInspector) c1ref.getFieldObjectInspector(); FloatObjectInspector c2oi = (FloatObjectInspector) c2ref.getFieldObjectInspector(); FloatObjectInspector c3oi = (FloatObjectInspector) c3ref.getFieldObjectInspector(); BufferedReader reader = null; try { reader = HadoopUtils.getBufferedReader(file); String line; while((line = reader.readLine()) != null) { count++; Text lineText = new Text(line); Object lineObj = serde.deserialize(lineText); List<Object> fields = lineOI.getStructFieldsDataAsList(lineObj); Object f0 = fields.get(0); Object f1 = fields.get(1); Object f2 = fields.get(2); if(f0 == null || f1 == null) { continue; // avoid unexpected case } Object k = c1oi.getPrimitiveWritableObject(c1oi.copyObject(f0)); float v = c2oi.get(f1); float cov = (f2 == null) ? WeightValueWithCovar.DEFAULT_COVAR : c3oi.get(f2); model.set(k, new WeightValueWithCovar(v, cov, false)); } } finally { IOUtils.closeQuietly(reader); } } } return count; }
#vulnerable code private static long loadPredictionModel(PredictionModel model, File file, PrimitiveObjectInspector featureOI, WritableFloatObjectInspector weightOI, WritableFloatObjectInspector covarOI) throws IOException, SerDeException { long count = 0L; if(!file.exists()) { return count; } if(!file.getName().endsWith(".crc")) { if(file.isDirectory()) { for(File f : file.listFiles()) { count += loadPredictionModel(model, f, featureOI, weightOI, covarOI); } } else { LazySimpleSerDe serde = HiveUtils.getLineSerde(featureOI, weightOI, covarOI); StructObjectInspector lineOI = (StructObjectInspector) serde.getObjectInspector(); StructField c1ref = lineOI.getStructFieldRef("c1"); StructField c2ref = lineOI.getStructFieldRef("c2"); StructField c3ref = lineOI.getStructFieldRef("c3"); PrimitiveObjectInspector c1oi = (PrimitiveObjectInspector) c1ref.getFieldObjectInspector(); FloatObjectInspector c2oi = (FloatObjectInspector) c2ref.getFieldObjectInspector(); FloatObjectInspector c3oi = (FloatObjectInspector) c3ref.getFieldObjectInspector(); final BufferedReader reader = HadoopUtils.getBufferedReader(file); try { String line; while((line = reader.readLine()) != null) { count++; Text lineText = new Text(line); Object lineObj = serde.deserialize(lineText); List<Object> fields = lineOI.getStructFieldsDataAsList(lineObj); Object f0 = fields.get(0); Object f1 = fields.get(1); Object f2 = fields.get(2); if(f0 == null || f1 == null) { continue; // avoid unexpected case } Object k = c1oi.getPrimitiveWritableObject(c1oi.copyObject(f0)); float v = c2oi.get(f1); float cov = (f2 == null) ? WeightValueWithCovar.DEFAULT_COVAR : c3oi.get(f2); model.set(k, new WeightValueWithCovar(v, cov, false)); } } finally { reader.close(); } } } return count; } #location 43 #vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code private long loadPredictionModel(Map<Object, PredictionModel> label2model, File file, PrimitiveObjectInspector labelOI, PrimitiveObjectInspector featureOI, WritableFloatObjectInspector weightOI, WritableFloatObjectInspector covarOI) throws IOException, SerDeException { long count = 0L; if(!file.exists()) { return count; } if(!file.getName().endsWith(".crc")) { if(file.isDirectory()) { for(File f : file.listFiles()) { count += loadPredictionModel(label2model, f, labelOI, featureOI, weightOI, covarOI); } } else { LazySimpleSerDe serde = HiveUtils.getLineSerde(labelOI, featureOI, weightOI, covarOI); StructObjectInspector lineOI = (StructObjectInspector) serde.getObjectInspector(); StructField c1ref = lineOI.getStructFieldRef("c1"); StructField c2ref = lineOI.getStructFieldRef("c2"); StructField c3ref = lineOI.getStructFieldRef("c3"); StructField c4ref = lineOI.getStructFieldRef("c4"); PrimitiveObjectInspector c1refOI = (PrimitiveObjectInspector) c1ref.getFieldObjectInspector(); PrimitiveObjectInspector c2refOI = (PrimitiveObjectInspector) c2ref.getFieldObjectInspector(); FloatObjectInspector c3refOI = (FloatObjectInspector) c3ref.getFieldObjectInspector(); FloatObjectInspector c4refOI = (FloatObjectInspector) c4ref.getFieldObjectInspector(); BufferedReader reader = null; try { reader = HadoopUtils.getBufferedReader(file); String line; while((line = reader.readLine()) != null) { count++; Text lineText = new Text(line); Object lineObj = serde.deserialize(lineText); List<Object> fields = lineOI.getStructFieldsDataAsList(lineObj); Object f0 = fields.get(0); Object f1 = fields.get(1); Object f2 = fields.get(2); Object f3 = fields.get(3); if(f0 == null || f1 == null || f2 == null) { continue; // avoid unexpected case } Object label = c1refOI.getPrimitiveWritableObject(c1refOI.copyObject(f0)); PredictionModel model = label2model.get(label); if(model == null) { model = createModel(); label2model.put(label, model); } Object k = c2refOI.getPrimitiveWritableObject(c2refOI.copyObject(f1)); float v = c3refOI.get(f2); float cov = (f3 == null) ? WeightValueWithCovar.DEFAULT_COVAR : c4refOI.get(f3); model.set(k, new WeightValueWithCovar(v, cov, false)); } } finally { IOUtils.closeQuietly(reader); } } } return count; }
#vulnerable code private long loadPredictionModel(Map<Object, PredictionModel> label2model, File file, PrimitiveObjectInspector labelOI, PrimitiveObjectInspector featureOI, WritableFloatObjectInspector weightOI, WritableFloatObjectInspector covarOI) throws IOException, SerDeException { long count = 0L; if(!file.exists()) { return count; } if(!file.getName().endsWith(".crc")) { if(file.isDirectory()) { for(File f : file.listFiles()) { count += loadPredictionModel(label2model, f, labelOI, featureOI, weightOI, covarOI); } } else { LazySimpleSerDe serde = HiveUtils.getLineSerde(labelOI, featureOI, weightOI, covarOI); StructObjectInspector lineOI = (StructObjectInspector) serde.getObjectInspector(); StructField c1ref = lineOI.getStructFieldRef("c1"); StructField c2ref = lineOI.getStructFieldRef("c2"); StructField c3ref = lineOI.getStructFieldRef("c3"); StructField c4ref = lineOI.getStructFieldRef("c4"); PrimitiveObjectInspector c1refOI = (PrimitiveObjectInspector) c1ref.getFieldObjectInspector(); PrimitiveObjectInspector c2refOI = (PrimitiveObjectInspector) c2ref.getFieldObjectInspector(); FloatObjectInspector c3refOI = (FloatObjectInspector) c3ref.getFieldObjectInspector(); FloatObjectInspector c4refOI = (FloatObjectInspector) c4ref.getFieldObjectInspector(); final BufferedReader reader = HadoopUtils.getBufferedReader(file); try { String line; while((line = reader.readLine()) != null) { count++; Text lineText = new Text(line); Object lineObj = serde.deserialize(lineText); List<Object> fields = lineOI.getStructFieldsDataAsList(lineObj); Object f0 = fields.get(0); Object f1 = fields.get(1); Object f2 = fields.get(2); Object f3 = fields.get(3); if(f0 == null || f1 == null || f2 == null) { continue; // avoid unexpected case } Object label = c1refOI.getPrimitiveWritableObject(c1refOI.copyObject(f0)); PredictionModel model = label2model.get(label); if(model == null) { model = createModel(); label2model.put(label, model); } Object k = c2refOI.getPrimitiveWritableObject(c2refOI.copyObject(f1)); float v = c3refOI.get(f2); float cov = (f3 == null) ? WeightValueWithCovar.DEFAULT_COVAR : c4refOI.get(f3); model.set(k, new WeightValueWithCovar(v, cov, false)); } } finally { reader.close(); } } } return count; } #location 52 #vulnerability type RESOURCE_LEAK
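The loadPredictionModel patches move the reader declaration outside the try and close it with IOUtils.closeQuietly in the finally block. The same guarantee, sketched with plain JDK classes instead of HadoopUtils and the Hive SerDe machinery (an illustration of the pattern, not the project's code):

    import java.io.BufferedReader;
    import java.io.File;
    import java.io.FileReader;
    import java.io.IOException;

    // Illustrative sketch: try-with-resources closes the reader on every exit
    // path, including when an exception escapes the loop body.
    static long countLines(File file) throws IOException {
        long count = 0L;
        try (BufferedReader reader = new BufferedReader(new FileReader(file))) {
            while (reader.readLine() != null) {
                count++;
            }
        }
        return count;
    }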
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override protected CommandLine processOptions(ObjectInspector[] argOIs) throws UDFArgumentException { CommandLine cl = super.processOptions(argOIs); if(cl == null) { this.eta0 = 0.1f; this.eps = 1.f; this.scaling = 100f; } else { this.eta0 = Primitives.parseFloat(cl.getOptionValue("eta0"), 0.1f); this.eps = Primitives.parseFloat(cl.getOptionValue("eps"), 1.f); this.scaling = Primitives.parseFloat(cl.getOptionValue("scale"), 100f); } return cl; }
#vulnerable code @Override protected CommandLine processOptions(ObjectInspector[] argOIs) throws UDFArgumentException { CommandLine cl = super.processOptions(argOIs); this.eta0 = Primitives.parseFloat(cl.getOptionValue("eta0"), 0.1f); this.eps = Primitives.parseFloat(cl.getOptionValue("eps"), 1.f); this.scaling = Primitives.parseFloat(cl.getOptionValue("scale"), 100f); return cl; } #location 4 #vulnerability type NULL_DEREFERENCE
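Here the vulnerable code dereferences the CommandLine returned by super.processOptions without a null check, while the fix falls back to hard-coded defaults when no options were parsed. The guard reduces to a small pattern like the following sketch (CommandLine is commons-cli; the helper name and defaults are illustrative):

    import org.apache.commons.cli.CommandLine;

    // Illustrative sketch: return the default both when no CommandLine exists
    // and when the individual option is absent.
    static float floatOption(CommandLine cl, String key, float defaultValue) {
        if (cl == null) {
            return defaultValue;
        }
        String raw = cl.getOptionValue(key);
        return (raw == null) ? defaultValue : Float.parseFloat(raw);
    }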
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code private boolean executeOperation(Operation currentOperation) throws VMRuntimeException { if(IP < 0) { return false; } switch (currentOperation.op) { case GOTO: { if(isInt(currentOperation.operand)) { IP = Integer.parseInt(currentOperation.operand); } else { IP = jumpMap.get(currentOperation.operand); } break; } case CALL: { double candidateIP = valuesMap.get(currentOperation.operand); if(candidateIP < 0) { evaluateBuiltinByName(currentOperation.operand); IP++; } break; } case IFEQ: { // follow the rule of smile's Math class. double a = pop(); double b = pop(); double absa = Math.abs(a); double absb = Math.abs(b); if(a == b || Math.abs(a - b) <= Math.min(absa, absb) * 2.2204460492503131e-16) { if(isInt(currentOperation.operand)) { IP = Integer.parseInt(currentOperation.operand); } else { IP = jumpMap.get(currentOperation.operand); } } else { IP++; } break; } case IFGR: { double lower = pop(); double upper = pop(); if(upper > lower) { if(isInt(currentOperation.operand)) { IP = Integer.parseInt(currentOperation.operand); } else { IP = jumpMap.get(currentOperation.operand); } } else { IP++; } break; } case POP: { valuesMap.put(currentOperation.operand, pop()); IP++; break; } case PUSH: { if(isDouble(currentOperation.operand)) push(Double.parseDouble(currentOperation.operand)); else { Double v = valuesMap.get(currentOperation.operand); if(v == null) { throw new VMRuntimeException("value is not binded: " + currentOperation.operand); } push(v); } IP++; break; } default: throw new VMRuntimeException("Machine code has wrong opcode :" + currentOperation.op); } return true; }
#vulnerable code private boolean executeOperation(Operation currentOperation) throws VMRuntimeException { if(IP < 0) return false; switch (currentOperation.op) { case GOTO: if(isInt(currentOperation.operand)) IP = Integer.parseInt(currentOperation.operand); else IP = jumpMap.get(currentOperation.operand); break; case CALL: double candidateIP = valuesMap.get(currentOperation.operand); if(candidateIP < 0) { evaluateBuiltinByName(currentOperation.operand); IP++; } break; case IFEQ: // follow the rule of smile's Math class. double a = pop(); double b = pop(); double absa = Math.abs(a); double absb = Math.abs(b); if(a == b || Math.abs(a - b) <= Math.min(absa, absb) * 2.2204460492503131e-16) if(isInt(currentOperation.operand)) IP = Integer.parseInt(currentOperation.operand); else IP = jumpMap.get(currentOperation.operand); else IP++; break; case IFGR: double lower = pop(); double upper = pop(); if(upper > lower) if(isInt(currentOperation.operand)) IP = Integer.parseInt(currentOperation.operand); else IP = jumpMap.get(currentOperation.operand); else IP++; break; case POP: valuesMap.put(currentOperation.operand, pop()); IP++; break; case PUSH: if(isDouble(currentOperation.operand)) push(Double.parseDouble(currentOperation.operand)); else { Double v = valuesMap.get(currentOperation.operand); if(v == null) { throw new VMRuntimeException("value is not binded: " + currentOperation.operand); } push(v); } IP++; break; default: throw new IllegalArgumentException("Machine code has wrong opcode :" + currentOperation.op); } return true; } #location 12 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Nonnull public byte[] predictSerCodegen(boolean compress) throws HiveException { try { if (compress) { return ObjectUtils.toCompressedBytes(_root); } else { return ObjectUtils.toBytes(_root); } } catch (IOException ioe) { throw new HiveException("IOException cause while serializing DecisionTree object", ioe); } catch (Exception e) { throw new HiveException("Exception cause while serializing DecisionTree object", e); } }
#vulnerable code @Nonnull public byte[] predictSerCodegen(boolean compress) throws HiveException { final Attribute[] attrs = _attributes; assert (attrs != null); FastMultiByteArrayOutputStream bos = new FastMultiByteArrayOutputStream(); OutputStream wrapped = compress ? new DeflaterOutputStream(bos) : bos; ObjectOutputStream oos = null; try { oos = new ObjectOutputStream(wrapped); _root.writeExternal(oos); oos.flush(); } catch (IOException ioe) { throw new HiveException("IOException cause while serializing DecisionTree object", ioe); } catch (Exception e) { throw new HiveException("Exception cause while serializing DecisionTree object", e); } finally { IOUtils.closeQuietly(oos); } return bos.toByteArray_clear(); } #location 20 #vulnerability type RESOURCE_LEAK
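This patch (and the identical rows below) replaces the hand-rolled ObjectOutputStream/DeflaterOutputStream handling with ObjectUtils.toBytes and toCompressedBytes. ObjectUtils is Hivemall's own helper; a stand-in showing roughly what such a helper has to do, using only java.io and java.util.zip, might look like this:

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.ObjectOutputStream;
    import java.io.Serializable;
    import java.util.zip.DeflaterOutputStream;

    // Illustrative stand-in: closing the ObjectOutputStream finishes the
    // deflater (when compressing) and flushes everything into the byte buffer.
    static byte[] toBytes(Serializable obj, boolean compress) throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        try (ObjectOutputStream oos =
                new ObjectOutputStream(compress ? new DeflaterOutputStream(bos) : bos)) {
            oos.writeObject(obj);
        }
        return bos.toByteArray();
    }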
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Nonnull public byte[] predictSerCodegen(boolean compress) throws HiveException { try { if (compress) { return ObjectUtils.toCompressedBytes(_root); } else { return ObjectUtils.toBytes(_root); } } catch (IOException ioe) { throw new HiveException("IOException cause while serializing DecisionTree object", ioe); } catch (Exception e) { throw new HiveException("Exception cause while serializing DecisionTree object", e); } }
#vulnerable code @Nonnull public byte[] predictSerCodegen(boolean compress) throws HiveException { final Attribute[] attrs = _attributes; assert (attrs != null); FastMultiByteArrayOutputStream bos = new FastMultiByteArrayOutputStream(); OutputStream wrapped = compress ? new DeflaterOutputStream(bos) : bos; ObjectOutputStream oos = null; try { oos = new ObjectOutputStream(wrapped); _root.writeExternal(oos); oos.flush(); } catch (IOException ioe) { throw new HiveException("IOException cause while serializing DecisionTree object", ioe); } catch (Exception e) { throw new HiveException("Exception cause while serializing DecisionTree object", e); } finally { IOUtils.closeQuietly(oos); } return bos.toByteArray_clear(); } #location 18 #vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void test2() { List<String> ftvec1 = Arrays.asList("1:1.0", "2:3.0", "3:3.0"); List<String> ftvec2 = Arrays.asList("1:2.0", "3:6.0"); double d = EuclidDistanceUDF.euclidDistance(ftvec1, ftvec2); Assert.assertEquals(Math.sqrt(1.0 + 9.0 + 9.0), d, 0.f); }
#vulnerable code @Test public void test2() { EuclidDistanceUDF udf = new EuclidDistanceUDF(); List<String> ftvec1 = Arrays.asList("1:1.0", "2:3.0", "3:3.0"); List<String> ftvec2 = Arrays.asList("1:2.0", "3:6.0"); FloatWritable d = udf.evaluate(ftvec1, ftvec2); Assert.assertEquals((float) Math.sqrt(1.0 + 9.0 + 9.0), d.get(), 0.f); } #location 6 #vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code public static Node deserializeNode(final byte[] serializedObj, final int length, final boolean compressed) throws HiveException { final Node root = new Node(); try { if (compressed) { ObjectUtils.readCompressedObject(serializedObj, 0, length, root); } else { ObjectUtils.readObject(serializedObj, length, root); } } catch (IOException ioe) { throw new HiveException("IOException cause while deserializing DecisionTree object", ioe); } catch (Exception e) { throw new HiveException("Exception cause while deserializing DecisionTree object", e); } return root; }
#vulnerable code public static Node deserializeNode(final byte[] serializedObj, final int length, final boolean compressed) throws HiveException { FastByteArrayInputStream bis = new FastByteArrayInputStream(serializedObj, length); InputStream wrapped = compressed ? new InflaterInputStream(bis) : bis; final Node root; ObjectInputStream ois = null; try { ois = new ObjectInputStream(wrapped); root = new Node(); root.readExternal(ois); } catch (IOException ioe) { throw new HiveException("IOException cause while deserializing DecisionTree object", ioe); } catch (Exception e) { throw new HiveException("Exception cause while deserializing DecisionTree object", e); } finally { IOUtils.closeQuietly(ois); } return root; } #location 9 #vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code private static int evalPredict(DecisionTree tree, double[] x) throws HiveException, IOException { ArrayList<String> opScript = tree.predictOpCodegen(); System.out.println(opScript); TreePredictByStackMachineUDF udf = new TreePredictByStackMachineUDF(); udf.initialize(new ObjectInspector[] { PrimitiveObjectInspectorFactory.javaStringObjectInspector, ObjectInspectorFactory.getStandardListObjectInspector(PrimitiveObjectInspectorFactory.javaDoubleObjectInspector) }); IntWritable result = (IntWritable) udf.evaluate(opScript, x, true); udf.close(); return result.get(); }
#vulnerable code private static int evalPredict(DecisionTree tree, double[] x) throws HiveException, IOException { ArrayList<String> opScript = tree.predictOpCodegen(); System.out.println(opScript); VMTreePredictTrustedUDF udf = new VMTreePredictTrustedUDF(); udf.initialize(new ObjectInspector[] { PrimitiveObjectInspectorFactory.javaStringObjectInspector, ObjectInspectorFactory.getStandardListObjectInspector(PrimitiveObjectInspectorFactory.javaDoubleObjectInspector) }); IntWritable result = (IntWritable) udf.evaluate(opScript, x, true); udf.close(); return result.get(); } #location 10 #vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Nonnull public byte[] predictSerCodegen(boolean compress) throws HiveException { try { if (compress) { return ObjectUtils.toCompressedBytes(_root); } else { return ObjectUtils.toBytes(_root); } } catch (IOException ioe) { throw new HiveException("IOException cause while serializing DecisionTree object", ioe); } catch (Exception e) { throw new HiveException("Exception cause while serializing DecisionTree object", e); } }
#vulnerable code @Nonnull public byte[] predictSerCodegen(boolean compress) throws HiveException { FastMultiByteArrayOutputStream bos = new FastMultiByteArrayOutputStream(); OutputStream wrapped = compress ? new DeflaterOutputStream(bos) : bos; ObjectOutputStream oos = null; try { oos = new ObjectOutputStream(wrapped); _root.writeExternal(oos); oos.flush(); } catch (IOException ioe) { throw new HiveException("IOException cause while serializing DecisionTree object", ioe); } catch (Exception e) { throw new HiveException("Exception cause while serializing DecisionTree object", e); } finally { IOUtils.closeQuietly(oos); } return bos.toByteArray_clear(); } #location 17 #vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Nonnull public byte[] predictSerCodegen(boolean compress) throws HiveException { try { if (compress) { return ObjectUtils.toCompressedBytes(_root); } else { return ObjectUtils.toBytes(_root); } } catch (IOException ioe) { throw new HiveException("IOException cause while serializing DecisionTree object", ioe); } catch (Exception e) { throw new HiveException("Exception cause while serializing DecisionTree object", e); } }
#vulnerable code @Nonnull public byte[] predictSerCodegen(boolean compress) throws HiveException { FastMultiByteArrayOutputStream bos = new FastMultiByteArrayOutputStream(); OutputStream wrapped = compress ? new DeflaterOutputStream(bos) : bos; ObjectOutputStream oos = null; try { oos = new ObjectOutputStream(wrapped); _root.writeExternal(oos); oos.flush(); } catch (IOException ioe) { throw new HiveException("IOException cause while serializing DecisionTree object", ioe); } catch (Exception e) { throw new HiveException("Exception cause while serializing DecisionTree object", e); } finally { IOUtils.closeQuietly(oos); } return bos.toByteArray_clear(); } #location 10 #vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public float getCovariance(float scale) { return 1.f / (sum_inv_covar * scale); }
#vulnerable code @Override public float getCovariance(float scale) { assert (num_updates > 0) : num_updates; return (sum_inv_covar * scale) * num_updates; // Harmonic mean } #location 3 #vulnerability type UNSAFE_GUARDED_BY_ACCESS
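UNSAFE_GUARDED_BY_ACCESS flags a read of a field that is documented as guarded by a lock but is accessed without holding it; the fix above removes the unguarded read of num_updates entirely. As a general illustration of the rule (field and lock names below are hypothetical, not Hivemall's):

    // Illustrative sketch: every access to sumInvCovar, reads included, happens
    // while holding the same lock that the writers use.
    public final class GuardedCovariance {
        private final Object lock = new Object();
        private float sumInvCovar = 1.f; // guarded by "lock"

        void update(float invCovar) {
            synchronized (lock) {
                sumInvCovar += invCovar;
            }
        }

        float getCovariance(float scale) {
            synchronized (lock) {
                return 1.f / (sumInvCovar * scale);
            }
        }
    }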
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code private static int evalPredict(RegressionTree tree, double[] x) throws HiveException, IOException { ArrayList<String> opScript = tree.predictOpCodegen(); System.out.println(opScript); TreePredictByStackMachineUDF udf = new TreePredictByStackMachineUDF(); udf.initialize(new ObjectInspector[] { PrimitiveObjectInspectorFactory.javaStringObjectInspector, ObjectInspectorFactory.getStandardListObjectInspector(PrimitiveObjectInspectorFactory.javaDoubleObjectInspector) }); IntWritable result = (IntWritable) udf.evaluate(opScript, x, true); udf.close(); return result.get(); }
#vulnerable code private static int evalPredict(RegressionTree tree, double[] x) throws HiveException, IOException { ArrayList<String> opScript = tree.predictOpCodegen(); System.out.println(opScript); VMTreePredictTrustedUDF udf = new VMTreePredictTrustedUDF(); udf.initialize(new ObjectInspector[] { PrimitiveObjectInspectorFactory.javaStringObjectInspector, ObjectInspectorFactory.getStandardListObjectInspector(PrimitiveObjectInspectorFactory.javaDoubleObjectInspector) }); IntWritable result = (IntWritable) udf.evaluate(opScript, x, true); udf.close(); return result.get(); } #location 11 #vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code private static int evalPredict(DecisionTree tree, double[] x) throws HiveException, IOException { TreePredictByStackMachineUDF udf = new TreePredictByStackMachineUDF(); String opScript = tree.predictOpCodegen(StackMachine.SEP); debugPrint(opScript); IntWritable result = (IntWritable) udf.evaluate(opScript, x, true); udf.close(); return result.get(); }
#vulnerable code private static int evalPredict(DecisionTree tree, double[] x) throws HiveException, IOException { String opScript = tree.predictOpCodegen(StackMachine.SEP); debugPrint(opScript); IntWritable result = (IntWritable) TreePredictByStackMachineUDF.evaluate(opScript, x, true); return result.get(); } #location 5 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code public static Node deserializeNode(final byte[] serializedObj, final int length, final boolean compressed) throws HiveException { final Node root = new Node(); try { if (compressed) { ObjectUtils.readCompressedObject(serializedObj, 0, length, root); } else { ObjectUtils.readObject(serializedObj, length, root); } } catch (IOException ioe) { throw new HiveException("IOException cause while deserializing DecisionTree object", ioe); } catch (Exception e) { throw new HiveException("Exception cause while deserializing DecisionTree object", e); } return root; }
#vulnerable code public static Node deserializeNode(final byte[] serializedObj, final int length, final boolean compressed) throws HiveException { FastByteArrayInputStream bis = new FastByteArrayInputStream(serializedObj, length); InputStream wrapped = compressed ? new InflaterInputStream(bis) : bis; final Node root; ObjectInputStream ois = null; try { ois = new ObjectInputStream(wrapped); root = new Node(); root.readExternal(ois); } catch (IOException ioe) { throw new HiveException("IOException cause while deserializing DecisionTree object", ioe); } catch (Exception e) { throw new HiveException("Exception cause while deserializing DecisionTree object", e); } finally { IOUtils.closeQuietly(ois); } return root; } #location 9 #vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code public static Node deserializeNode(final byte[] serializedObj, final int length, final boolean compressed) throws HiveException { final Node root = new Node(); try { if (compressed) { ObjectUtils.readCompressedObject(serializedObj, 0, length, root); } else { ObjectUtils.readObject(serializedObj, length, root); } } catch (IOException ioe) { throw new HiveException("IOException cause while deserializing DecisionTree object", ioe); } catch (Exception e) { throw new HiveException("Exception cause while deserializing DecisionTree object", e); } return root; }
#vulnerable code public static Node deserializeNode(final byte[] serializedObj, final int length, final boolean compressed) throws HiveException { FastByteArrayInputStream bis = new FastByteArrayInputStream(serializedObj, length); InputStream wrapped = compressed ? new InflaterInputStream(bis) : bis; final Node root; ObjectInputStream ois = null; try { ois = new ObjectInputStream(wrapped); root = new Node(); root.readExternal(ois); } catch (IOException ioe) { throw new HiveException("IOException cause while deserializing DecisionTree object", ioe); } catch (Exception e) { throw new HiveException("Exception cause while deserializing DecisionTree object", e); } finally { IOUtils.closeQuietly(ois); } return root; } #location 18 #vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code public static Node deserializeNode(final byte[] serializedObj, final int length, final boolean compressed) throws HiveException { final Node root = new Node(); try { if (compressed) { ObjectUtils.readCompressedObject(serializedObj, 0, length, root); } else { ObjectUtils.readObject(serializedObj, length, root); } } catch (IOException ioe) { throw new HiveException("IOException cause while deserializing DecisionTree object", ioe); } catch (Exception e) { throw new HiveException("Exception cause while deserializing DecisionTree object", e); } return root; }
#vulnerable code public static Node deserializeNode(final byte[] serializedObj, final int length, final boolean compressed) throws HiveException { FastByteArrayInputStream bis = new FastByteArrayInputStream(serializedObj, length); InputStream wrapped = compressed ? new InflaterInputStream(bis) : bis; final Node root; ObjectInputStream ois = null; try { ois = new ObjectInputStream(wrapped); root = new Node(); root.readExternal(ois); } catch (IOException ioe) { throw new HiveException("IOException cause while deserializing DecisionTree object", ioe); } catch (Exception e) { throw new HiveException("Exception cause while deserializing DecisionTree object", e); } finally { IOUtils.closeQuietly(ois); } return root; } #location 9 #vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code private static long loadPredictionModel(PredictionModel model, File file, PrimitiveObjectInspector keyOI, WritableFloatObjectInspector valueOI) throws IOException, SerDeException { long count = 0L; if(!file.exists()) { return count; } if(!file.getName().endsWith(".crc")) { if(file.isDirectory()) { for(File f : file.listFiles()) { count += loadPredictionModel(model, f, keyOI, valueOI); } } else { LazySimpleSerDe serde = HiveUtils.getKeyValueLineSerde(keyOI, valueOI); StructObjectInspector lineOI = (StructObjectInspector) serde.getObjectInspector(); StructField keyRef = lineOI.getStructFieldRef("key"); StructField valueRef = lineOI.getStructFieldRef("value"); PrimitiveObjectInspector keyRefOI = (PrimitiveObjectInspector) keyRef.getFieldObjectInspector(); FloatObjectInspector varRefOI = (FloatObjectInspector) valueRef.getFieldObjectInspector(); BufferedReader reader = null; try { reader = HadoopUtils.getBufferedReader(file); String line; while((line = reader.readLine()) != null) { count++; Text lineText = new Text(line); Object lineObj = serde.deserialize(lineText); List<Object> fields = lineOI.getStructFieldsDataAsList(lineObj); Object f0 = fields.get(0); Object f1 = fields.get(1); if(f0 == null || f1 == null) { continue; // avoid the case that key or value is null } Object k = keyRefOI.getPrimitiveWritableObject(keyRefOI.copyObject(f0)); float v = varRefOI.get(f1); model.set(k, new WeightValue(v, false)); } } finally { IOUtils.closeQuietly(reader); } } } return count; }
#vulnerable code private static long loadPredictionModel(PredictionModel model, File file, PrimitiveObjectInspector keyOI, WritableFloatObjectInspector valueOI) throws IOException, SerDeException { long count = 0L; if(!file.exists()) { return count; } if(!file.getName().endsWith(".crc")) { if(file.isDirectory()) { for(File f : file.listFiles()) { count += loadPredictionModel(model, f, keyOI, valueOI); } } else { LazySimpleSerDe serde = HiveUtils.getKeyValueLineSerde(keyOI, valueOI); StructObjectInspector lineOI = (StructObjectInspector) serde.getObjectInspector(); StructField keyRef = lineOI.getStructFieldRef("key"); StructField valueRef = lineOI.getStructFieldRef("value"); PrimitiveObjectInspector keyRefOI = (PrimitiveObjectInspector) keyRef.getFieldObjectInspector(); FloatObjectInspector varRefOI = (FloatObjectInspector) valueRef.getFieldObjectInspector(); final BufferedReader reader = HadoopUtils.getBufferedReader(file); try { String line; while((line = reader.readLine()) != null) { count++; Text lineText = new Text(line); Object lineObj = serde.deserialize(lineText); List<Object> fields = lineOI.getStructFieldsDataAsList(lineObj); Object f0 = fields.get(0); Object f1 = fields.get(1); if(f0 == null || f1 == null) { continue; // avoid the case that key or value is null } Object k = keyRefOI.getPrimitiveWritableObject(keyRefOI.copyObject(f0)); float v = varRefOI.get(f1); model.set(k, new WeightValue(v, false)); } } finally { reader.close(); } } } return count; } #location 38 #vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code public static Node deserializeNode(final byte[] serializedObj, final int length, final boolean compressed) throws HiveException { final Node root = new Node(); try { if (compressed) { ObjectUtils.readCompressedObject(serializedObj, 0, length, root); } else { ObjectUtils.readObject(serializedObj, length, root); } } catch (IOException ioe) { throw new HiveException("IOException cause while deserializing DecisionTree object", ioe); } catch (Exception e) { throw new HiveException("Exception cause while deserializing DecisionTree object", e); } return root; }
#vulnerable code public static Node deserializeNode(final byte[] serializedObj, final int length, final boolean compressed) throws HiveException { FastByteArrayInputStream bis = new FastByteArrayInputStream(serializedObj, length); InputStream wrapped = compressed ? new InflaterInputStream(bis) : bis; final Node root; ObjectInputStream ois = null; try { ois = new ObjectInputStream(wrapped); root = new Node(); root.readExternal(ois); } catch (IOException ioe) { throw new HiveException("IOException cause while deserializing DecisionTree object", ioe); } catch (Exception e) { throw new HiveException("Exception cause while deserializing DecisionTree object", e); } finally { IOUtils.closeQuietly(ois); } return root; } #location 18 #vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code public static void main(String[] args) throws Exception { String fileIn = null; String fileOut = null; String templateEngineKind = null; String jsonData = null; String jsonFile = null; String metadataFile = null; boolean autoGenData = false; IDataProvider dataProvider = null; String arg = null; for (int i = 0; i < args.length; i++) { arg = args[i]; if ("-in".equals(arg)) { fileIn = getValue(args, i); } else if ("-out".equals(arg)) { fileOut = getValue(args, i); } else if ("-engine".equals(arg)) { templateEngineKind = getValue(args, i); } else if ("-jsonData".equals(arg)) { jsonData = getValue(args, i); Map<String, String> parameters = new HashMap<String, String>(); parameters.put("jsonData", jsonData); dataProvider = DataProviderFactoryRegistry.getRegistry() .create("json", parameters); } else if ("-jsonFile".equals(arg)) { jsonFile = getValue(args, i); } else if ("-autoGenData".equals(arg)) { autoGenData = StringUtils.asBoolean(getValue(args, i), false); } else if ("-metadataFile".equals(arg)) { metadataFile = getValue(args, i); } } FieldsMetadata fieldsMetadata = null; if (metadataFile != null) { fieldsMetadata = FieldsMetadataXMLSerializer.getInstance().load( new FileInputStream(metadataFile)); } if (!StringUtils.isEmpty(jsonFile)) { StringWriter jsonDataWriter = new StringWriter(); File f = new File(jsonFile); if (!f.exists() && autoGenData && fieldsMetadata != null) { // Generate JSON FieldsMetadataJSONSerializer.getInstance().save(fieldsMetadata, new FileOutputStream(jsonFile), true); } IOUtils.copy(new FileReader(f), jsonDataWriter); Map<String, String> parameters = new HashMap<String, String>(); parameters.put("jsonData", jsonDataWriter.toString()); dataProvider = DataProviderFactoryRegistry.getRegistry().create( "json", parameters); } Tools tools = new Tools(); tools.process(new File(fileIn), new File(fileOut), templateEngineKind, fieldsMetadata, dataProvider); }
#vulnerable code public static void main(String[] args) throws Exception { String fileIn = null; String fileOut = null; String templateEngineKind = null; String jsonData = null; String jsonFile = null; IPopulateContextAware contextAware = null; String arg = null; for (int i = 0; i < args.length; i++) { arg = args[i]; if ("-in".equals(arg)) { fileIn = getValue(args, i); } else if ("-out".equals(arg)) { fileOut = getValue(args, i); } else if ("-engine".equals(arg)) { templateEngineKind = getValue(args, i); } else if ("-jsonData".equals(arg)) { jsonData = getValue(args, i); contextAware = new JSONPoupluateContextAware(jsonData); } else if ("-jsonFile".equals(arg)) { jsonFile = getValue(args, i); StringWriter jsonDataWriter = new StringWriter(); IOUtils.copy(new FileReader(new File(jsonFile)), jsonDataWriter); contextAware = new JSONPoupluateContextAware( jsonDataWriter.toString()); } } Tools tools = new Tools(); tools.process(new File(fileIn), new File(fileOut), templateEngineKind, contextAware); } #location 25 #vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void testBoldWithB() throws Exception { IContext context = new MockContext(); BufferedElement parent = null; ITextStylingTransformer formatter = HTMLTextStylingTransformer.INSTANCE; IDocumentHandler handler = new DocxDocumentHandler( parent, context, "word/document.xml" ); formatter.transform( "<b>text</b>", handler ); Assert.assertEquals( "<w:r><w:rPr><w:b /></w:rPr><w:t xml:space=\"preserve\" >text</w:t></w:r>", handler.getTextBody() ); }
#vulnerable code @Test public void testBoldWithB() throws Exception { IContext context = null; BufferedElement parent = null; ITextStylingTransformer formatter = HTMLTextStylingTransformer.INSTANCE; IDocumentHandler handler = new DocxDocumentHandler( parent, context, "word/document.xml" ); formatter.transform( "<b>text</b>", handler ); Assert.assertEquals( "<w:r><w:rPr><w:b /></w:rPr><w:t xml:space=\"preserve\" >text</w:t></w:r>", handler.getTextBody() ); } #location 9 #vulnerability type NULL_DEREFERENCE
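This row, and the similar Docx text-styling test rows further down, are NULL_DEREFERENCE reports that are all fixed the same way: the null IContext handed to DocxDocumentHandler is replaced by a MockContext instance, so the handler has a real object to call. The fragment below sketches that stub-instead-of-null arrangement with invented types; it is not the xdocreport API, just an illustration of why the fixed tests construct a mock.

import java.util.HashMap;
import java.util.Map;

// Hypothetical collaborator interface standing in for IContext.
interface Context {
    Object get(String key);
}

// Minimal stub so tests never hand a null context to code that will
// dereference it, mirroring the MockContext used by the fixed tests.
final class StubContext implements Context {
    private final Map<String, Object> values = new HashMap<>();

    @Override
    public Object get(String key) {
        return values.get(key);
    }
}

final class HandlerUnderTest {
    private final Context context;

    HandlerUnderTest(Context context) {
        this.context = context;
    }

    // Would throw NullPointerException if the test passed context == null.
    String describe(String key) {
        return key + "=" + context.get(key);
    }
}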
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void processReportWithOptions() throws IOException { WebClient client = WebClient.create( BASE_ADDRESS ); client.path( "processReport" ); client.accept( MediaType.APPLICATION_XML ); ReportAndDataRepresentation report = new ReportAndDataRepresentation(); InputStream in = RESTXDocReportServiceTest.class.getClassLoader().getResourceAsStream( "bo.docx" ); report.setReportID( "reportID1" ); report.setDocument( fr.opensagres.xdocreport.core.io.IOUtils.toByteArray( in ) ); report.setTemplateEngine( "Velocity" ); report.getFieldsMetaData().add( "test" ); report.setTemplateEngine( "Velocity" ); report.setDataContext( new ArrayList<DataContext>() ); WSOptions options = new WSOptions(); options.setFrom( DocumentKind.DOCX.name() ); options.setTo( ConverterTypeTo.PDF.name() ); options.setVia( ConverterTypeVia.ITEXT.name() ); report.setOptions( options ); //client.post( report); byte[] flux= client.post( report,byte[].class ); assertNotNull(flux); // // File aFile= new File( "target/result.pdf"); // FileOutputStream fos= new FileOutputStream( aFile ); // fos.write( flux ); // fos.close(); }
#vulnerable code @Test public void processReportWithOptions() throws IOException { WebClient client = WebClient.create( BASE_ADDRESS ); client.path( "processReport" ); client.accept( MediaType.APPLICATION_XML ); ReportAndDataRepresentation report = new ReportAndDataRepresentation(); InputStream in = RESTXDocReportServiceTest.class.getClassLoader().getResourceAsStream( "bo.docx" ); report.setReportID( "reportID1" ); report.setDocument( fr.opensagres.xdocreport.core.io.IOUtils.toByteArray( in ) ); report.setTemplateEngine( "Velocity" ); report.getFieldsMetaData().add( "test" ); report.setTemplateEngine( "Velocity" ); report.setDataContext( new ArrayList<DataContext>() ); WSOptions options = new WSOptions(); options.setFrom( DocumentKind.DOCX.name() ); options.setTo( ConverterTypeTo.PDF.name() ); options.setVia( ConverterTypeVia.ITEXT.name() ); report.setOptions( options ); //client.post( report); byte[] flux= client.post( report,byte[].class ); assertNotNull(flux); File aFile= new File( "result.pdf"); FileOutputStream fos= new FileOutputStream( aFile ); fos.write( flux ); fos.close(); } #location 35 #vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code private static void processUpload( ResourcesService client, String resources, String out ) throws IOException, ResourcesException { if ( StringUtils.isEmpty( resources ) ) { throw new IOException( "resources must be not empty" ); } if ( resources.indexOf( ";" ) == -1 ) { processUpload( client, resources, new File( out ) ); } else { // TODO : manage list of uppload } // String[] resources= s.split( ";" ); // String[] outs= out.split( ";" ); }
#vulnerable code private static void processUpload( ResourcesService client, String resources, String out ) throws IOException, ResourcesException { if ( StringUtils.isEmpty( resources ) ) { throw new IOException( "resources must be not empty" ); } if ( resources.indexOf( ";" ) == -1 ) { processUpload( client, resources, IOUtils.toByteArray( new FileInputStream( new File( out ) ) ) ); } else { // TODO : manage list of uppload } // String[] resources= s.split( ";" ); // String[] outs= out.split( ";" ); } #location 10 #vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void testItalicWithI() throws Exception { IContext context = new MockContext(); BufferedElement parent = null; ITextStylingTransformer formatter = HTMLTextStylingTransformer.INSTANCE; IDocumentHandler handler = new DocxDocumentHandler( parent, context, "word/document.xml" ); formatter.transform( "<i>text</i>", handler ); Assert.assertEquals( "<w:r><w:rPr><w:i /></w:rPr><w:t xml:space=\"preserve\" >text</w:t></w:r>", handler.getTextBody() ); }
#vulnerable code @Test public void testItalicWithI() throws Exception { IContext context = null; BufferedElement parent = null; ITextStylingTransformer formatter = HTMLTextStylingTransformer.INSTANCE; IDocumentHandler handler = new DocxDocumentHandler( parent, context, "word/document.xml" ); formatter.transform( "<i>text</i>", handler ); Assert.assertEquals( "<w:r><w:rPr><w:i /></w:rPr><w:t xml:space=\"preserve\" >text</w:t></w:r>", handler.getTextBody() ); } #location 9 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void loadPNGWithoutUsingImageSizeAndForceWidth() throws Exception { IImageProvider imageProvider = new ByteArrayImageProvider( ByteArrayImageProviderTestCase.class.getResourceAsStream( "logo.png" ) ); imageProvider.setWidth( 100f ); Assert.assertEquals( ImageFormat.png, imageProvider.getImageFormat() ); Assert.assertNotNull( imageProvider.getWidth(null) ); Assert.assertEquals( 100f, imageProvider.getWidth(null).floatValue(), 0 ); Assert.assertNull( imageProvider.getHeight(null) ); }
#vulnerable code @Test public void loadPNGWithoutUsingImageSizeAndForceWidth() throws Exception { IImageProvider imageProvider = new ByteArrayImageProvider( ByteArrayImageProviderTestCase.class.getResourceAsStream( "logo.png" ) ); imageProvider.setWidth( 100f ); Assert.assertEquals( ImageFormat.png, imageProvider.getImageFormat() ); Assert.assertNotNull( imageProvider.getWidth(0) ); Assert.assertEquals( 100f, imageProvider.getWidth(0).floatValue(), 0 ); Assert.assertNull( imageProvider.getHeight(0) ); } #location 10 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void testBoldWithStrong() throws Exception { IContext context = new MockContext(); BufferedElement parent = null; ITextStylingTransformer formatter = HTMLTextStylingTransformer.INSTANCE; IDocumentHandler handler = new DocxDocumentHandler( parent, context, "word/document.xml" ); formatter.transform( "<strong>text</strong>", handler ); Assert.assertEquals( "<w:r><w:rPr><w:b /></w:rPr><w:t xml:space=\"preserve\" >text</w:t></w:r>", handler.getTextBody() ); }
#vulnerable code @Test public void testBoldWithStrong() throws Exception { IContext context = null; BufferedElement parent = null; ITextStylingTransformer formatter = HTMLTextStylingTransformer.INSTANCE; IDocumentHandler handler = new DocxDocumentHandler( parent, context, "word/document.xml" ); formatter.transform( "<strong>text</strong>", handler ); Assert.assertEquals( "<w:r><w:rPr><w:b /></w:rPr><w:t xml:space=\"preserve\" >text</w:t></w:r>", handler.getTextBody() ); } #location 9 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void loadPNGWithoutUsingImageSizeAndForceWidth() throws Exception { IImageProvider imageProvider = new ClassPathImageProvider( ClassPathImageProviderTestCase.class, "logo.png" ); imageProvider.setWidth( 100f ); Assert.assertEquals( ImageFormat.png, imageProvider.getImageFormat() ); Assert.assertNotNull( imageProvider.getWidth(null) ); Assert.assertEquals( 100f, imageProvider.getWidth(null).floatValue(), 0 ); Assert.assertNull( imageProvider.getHeight(null) ); }
#vulnerable code @Test public void loadPNGWithoutUsingImageSizeAndForceWidth() throws Exception { IImageProvider imageProvider = new ClassPathImageProvider( ClassPathImageProviderTestCase.class, "logo.png" ); imageProvider.setWidth( 100f ); Assert.assertEquals( ImageFormat.png, imageProvider.getImageFormat() ); Assert.assertNotNull( imageProvider.getWidth(0) ); Assert.assertEquals( 100f, imageProvider.getWidth(0).floatValue(), 0 ); Assert.assertNull( imageProvider.getHeight(0) ); } #location 9 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void testItalicWithEm() throws Exception { IContext context = new MockContext(); BufferedElement parent = null; ITextStylingTransformer formatter = HTMLTextStylingTransformer.INSTANCE; IDocumentHandler handler = new DocxDocumentHandler( parent, context, "word/document.xml" ); formatter.transform( "<em>text</em>", handler ); Assert.assertEquals( "<w:r><w:rPr><w:i /></w:rPr><w:t xml:space=\"preserve\" >text</w:t></w:r>", handler.getTextBody() ); }
#vulnerable code @Test public void testItalicWithEm() throws Exception { IContext context = null; BufferedElement parent = null; ITextStylingTransformer formatter = HTMLTextStylingTransformer.INSTANCE; IDocumentHandler handler = new DocxDocumentHandler( parent, context, "word/document.xml" ); formatter.transform( "<em>text</em>", handler ); Assert.assertEquals( "<w:r><w:rPr><w:i /></w:rPr><w:t xml:space=\"preserve\" >text</w:t></w:r>", handler.getTextBody() ); } #location 9 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code public void process() { Collection<BufferedElement> toRemove = new ArrayList<BufferedElement>(); int size = arBufferedRegions.size(); String s = null; StringBuilder fullContent = new StringBuilder(); boolean fieldFound = false; ARBufferedRegion currentAR = null; ARBufferedRegion lastAR = null; for (int i = 0; i < size; i++) { currentAR = arBufferedRegions.get(i); s = currentAR.getTContent(); if (fieldFound) { fieldFound = !(s == null || s.length() == 0 || Character .isWhitespace(s.charAt(0))); } else { fieldFound = s != null && s.indexOf("$") != -1; } if (fieldFound) { fullContent.append(s); toRemove.add(currentAR); } else { update(toRemove, fullContent, lastAR); } lastAR = currentAR; } update(toRemove, fullContent, lastAR); super.removeAll(toRemove); }
#vulnerable code public void process() { Collection<BufferedElement> toRemove = new ArrayList<BufferedElement>(); int size = arBufferedRegions.size(); String s = null; StringBuilder fullContent = new StringBuilder(); boolean fieldFound = false; ARBufferedRegion currentAR = null; ARBufferedRegion lastAR = null; for (int i = 0; i < size; i++) { currentAR = arBufferedRegions.get(i); s = currentAR.getTContent(); if (fieldFound) { fieldFound = !(s == null || s.length() == 0 || Character .isWhitespace(s.charAt(0))); } else { fieldFound = s != null && s.indexOf("$") != -1; } if (fieldFound) { fullContent.append(s); toRemove.add(currentAR); } else { if (fullContent.length() > 0) { lastAR.setTContent(fullContent.toString()); fullContent.setLength(0); toRemove.remove(lastAR); } } lastAR = currentAR; } if (fullContent.length() > 0) { lastAR.setTContent(fullContent.toString()); fullContent.setLength(0); toRemove.remove(lastAR); } super.removeAll(toRemove); } #location 24 #vulnerability type NULL_DEREFERENCE
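In the process() pair, the vulnerable version repeats the flush block (write fullContent into lastAR, reset the builder, drop lastAR from toRemove) in two places and dereferences lastAR there, which the analyser cannot prove non-null; the patch folds both occurrences into a single update(...) helper whose body is not included in the record. The helper below is therefore only a guess at its likely shape, a null-guarded flush written against simplified types.

import java.util.Collection;

final class RegionFlushSketch {

    // Flush accumulated text into the previously visited region, if any.
    // The explicit null check is what removes the NULL_DEREFERENCE: on the
    // very first pass there is no previous region to write into.
    static void update(Collection<StringBuilder> toRemove,
                       StringBuilder pending,
                       StringBuilder lastRegion) {
        if (lastRegion == null || pending.length() == 0) {
            return;
        }
        lastRegion.setLength(0);
        lastRegion.append(pending);
        pending.setLength(0);
        toRemove.remove(lastRegion);
    }
}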
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @RequestMapping(path = "/create", method = RequestMethod.GET) public String createViewForm(final ViewForm viewForm, final Model model) { // Setup breadcrumbs if (!model.containsAttribute("BreadCrumbs")) { setupBreadCrumbs(model, "Create", null); } // Retrieve all clusters model.addAttribute("clusters", clusterRepository.findAllByOrderByNameAsc()); // Retrieve all message formats model.addAttribute("defaultMessageFormats", messageFormatRepository.findByIsDefaultFormatOrderByNameAsc(true)); model.addAttribute("customMessageFormats", messageFormatRepository.findByIsDefaultFormatOrderByNameAsc(false)); // If we have a cluster Id model.addAttribute("topics", new ArrayList<>()); model.addAttribute("partitions", new ArrayList<>()); // Retrieve all filters model.addAttribute("filterList", filterRepository.findAllByOrderByNameAsc()); model.addAttribute("filterParameters", new HashMap<Long, Map<String, String>>()); if (viewForm.getClusterId() != null) { // Lets load the topics now // Retrieve cluster clusterRepository.findById(viewForm.getClusterId()).ifPresent((cluster) -> { try (final KafkaOperations operations = kafkaOperationsFactory.create(cluster, getLoggedInUserId())) { final TopicList topics = operations.getAvailableTopics(); model.addAttribute("topics", topics.getTopics()); // If we have a selected topic if (viewForm.getTopic() != null && !"!".equals(viewForm.getTopic())) { final TopicDetails topicDetails = operations.getTopicDetails(viewForm.getTopic()); model.addAttribute("partitions", topicDetails.getPartitions()); } } }); } return "configuration/view/create"; }
#vulnerable code @RequestMapping(path = "/create", method = RequestMethod.GET) public String createViewForm(final ViewForm viewForm, final Model model) { // Setup breadcrumbs if (!model.containsAttribute("BreadCrumbs")) { setupBreadCrumbs(model, "Create", null); } // Retrieve all clusters model.addAttribute("clusters", clusterRepository.findAllByOrderByNameAsc()); // Retrieve all message formats model.addAttribute("defaultMessageFormats", messageFormatRepository.findByIsDefaultFormatOrderByNameAsc(true)); model.addAttribute("customMessageFormats", messageFormatRepository.findByIsDefaultFormatOrderByNameAsc(false)); // If we have a cluster Id model.addAttribute("topics", new ArrayList<>()); model.addAttribute("partitions", new ArrayList<>()); // Retrieve all filters model.addAttribute("filterList", filterRepository.findAllByOrderByNameAsc()); model.addAttribute("filterParameters", new HashMap<Long, Map<String, String>>()); if (viewForm.getClusterId() != null) { // Lets load the topics now // Retrieve cluster final Cluster cluster = clusterRepository.findOne(viewForm.getClusterId()); if (cluster != null) { try (final KafkaOperations operations = kafkaOperationsFactory.create(cluster, getLoggedInUserId())) { final TopicList topics = operations.getAvailableTopics(); model.addAttribute("topics", topics.getTopics()); // If we have a selected topic if (viewForm.getTopic() != null && !"!".equals(viewForm.getTopic())) { final TopicDetails topicDetails = operations.getTopicDetails(viewForm.getTopic()); model.addAttribute("partitions", topicDetails.getPartitions()); } } } } return "configuration/view/create"; } #location 35 #vulnerability type NULL_DEREFERENCE
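The controller pair swaps Spring Data's findOne(id) followed by an explicit null check for findById(id).ifPresent(...), so the cluster is only dereferenced on the path where it actually exists. A generic sketch of the same Optional-based guard is below; the in-memory lookup class is invented and is not the project's repository.

import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

final class ClusterLookup {

    private final Map<Long, String> clustersById = new HashMap<>();

    Optional<String> findById(Long id) {
        return Optional.ofNullable(clustersById.get(id));
    }

    // No null dereference is possible on the "not found" path: the mapping
    // function only runs when a value is present.
    String describe(Long id) {
        return findById(id)
                .map(name -> "cluster " + name)
                .orElse("unknown cluster");
    }
}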
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void SampleTest2() throws IOException, COSVisitorException { //Set margins float margin = 10; List<String[]> facts = getFacts(); //A list of bookmarks of all the tables List<PDOutlineItem> bookmarks = new ArrayList<PDOutlineItem>(); //Initialize Document PDDocument doc = new PDDocument(); PDPage page = addNewPage(doc); //Initialize table float tableWidth = page.findMediaBox().getWidth()-(2*margin); float yStartNewPage = page.findMediaBox().getHeight() - (2 * margin); boolean drawContent = true; boolean drawLines = true; float yStart = yStartNewPage; float bottomMargin = 70; BaseTable table = new BaseTable(yStart,yStartNewPage,bottomMargin,tableWidth, margin, doc, page, drawLines, drawContent); //Create Header row Row headerRow = table.createRow(15f); Cell cell = headerRow.createCell(100,"Awesome Facts About Belgium"); cell.setFont(PDType1Font.HELVETICA_BOLD); cell.setFillColor(Color.BLACK);cell.setTextColor(Color.WHITE); table.setHeader(headerRow); //Create 2 column row Row row = table.createRow(15f); cell = row.createCell(75,"Source:"); cell.setFont(PDType1Font.HELVETICA); cell = row.createCell(25,"http://www.factsofbelgium.com/"); cell.setFont(PDType1Font.HELVETICA_OBLIQUE); //Create Fact header row Row factHeaderrow = table.createRow(15f); cell = factHeaderrow.createCell((100/3) * 2 ,"Fact"); cell.setFont(PDType1Font.HELVETICA); cell.setFontSize(6); cell.setFillColor(Color.LIGHT_GRAY); cell = factHeaderrow.createCell((100/3),"Tags"); cell.setFillColor(Color.LIGHT_GRAY); cell.setFont(PDType1Font.HELVETICA_OBLIQUE);cell.setFontSize(6); //Add multiple rows with random facts about Belgium int bookmarkid = 0; for(String[] fact : facts) { row = table.createRow(10f); cell = row.createCell((100/3)*2 ,fact[0]+ " " + fact[0]+ " " + fact[0]); cell.setFont(PDType1Font.HELVETICA);cell.setFontSize(6); //Create a bookmark for each record PDOutlineItem outlineItem = new PDOutlineItem(); outlineItem.setTitle((++bookmarkid ) + ") " + fact[0]); row.setBookmark( outlineItem); for(int i = 1; i< fact.length; i++) { cell = row.createCell((100/9) ,fact[i]); cell.setFont(PDType1Font.HELVETICA_OBLIQUE);cell.setFontSize(6); //Set colors if(fact[i].contains("beer"))cell.setFillColor(Color.yellow); if(fact[i].contains("champion"))cell.setTextColor(Color.GREEN); } } table.draw(); //Get all bookmarks of previous table bookmarks.addAll(table.getBookmarks()); //Create document outline PDDocumentOutline outline = new PDDocumentOutline(); for(PDOutlineItem bm : bookmarks) { outline.appendChild(bm); } doc.getDocumentCatalog().setDocumentOutline(outline); //Save the document File file = new File("target/BoxableSample2.pdf"); Files.createParentDirs(file); doc.save(file); doc.close(); }
#vulnerable code @Test public void SampleTest2() throws IOException, COSVisitorException { //Set margins float margin = 10; List<String[]> facts = getFacts(); //A list of bookmarks of all the tables List<PDOutlineItem> bookmarks = new ArrayList<PDOutlineItem>(); //Initialize Document PDDocument doc = new PDDocument(); PDPage page = addNewPage(doc); //Initialize table float tableWidth = page.findMediaBox().getWidth()-(2*margin); float top = page.findMediaBox().getHeight() - (2 * margin); boolean drawContent = true; boolean drawLines = true; Table table = new Table(top,tableWidth, margin, doc, page, drawLines, drawContent); //Create Header row Row headerRow = table.createRow(15f); Cell cell = headerRow.createCell(100,"Awesome Facts About Belgium"); cell.setFont(PDType1Font.HELVETICA_BOLD); cell.setFillColor(Color.BLACK);cell.setTextColor(Color.WHITE); table.setHeader(headerRow); //Create 2 column row Row row = table.createRow(15f); cell = row.createCell(75,"Source:"); cell.setFont(PDType1Font.HELVETICA); cell = row.createCell(25,"http://www.factsofbelgium.com/"); cell.setFont(PDType1Font.HELVETICA_OBLIQUE); //Create Fact header row Row factHeaderrow = table.createRow(15f); cell = factHeaderrow.createCell((100/3) * 2 ,"Fact"); cell.setFont(PDType1Font.HELVETICA); cell.setFontSize(6); cell.setFillColor(Color.LIGHT_GRAY); cell = factHeaderrow.createCell((100/3),"Tags"); cell.setFillColor(Color.LIGHT_GRAY); cell.setFont(PDType1Font.HELVETICA_OBLIQUE);cell.setFontSize(6); //Add multiple rows with random facts about Belgium int bookmarkid = 0; for(String[] fact : facts) { row = table.createRow(10f); cell = row.createCell((100/3)*2 ,fact[0]+ " " + fact[0]+ " " + fact[0]); cell.setFont(PDType1Font.HELVETICA);cell.setFontSize(6); //Create a bookmark for each record PDOutlineItem outlineItem = new PDOutlineItem(); outlineItem.setTitle((++bookmarkid ) + ") " + fact[0]); row.setBookmark( outlineItem); for(int i = 1; i< fact.length; i++) { cell = row.createCell((100/9) ,fact[i]); cell.setFont(PDType1Font.HELVETICA_OBLIQUE);cell.setFontSize(6); //Set colors if(fact[i].contains("beer"))cell.setFillColor(Color.yellow); if(fact[i].contains("champion"))cell.setTextColor(Color.GREEN); } } table.draw(); //Get all bookmarks of previous table bookmarks.addAll(table.getBookmarks()); //Create document outline PDDocumentOutline outline = new PDDocumentOutline(); for(PDOutlineItem bm : bookmarks) { outline.appendChild(bm); } doc.getDocumentCatalog().setDocumentOutline(outline); //Save the document File file = new File("target/BoxableSample2.pdf"); Files.createParentDirs(file); doc.save(file); doc.close(); } #location 72 #vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void Sample1 () throws IOException, COSVisitorException { //Set margins float margin = 10; List<String[]> facts = getFacts(); //Initialize Document PDDocument doc = new PDDocument(); PDPage page = addNewPage(doc); float yStartNewPage = page.findMediaBox().getHeight() - (2 * margin); //Initialize table float tableWidth = page.findMediaBox().getWidth() - (2 * margin); boolean drawContent = false; float yStart = yStartNewPage; float bottomMargin = 70; BaseTable table = new BaseTable(yStart,yStartNewPage, bottomMargin, tableWidth, margin, doc, page, true, drawContent); //Create Header row Row headerRow = table.createRow(15f); Cell cell = headerRow.createCell(100, "Awesome Facts About Belgium"); cell.setFont(PDType1Font.HELVETICA_BOLD); cell.setFillColor(Color.BLACK); cell.setTextColor(Color.WHITE); table.setHeader(headerRow); //Create 2 column row Row row = table.createRow(15f); cell = row.createCell(30,"Source:"); cell.setFont(PDType1Font.HELVETICA); cell = row.createCell(70, "http://www.factsofbelgium.com/"); cell.setFont(PDType1Font.HELVETICA_OBLIQUE); //Create Fact header row Row factHeaderrow = table.createRow(15f); cell = factHeaderrow.createCell((100 / 3) * 2, "Fact"); cell.setFont(PDType1Font.HELVETICA); cell.setFontSize(6); cell.setFillColor(Color.LIGHT_GRAY); cell = factHeaderrow.createCell((100 / 3), "Tags"); cell.setFillColor(Color.LIGHT_GRAY); cell.setFont(PDType1Font.HELVETICA_OBLIQUE); cell.setFontSize(6); //Add multiple rows with random facts about Belgium for (String[] fact : facts) { row = table.createRow(10f); cell = row.createCell((100 / 3) * 2, fact[0]); cell.setFont(PDType1Font.HELVETICA); cell.setFontSize(6); for (int i = 1; i < fact.length; i++) { cell = row.createCell((100 / 9), fact[i]); cell.setFont(PDType1Font.HELVETICA_OBLIQUE); cell.setFontSize(6); //Set colors if (fact[i].contains("beer")) cell.setFillColor(Color.yellow); if (fact[i].contains("champion")) cell.setTextColor(Color.GREEN); } } table.draw(); //Close Stream and save pdf File file = new File("target/BoxableSample1.pdf"); Files.createParentDirs(file); doc.save(file); doc.close(); }
#vulnerable code @Test public void Sample1 () throws IOException, COSVisitorException { //Set margins float margin = 10; List<String[]> facts = getFacts(); //Initialize Document PDDocument doc = new PDDocument(); PDPage page = addNewPage(doc); float top = page.findMediaBox().getHeight() - (2 * margin); //Initialize table float tableWidth = page.findMediaBox().getWidth() - (2 * margin); boolean drawContent = false; Table table = new Table(top,tableWidth, margin, doc, page, true, drawContent); //Create Header row Row headerRow = table.createRow(15f); Cell cell = headerRow.createCell(100, "Awesome Facts About Belgium"); cell.setFont(PDType1Font.HELVETICA_BOLD); cell.setFillColor(Color.BLACK); cell.setTextColor(Color.WHITE); table.setHeader(headerRow); //Create 2 column row Row row = table.createRow(15f); cell = row.createCell(30,"Source:"); cell.setFont(PDType1Font.HELVETICA); cell = row.createCell(70, "http://www.factsofbelgium.com/"); cell.setFont(PDType1Font.HELVETICA_OBLIQUE); //Create Fact header row Row factHeaderrow = table.createRow(15f); cell = factHeaderrow.createCell((100 / 3) * 2, "Fact"); cell.setFont(PDType1Font.HELVETICA); cell.setFontSize(6); cell.setFillColor(Color.LIGHT_GRAY); cell = factHeaderrow.createCell((100 / 3), "Tags"); cell.setFillColor(Color.LIGHT_GRAY); cell.setFont(PDType1Font.HELVETICA_OBLIQUE); cell.setFontSize(6); //Add multiple rows with random facts about Belgium for (String[] fact : facts) { row = table.createRow(10f); cell = row.createCell((100 / 3) * 2, fact[0]); cell.setFont(PDType1Font.HELVETICA); cell.setFontSize(6); for (int i = 1; i < fact.length; i++) { cell = row.createCell((100 / 9), fact[i]); cell.setFont(PDType1Font.HELVETICA_OBLIQUE); cell.setFontSize(6); //Set colors if (fact[i].contains("beer")) cell.setFillColor(Color.yellow); if (fact[i].contains("champion")) cell.setTextColor(Color.GREEN); } } table.draw(); //Close Stream and save pdf File file = new File("target/BoxableSample1.pdf"); Files.createParentDirs(file); doc.save(file); doc.close(); } #location 67 #vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code public void drawTitle(String title, PDFont font, int fontSize, TextType textType) throws IOException { PDPageContentStream articleTitle = createPdPageContentStream(); articleTitle.beginText(); articleTitle.setFont(font, fontSize); articleTitle.moveTextPositionByAmount(getMargin(), yStart); articleTitle.setNonStrokingColor(Color.black); articleTitle.drawString(title); articleTitle.endText(); if (textType != null) { switch (textType) { case HIGHLIGHT: throw new NotImplementedException(); case SQUIGGLY: throw new NotImplementedException(); case STRIKEOUT: throw new NotImplementedException(); case UNDERLINE: float y = (float) (yStart - 1.5); float titleWidth = font.getStringWidth(title) / 1000 * fontSize; articleTitle.drawLine(getMargin(), y, getMargin() + titleWidth, y); break; default: break; } } articleTitle.close(); yStart = (float) (yStart - (fontSize / 1.5)); }
#vulnerable code public void drawTitle(String title, PDFont font, int fontSize, TextType textType) throws IOException { PDPageContentStream articleTitle = new PDPageContentStream(this.document, this.currentPage, true, true); articleTitle.beginText(); articleTitle.setFont(font, fontSize); articleTitle.moveTextPositionByAmount(getMargin(), yStart); articleTitle.setNonStrokingColor(Color.black); articleTitle.drawString(title); articleTitle.endText(); if (textType != null) { switch (textType) { case HIGHLIGHT: throw new NotImplementedException(); case SQUIGGLY: throw new NotImplementedException(); case STRIKEOUT: throw new NotImplementedException(); case UNDERLINE: float y = (float) (yStart - 1.5); float titleWidth = font.getStringWidth(title) / 1000 * fontSize; articleTitle.drawLine(getMargin(), y, getMargin() + titleWidth, y); break; default: break; } } articleTitle.close(); yStart = (float) (yStart - (fontSize / 1.5)); } #location 14 #vulnerability type RESOURCE_LEAK
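drawTitle is patched by routing construction through a createPdPageContentStream() helper whose body is not part of the record; the leak being reported is the usual one for PDFBox content streams, an exception between new PDPageContentStream(...) and close() leaving the stream open. Assuming the PDFBox 1.x API used throughout these samples, a plain close-in-finally sketch looks like the following; it is illustrative rather than the Boxable implementation.

import java.io.IOException;
import org.apache.pdfbox.pdmodel.PDDocument;
import org.apache.pdfbox.pdmodel.PDPage;
import org.apache.pdfbox.pdmodel.edit.PDPageContentStream;

final class ContentStreamSketch {

    // Close the content stream on every path, including when a drawing call
    // throws; otherwise the underlying stream is never closed.
    static void drawEmptyText(PDDocument doc, PDPage page) throws IOException {
        PDPageContentStream stream = new PDPageContentStream(doc, page, true, true);
        try {
            stream.beginText();
            stream.endText();
        } finally {
            stream.close();
        }
    }
}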
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code Optional<Map<ChunkReference, Chunk>> fetch(HttpClient httpClient, ChunkKeyEncryptionKeys keks, Map<Integer, StorageHostChunkList> containers, Asset asset) { Map<ChunkReference, Chunk> map = new HashMap<>(); for (Map.Entry<Integer, StorageHostChunkList> entry : containers.entrySet()) { Optional<Map<ChunkReference, Chunk>> chunks = keks.apply(entry.getValue()) .flatMap(kek -> fetch(httpClient, kek, entry.getValue(), entry.getKey())); if (!chunks.isPresent()) { return Optional.empty(); } map.putAll(chunks.get()); } return Optional.of(map); }
#vulnerable code Optional<List<Chunk>> assemble(Map<ChunkReference, Chunk> map, List<ChunkReference> references) { if (map.keySet().containsAll(references)) { logger.warn("-- assemble() - missing chunks"); return Optional.empty(); } List<Chunk> chunkList = references.stream() .map(map::get) .collect(Collectors.toList()); return Optional.of(chunkList); } #location 4 #vulnerability type INTERFACE_NOT_THREAD_SAFE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code public static int runCommand(final CCTask task, final File workingDir, final String[] cmdline, final boolean newEnvironment, final Environment env) throws BuildException { try { task.log(Commandline.toString(cmdline), task.getCommandLogLevel()); /* final Execute exe = new Execute(new LogStreamHandler(task, Project.MSG_INFO, Project.MSG_ERR)); if (System.getProperty("os.name").equals("OS/390")) { exe.setVMLauncher(false); } exe.setAntRun(task.getProject()); exe.setCommandline(cmdline); exe.setWorkingDirectory(workingDir); if (env != null) { final String[] environment = env.getVariables(); if (environment != null) { for (final String element : environment) { task.log("Setting environment variable: " + element, Project.MSG_VERBOSE); } } exe.setEnvironment(environment); } exe.setNewenvironment(newEnvironment); return exe.execute(); */ return CommandExecution.runCommand(cmdline,workingDir,task); } catch (final java.io.IOException exc) { throw new BuildException("Could not launch " + cmdline[0] + ": " + exc, task.getLocation()); } }
#vulnerable code public static int runCommand(final CCTask task, final File workingDir, final String[] cmdline, final boolean newEnvironment, final Environment env) throws BuildException { try { task.log(Commandline.toString(cmdline), task.getCommandLogLevel()); final Execute exe = new Execute(new LogStreamHandler(task, Project.MSG_INFO, Project.MSG_ERR)); if (System.getProperty("os.name").equals("OS/390")) { exe.setVMLauncher(false); } exe.setAntRun(task.getProject()); exe.setCommandline(cmdline); exe.setWorkingDirectory(workingDir); if (env != null) { final String[] environment = env.getVariables(); if (environment != null) { for (final String element : environment) { task.log("Setting environment variable: " + element, Project.MSG_VERBOSE); } } exe.setEnvironment(environment); } exe.setNewenvironment(newEnvironment); return exe.execute(); } catch (final java.io.IOException exc) { throw new BuildException("Could not launch " + cmdline[0] + ": " + exc, task.getLocation()); } } #location 6 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code public final List/* <AttachedNarArtifact> */getAttachedNarDependencies( List/* <NarArtifacts> */narArtifacts, AOL archOsLinker, String type ) throws MojoExecutionException, MojoFailureException { boolean noarch = false; AOL aol = archOsLinker; if ( aol == null ) { noarch = true; aol = defaultAOL; } List artifactList = new ArrayList(); for ( Iterator i = narArtifacts.iterator(); i.hasNext(); ) { Artifact dependency = (Artifact) i.next(); if ( noarch ) { artifactList.addAll( getAttachedNarDependencies( dependency, null, NarConstants.NAR_NO_ARCH ) ); } // FIXME kludge, but does not work anymore since AOL is now a class if ( aol.equals( NarConstants.NAR_NO_ARCH ) ) { // FIXME no handling of local artifactList.addAll( getAttachedNarDependencies( dependency, null, NarConstants.NAR_NO_ARCH ) ); } else { if ( type != null ) { artifactList.addAll( getAttachedNarDependencies( dependency, aol, type ) ); } else { for ( int j = 0; j < narTypes.length; j++ ) { artifactList.addAll( getAttachedNarDependencies( dependency, aol, narTypes[j] )); } } } } return artifactList; }
#vulnerable code public final List/* <AttachedNarArtifact> */getAttachedNarDependencies( List/* <NarArtifacts> */narArtifacts, AOL archOsLinker, String type ) throws MojoExecutionException, MojoFailureException { boolean noarch = false; AOL aol = archOsLinker; if ( aol == null ) { noarch = true; aol = defaultAOL; } List artifactList = new ArrayList(); for ( Iterator i = narArtifacts.iterator(); i.hasNext(); ) { Artifact dependency = (Artifact) i.next(); NarInfo narInfo = getNarInfo( dependency ); if ( noarch ) { artifactList.addAll( getAttachedNarDependencies( dependency, null, NarConstants.NAR_NO_ARCH ) ); } // use preferred binding, unless non existing. String binding = narInfo.getBinding( aol, type != null ? type : Library.STATIC ); // FIXME kludge, but does not work anymore since AOL is now a class if ( aol.equals( NarConstants.NAR_NO_ARCH ) ) { // FIXME no handling of local artifactList.addAll( getAttachedNarDependencies( dependency, null, NarConstants.NAR_NO_ARCH ) ); } else { artifactList.addAll( getAttachedNarDependencies( dependency, aol, binding ) ); } } return artifactList; } #location 24 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code private boolean isClang() { final String command = getCommand(); if (command == null) { return false; } if (command.startsWith("clang")) { return true; } if (!GPP_COMMAND.equals(command)) { return false; } final String[] cmd = { command, "--version" }; final String[] cmdout = CaptureStreamHandler.execute(cmd).getStdout(); return cmdout != null && cmdout.length > 0 && cmdout[0].contains("(clang-"); }
#vulnerable code private boolean isClang() { final String command = getCommand(); if (command == null) { return false; } if (command.startsWith("clang")) { return true; } if (!GPP_COMMAND.equals(command)) { return false; } final String[] cmd = { command, "--version" }; final String[] cmdout = CaptureStreamHandler.execute(cmd).getStdout(); return cmdout.length > 0 && cmdout[0].contains("(clang-"); } #location 16 #vulnerability type NULL_DEREFERENCE
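isClang is fixed by adding cmdout != null to the final condition, because the captured stdout can be null when the compiler could not be executed at all. The guard generalises to any API that signals failure with a null array, as in this small sketch with a hypothetical command runner.

final class VersionProbeSketch {

    // Hypothetical runner: returns captured stdout lines, or null when the
    // process could not be started.
    interface Runner {
        String[] run(String... cmd);
    }

    static boolean looksLikeClang(Runner runner, String command) {
        String[] out = runner.run(command, "--version");
        // Check for null before touching .length or out[0].
        return out != null && out.length > 0 && out[0].contains("clang");
    }
}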
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { partitionField = ConfUtils.getString(stormConf, ESStatusRoutingFieldParamName); bucketSortField = ConfUtils.getString(stormConf, ESStatusBucketSortFieldParamName, bucketSortField); totalSortField = ConfUtils.getString(stormConf, ESStatusGlobalSortFieldParamName); maxURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxURLsParamName, 1); maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName, 10); minDelayBetweenQueries = ConfUtils.getLong(stormConf, ESStatusMinDelayParamName, 2000); super.open(stormConf, context, collector); }
#vulnerable code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName, "status"); docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName, "status"); partitionField = ConfUtils.getString(stormConf, ESStatusRoutingFieldParamName); bucketSortField = ConfUtils.getString(stormConf, ESStatusBucketSortFieldParamName, bucketSortField); totalSortField = ConfUtils.getString(stormConf, ESStatusGlobalSortFieldParamName); maxURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxURLsParamName, 1); maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName, 10); minDelayBetweenQueries = ConfUtils.getLong(stormConf, ESStatusMinDelayParamName, 2000); // one ES client per JVM synchronized (AggregationSpout.class) { try { if (client == null) { client = ElasticSearchConnection.getClient(stormConf, ESBoltType); } } catch (Exception e1) { LOG.error("Can't connect to ElasticSearch", e1); throw new RuntimeException(e1); } } // if more than one instance is used we expect their number to be the // same as the number of shards int totalTasks = context .getComponentTasks(context.getThisComponentId()).size(); if (totalTasks > 1) { logIdprefix = "[" + context.getThisComponentId() + " #" + context.getThisTaskIndex() + "] "; // determine the number of shards so that we can restrict the // search ClusterSearchShardsRequest request = new ClusterSearchShardsRequest( indexName); ClusterSearchShardsResponse shardresponse = client.admin() .cluster().searchShards(request).actionGet(); ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups(); if (totalTasks != shardgroups.length) { throw new RuntimeException( "Number of ES spout instances should be the same as number of shards (" + shardgroups.length + ") but is " + totalTasks); } shardID = shardgroups[context.getThisTaskIndex()].getShardId(); LOG.info("{} assigned shard ID {}", logIdprefix, shardID); } _collector = collector; this.eventCounter = context.registerMetric("counters", new MultiCountMetric(), 10); context.registerMetric("buffer_size", new IMetric() { @Override public Object getValueAndReset() { return buffer.size(); } }, 10); } #location 10 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void nextTuple() { if (!active) return; // synchronize access to buffer needed in case of asynchronous // queries to the backend synchronized (buffer) { // force the refresh of the buffer even if the buffer is not empty if (!isInQuery.get() && triggerQueries()) { populateBuffer(); } if (!buffer.isEmpty()) { // track how long the buffer had been empty for if (timestampEmptyBuffer != -1) { eventCounter.scope("empty.buffer").incrBy( System.currentTimeMillis() - timestampEmptyBuffer); timestampEmptyBuffer = -1; } List<Object> fields = buffer.remove(); String url = fields.get(0).toString(); this._collector.emit(fields, url); beingProcessed.put(url, null); in_buffer.remove(url); eventCounter.scope("emitted").incrBy(1); return; } else if (timestampEmptyBuffer == -1) { timestampEmptyBuffer = System.currentTimeMillis(); } } if (isInQuery.get() || throttleQueries() > 0) { // sleep for a bit but not too much in order to give ack/fail a // chance Utils.sleep(10); return; } // re-populate the buffer populateBuffer(); timeLastQuerySent = System.currentTimeMillis(); }
#vulnerable code @Override public void nextTuple() { if (!active) return; // synchronize access to buffer needed in case of asynchronous // queries to the backend synchronized (buffer) { if (!buffer.isEmpty()) { // track how long the buffer had been empty for if (timestampEmptyBuffer != -1) { eventCounter.scope("empty.buffer").incrBy( System.currentTimeMillis() - timestampEmptyBuffer); timestampEmptyBuffer = -1; } List<Object> fields = buffer.remove(); String url = fields.get(0).toString(); this._collector.emit(fields, url); beingProcessed.put(url, null); eventCounter.scope("emitted").incrBy(1); return; } else if (timestampEmptyBuffer == -1) { timestampEmptyBuffer = System.currentTimeMillis(); } } if (isInQuery.get() || throttleQueries() > 0) { // sleep for a bit but not too much in order to give ack/fail a // chance Utils.sleep(10); return; } // re-populate the buffer populateBuffer(); timeLastQuery = System.currentTimeMillis(); } #location 37 #vulnerability type THREAD_SAFETY_VIOLATION
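In this group of spout methods the buffer is filled by an asynchronous Elasticsearch callback and drained by nextTuple(), so the patched code performs its reads and writes of that queue, and of the bookkeeping around it, inside synchronized (buffer) blocks; the THREAD_SAFETY_VIOLATION labels generally point at shared state touched outside that lock. The class below is a deliberately reduced sketch of the producer/consumer discipline, not the StormCrawler spout itself.

import java.util.ArrayDeque;
import java.util.Queue;

final class SharedBufferSketch<T> {

    // Every access to 'buffer' holds its monitor, the same discipline the
    // patched nextTuple() and query callback follow.
    private final Queue<T> buffer = new ArrayDeque<>();

    void offerFromCallback(T item) {
        synchronized (buffer) {
            buffer.add(item);
        }
    }

    T pollForEmit() {
        synchronized (buffer) {
            return buffer.poll();
        }
    }

    int size() {
        synchronized (buffer) {
            return buffer.size();
        }
    }
}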
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void onResponse(SearchResponse response) { long timeTaken = System.currentTimeMillis() - timeStartESQuery; Aggregations aggregs = response.getAggregations(); SingleBucketAggregation sample = aggregs.get("sample"); if (sample != null) { aggregs = sample.getAggregations(); } Terms agg = aggregs.get("partition"); int numhits = 0; int numBuckets = 0; int alreadyprocessed = 0; Date mostRecentDateFound = null; SimpleDateFormat formatter = new SimpleDateFormat( "yyyy-MM-dd'T'HH:mm:ss.SSSX"); synchronized (buffer) { // For each entry for (Terms.Bucket entry : agg.getBuckets()) { String key = (String) entry.getKey(); // bucket key long docCount = entry.getDocCount(); // Doc count int hitsForThisBucket = 0; // filter results so that we don't include URLs we are already // being processed TopHits topHits = entry.getAggregations().get("docs"); for (SearchHit hit : topHits.getHits().getHits()) { hitsForThisBucket++; Map<String, Object> keyValues = hit.sourceAsMap(); String url = (String) keyValues.get("url"); // 2017-04-06T10:14:28.662Z String strDate = (String) keyValues.get("nextFetchDate"); try { Date nextFetchDate = formatter.parse(strDate); if (mostRecentDateFound == null || nextFetchDate.after(mostRecentDateFound)) { mostRecentDateFound = nextFetchDate; } } catch (ParseException e) { throw new RuntimeException("can't parse date :" + strDate); } LOG.debug("{} -> id [{}], _source [{}]", logIdprefix, hit.getId(), hit.getSourceAsString()); // is already being processed - skip it! if (beingProcessed.containsKey(url)) { alreadyprocessed++; continue; } Metadata metadata = fromKeyValues(keyValues); buffer.add(new Values(url, metadata)); } if (hitsForThisBucket > 0) numBuckets++; numhits += hitsForThisBucket; LOG.debug("{} key [{}], hits[{}], doc_count [{}]", logIdprefix, key, hitsForThisBucket, docCount, alreadyprocessed); } // Shuffle the URLs so that we don't get blocks of URLs from the // same // host or domain Collections.shuffle((List) buffer); } LOG.info( "{} ES query returned {} hits from {} buckets in {} msec with {} already being processed", logIdprefix, numhits, numBuckets, timeTaken, alreadyprocessed); esQueryTimes.addMeasurement(timeTaken); eventCounter.scope("already_being_processed").incrBy(alreadyprocessed); eventCounter.scope("ES_queries").incrBy(1); eventCounter.scope("ES_docs").incrBy(numhits); // optimise the nextFetchDate by getting the most recent value // returned in the query and add to it, unless the previous value is // within n mins in which case we'll keep it if (mostRecentDateFound != null && recentDateIncrease >= 0) { Calendar cal = Calendar.getInstance(); cal.setTime(mostRecentDateFound); cal.add(Calendar.MINUTE, recentDateIncrease); Date potentialNewDate = cal.getTime(); Date oldDate = null; // check boundaries if (this.recentDateMinGap > 0) { Calendar low = Calendar.getInstance(); low.setTime(lastDate); low.add(Calendar.MINUTE, -recentDateMinGap); Calendar high = Calendar.getInstance(); high.setTime(lastDate); high.add(Calendar.MINUTE, recentDateMinGap); if (high.before(potentialNewDate) || low.after(potentialNewDate)) { oldDate = lastDate; lastDate = potentialNewDate; } } else { oldDate = lastDate; lastDate = potentialNewDate; } if (oldDate != null) { LOG.info( "{} lastDate changed from {} to {} based on mostRecentDateFound {}", logIdprefix, oldDate, lastDate, mostRecentDateFound); } } // change the date only if we don't get any results at all if (numBuckets == 0) { lastDate = null; } // remove lock isInESQuery.set(false); }
#vulnerable code @Override public void onResponse(SearchResponse response) { long timeTaken = System.currentTimeMillis() - timeStartESQuery; Aggregations aggregs = response.getAggregations(); SingleBucketAggregation sample = aggregs.get("sample"); if (sample != null) { aggregs = sample.getAggregations(); } Terms agg = aggregs.get("partition"); int numhits = 0; int numBuckets = 0; int alreadyprocessed = 0; synchronized (buffer) { // For each entry for (Terms.Bucket entry : agg.getBuckets()) { String key = (String) entry.getKey(); // bucket key long docCount = entry.getDocCount(); // Doc count int hitsForThisBucket = 0; // filter results so that we don't include URLs we are already // being processed TopHits topHits = entry.getAggregations().get("docs"); for (SearchHit hit : topHits.getHits().getHits()) { hitsForThisBucket++; Map<String, Object> keyValues = hit.sourceAsMap(); String url = (String) keyValues.get("url"); LOG.debug("{} -> id [{}], _source [{}]", logIdprefix, hit.getId(), hit.getSourceAsString()); // is already being processed - skip it! if (beingProcessed.containsKey(url)) { alreadyprocessed++; continue; } Metadata metadata = fromKeyValues(keyValues); buffer.add(new Values(url, metadata)); } if (hitsForThisBucket > 0) numBuckets++; numhits += hitsForThisBucket; LOG.debug("{} key [{}], hits[{}], doc_count [{}]", logIdprefix, key, hitsForThisBucket, docCount, alreadyprocessed); } // Shuffle the URLs so that we don't get blocks of URLs from the // same // host or domain Collections.shuffle((List) buffer); } LOG.info( "{} ES query returned {} hits from {} buckets in {} msec with {} already being processed", logIdprefix, numhits, numBuckets, timeTaken, alreadyprocessed); esQueryTimes.addMeasurement(timeTaken); eventCounter.scope("already_being_processed").incrBy(alreadyprocessed); eventCounter.scope("ES_queries").incrBy(1); eventCounter.scope("ES_docs").incrBy(numhits); // change the date only if we don't get any results at all if (numBuckets == 0) { lastDate = null; } // remove lock isInESQuery.set(false); } #location 73 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override protected void populateBuffer() { // not used yet or returned empty results if (queryDate == null) { queryDate = new Date(); lastTimeResetToNOW = Instant.now(); lastStartOffset = 0; } // been running same query for too long and paging deep? else if (maxStartOffset != -1 && lastStartOffset > maxStartOffset) { LOG.info("Reached max start offset {}", lastStartOffset); lastStartOffset = 0; } String formattedLastDate = ISODateTimeFormat.dateTimeNoMillis().print( queryDate.getTime()); LOG.info("{} Populating buffer with nextFetchDate <= {}", logIdprefix, formattedLastDate); QueryBuilder queryBuilder = QueryBuilders.rangeQuery("nextFetchDate") .lte(formattedLastDate); if (filterQuery != null) { queryBuilder = boolQuery().must(queryBuilder).filter( QueryBuilders.queryStringQuery(filterQuery)); } SearchRequest request = new SearchRequest(indexName).types(docType) .searchType(SearchType.QUERY_THEN_FETCH); SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); sourceBuilder.query(queryBuilder); sourceBuilder.from(lastStartOffset); sourceBuilder.size(maxBucketNum); sourceBuilder.explain(false); sourceBuilder.trackTotalHits(false); // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-preference.html // _shards:2,3 if (shardID != -1) { request.preference("_shards:" + shardID); } if (StringUtils.isNotBlank(totalSortField)) { sourceBuilder.sort(new FieldSortBuilder(totalSortField) .order(SortOrder.ASC)); } CollapseBuilder collapse = new CollapseBuilder(partitionField); // group expansion -> sends sub queries for each bucket if (maxURLsPerBucket > 1) { InnerHitBuilder ihb = new InnerHitBuilder(); ihb.setSize(maxURLsPerBucket); ihb.setName("urls_per_bucket"); // sort within a bucket if (StringUtils.isNotBlank(bucketSortField)) { List<SortBuilder<?>> sorts = new LinkedList<>(); FieldSortBuilder bucketsorter = SortBuilders.fieldSort( bucketSortField).order(SortOrder.ASC); sorts.add(bucketsorter); ihb.setSorts(sorts); } collapse.setInnerHits(ihb); } sourceBuilder.collapse(collapse); request.source(sourceBuilder); // dump query to log LOG.debug("{} ES query {}", logIdprefix, request.toString()); isInQuery.set(true); client.searchAsync(request, this); }
#vulnerable code @Override protected void populateBuffer() { // not used yet or returned empty results if (lastDate == null) { lastDate = new Date(); lastStartOffset = 0; } // been running same query for too long and paging deep? else if (maxStartOffset != -1 && lastStartOffset > maxStartOffset) { LOG.info("Reached max start offset {}", lastStartOffset); lastStartOffset = 0; } String formattedLastDate = ISODateTimeFormat.dateTimeNoMillis().print( lastDate.getTime()); LOG.info("{} Populating buffer with nextFetchDate <= {}", logIdprefix, formattedLastDate); QueryBuilder queryBuilder = QueryBuilders.rangeQuery("nextFetchDate") .lte(formattedLastDate); if (filterQuery != null) { queryBuilder = boolQuery().must(queryBuilder).filter( QueryBuilders.queryStringQuery(filterQuery)); } SearchRequest request = new SearchRequest(indexName).types(docType) .searchType(SearchType.QUERY_THEN_FETCH); SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); sourceBuilder.query(queryBuilder); sourceBuilder.from(lastStartOffset); sourceBuilder.size(maxBucketNum); sourceBuilder.explain(false); sourceBuilder.trackTotalHits(false); // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-preference.html // _shards:2,3 if (shardID != -1) { request.preference("_shards:" + shardID); } if (StringUtils.isNotBlank(totalSortField)) { sourceBuilder.sort(new FieldSortBuilder(totalSortField) .order(SortOrder.ASC)); } CollapseBuilder collapse = new CollapseBuilder(partitionField); // group expansion -> sends sub queries for each bucket if (maxURLsPerBucket > 1) { InnerHitBuilder ihb = new InnerHitBuilder(); ihb.setSize(maxURLsPerBucket); ihb.setName("urls_per_bucket"); // sort within a bucket if (StringUtils.isNotBlank(bucketSortField)) { List<SortBuilder<?>> sorts = new LinkedList<>(); FieldSortBuilder bucketsorter = SortBuilders.fieldSort( bucketSortField).order(SortOrder.ASC); sorts.add(bucketsorter); ihb.setSorts(sorts); } collapse.setInnerHits(ihb); } sourceBuilder.collapse(collapse); request.source(sourceBuilder); // dump query to log LOG.debug("{} ES query {}", logIdprefix, request.toString()); isInQuery.set(true); client.searchAsync(request, this); } #location 15 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxInflightParamName, 1); maxBufferSize = ConfUtils.getInt(stormConf, ESStatusBufferSizeParamName, 100); randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName, true); maxSecSinceQueriedDate = ConfUtils.getInt(stormConf, ESMaxSecsSinceQueriedDateParamName, -1); sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName, "nextFetchDate"); super.open(stormConf, context, collector); partitioner = new URLPartitioner(); partitioner.configure(stormConf); context.registerMetric("beingProcessed", new IMetric() { @Override public Object getValueAndReset() { return beingProcessed.size(); } }, 10); }
#vulnerable code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName, "status"); docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName, "status"); maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxInflightParamName, 1); maxBufferSize = ConfUtils.getInt(stormConf, ESStatusBufferSizeParamName, 100); randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName, true); maxSecSinceQueriedDate = ConfUtils.getInt(stormConf, ESMaxSecsSinceQueriedDateParamName, -1); sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName, "nextFetchDate"); // one ES client per JVM synchronized (ElasticSearchSpout.class) { try { if (client == null) { client = ElasticSearchConnection.getClient(stormConf, ESBoltType); } } catch (Exception e1) { LOG.error("Can't connect to ElasticSearch", e1); throw new RuntimeException(e1); } } // if more than one instance is used we expect their number to be the // same as the number of shards int totalTasks = context .getComponentTasks(context.getThisComponentId()).size(); if (totalTasks > 1) { // determine the number of shards so that we can restrict the // search ClusterSearchShardsRequest request = new ClusterSearchShardsRequest( indexName); ClusterSearchShardsResponse shardresponse = client.admin() .cluster().searchShards(request).actionGet(); ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups(); if (totalTasks != shardgroups.length) { throw new RuntimeException( "Number of ES spout instances should be the same as number of shards (" + shardgroups.length + ") but is " + totalTasks); } shardID = shardgroups[context.getThisTaskIndex()].getShardId(); LOG.info("Assigned shard ID {}", shardID); } partitioner = new URLPartitioner(); partitioner.configure(stormConf); _collector = collector; this.eventCounter = context.registerMetric("counters", new MultiCountMetric(), 10); context.registerMetric("beingProcessed", new IMetric() { @Override public Object getValueAndReset() { return beingProcessed.size(); } }, 10); context.registerMetric("buffer_size", new IMetric() { @Override public Object getValueAndReset() { return buffer.size(); } }, 10); } #location 11 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void store(String url, Status status, Metadata metadata, Date nextFetch, Tuple tuple) throws Exception { String sha256hex = org.apache.commons.codec.digest.DigestUtils .sha256Hex(url); // need to synchronize: otherwise it might get added to the cache // without having been sent to ES synchronized (waitAck) { // check that the same URL is not being sent to ES List<Tuple> alreadySent = waitAck.getIfPresent(sha256hex); if (alreadySent != null && status.equals(Status.DISCOVERED)) { // if this object is discovered - adding another version of it // won't make any difference LOG.debug( "Already being sent to ES {} with status {} and ID {}", url, status, sha256hex); // ack straight away! super.ack(tuple, url); return; } } XContentBuilder builder = jsonBuilder().startObject(); builder.field("url", url); builder.field("status", status); // check that we don't overwrite an existing entry // When create is used, the index operation will fail if a document // by that id already exists in the index. boolean create = status.equals(Status.DISCOVERED); builder.startObject("metadata"); Iterator<String> mdKeys = metadata.keySet().iterator(); while (mdKeys.hasNext()) { String mdKey = mdKeys.next(); String[] values = metadata.getValues(mdKey); // periods are not allowed in ES2 - replace with %2E mdKey = mdKey.replaceAll("\\.", "%2E"); builder.array(mdKey, values); } String partitionKey = partitioner.getPartition(url, metadata); if (partitionKey == null) { partitionKey = "_DEFAULT_"; } // store routing key in metadata? if (StringUtils.isNotBlank(fieldNameForRoutingKey) && routingFieldNameInMetadata) { builder.field(fieldNameForRoutingKey, partitionKey); } builder.endObject(); // store routing key outside metadata? if (StringUtils.isNotBlank(fieldNameForRoutingKey) && !routingFieldNameInMetadata) { builder.field(fieldNameForRoutingKey, partitionKey); } builder.field("nextFetchDate", nextFetch); builder.endObject(); IndexRequest request = new IndexRequest(getIndexName(metadata)); request.source(builder).id(sha256hex).create(create); if (doRouting) { request.routing(partitionKey); } synchronized (waitAck) { List<Tuple> tt = waitAck.getIfPresent(sha256hex); if (tt == null) { tt = new LinkedList<>(); waitAck.put(sha256hex, tt); } tt.add(tuple); LOG.debug("Added to waitAck {} with ID {} total {}", url, sha256hex, tt.size()); } LOG.debug("Sending to ES buffer {} with ID {}", url, sha256hex); connection.getProcessor().add(request); }
#vulnerable code @Override public void store(String url, Status status, Metadata metadata, Date nextFetch, Tuple tuple) throws Exception { String sha256hex = org.apache.commons.codec.digest.DigestUtils .sha256Hex(url); // need to synchronize: otherwise it might get added to the cache // without having been sent to ES synchronized (waitAck) { // check that the same URL is not being sent to ES List<Tuple> alreadySent = waitAck.getIfPresent(sha256hex); if (alreadySent != null && status.equals(Status.DISCOVERED)) { // if this object is discovered - adding another version of it // won't make any difference LOG.debug( "Already being sent to ES {} with status {} and ID {}", url, status, sha256hex); // ack straight away! super.ack(tuple, url); return; } } XContentBuilder builder = jsonBuilder().startObject(); builder.field("url", url); builder.field("status", status); // check that we don't overwrite an existing entry // When create is used, the index operation will fail if a document // by that id already exists in the index. boolean create = status.equals(Status.DISCOVERED); builder.startObject("metadata"); Iterator<String> mdKeys = metadata.keySet().iterator(); while (mdKeys.hasNext()) { String mdKey = mdKeys.next(); String[] values = metadata.getValues(mdKey); // periods are not allowed in ES2 - replace with %2E mdKey = mdKey.replaceAll("\\.", "%2E"); builder.array(mdKey, values); } String partitionKey = partitioner.getPartition(url, metadata); if (partitionKey == null) { partitionKey = "_DEFAULT_"; } // store routing key in metadata? if (StringUtils.isNotBlank(fieldNameForRoutingKey) && routingFieldNameInMetadata) { builder.field(fieldNameForRoutingKey, partitionKey); } builder.endObject(); // store routing key outside metadata? if (StringUtils.isNotBlank(fieldNameForRoutingKey) && !routingFieldNameInMetadata) { builder.field(fieldNameForRoutingKey, partitionKey); } builder.field("nextFetchDate", nextFetch); builder.endObject(); IndexRequest request = new IndexRequest(getIndexName(metadata)) .type(docType); request.source(builder).id(sha256hex).create(create); if (doRouting) { request.routing(partitionKey); } synchronized (waitAck) { List<Tuple> tt = waitAck.getIfPresent(sha256hex); if (tt == null) { tt = new LinkedList<>(); waitAck.put(sha256hex, tt); } tt.add(tuple); LOG.debug("Added to waitAck {} with ID {} total {}", url, sha256hex, tt.size()); } LOG.debug("Sending to ES buffer {} with ID {}", url, sha256hex); connection.getProcessor().add(request); } #location 68 #vulnerability type THREAD_SAFETY_VIOLATION
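In the store() pair above, the lookup of an in-flight URL and the later registration of the waiting tuple both happen under the waitAck lock; split across unsynchronized steps, two threads could each decide the URL is new and send it twice. A reduced sketch of that check-then-register pattern with hypothetical types (a plain JDK map standing in for the cache used by the real bolt):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Hypothetical registry of tuples waiting for an ack; the check for an existing
// entry and the insertion of a new one are covered by the same lock.
public class PendingAcks<T> {

    private final Map<String, List<T>> waiting = new HashMap<>();

    /** Returns false if the key was already in flight (caller can ack immediately). */
    public boolean registerIfAbsent(String key, T tuple) {
        synchronized (waiting) {
            List<T> pending = waiting.get(key);
            if (pending != null) {
                return false;               // already being sent, nothing to do
            }
            pending = new ArrayList<>();
            pending.add(tuple);
            waiting.put(key, pending);      // same lock covers check and insert
            return true;
        }
    }

    public List<T> remove(String key) {
        synchronized (waiting) {
            return waiting.remove(key);
        }
    }
}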
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override protected void populateBuffer() { if (lastDate == null) { lastDate = new Date(); } String formattedLastDate = String.format(DATEFORMAT, lastDate); LOG.info("{} Populating buffer with nextFetchDate <= {}", logIdprefix, formattedLastDate); QueryBuilder rangeQueryBuilder = QueryBuilders.rangeQuery( "nextFetchDate").lte(formattedLastDate); SearchRequestBuilder srb = client.prepareSearch(indexName) .setTypes(docType).setSearchType(SearchType.QUERY_THEN_FETCH) .setQuery(rangeQueryBuilder).setFrom(0).setSize(0) .setExplain(false); TermsAggregationBuilder aggregations = AggregationBuilders .terms("partition").field(partitionField).size(maxBucketNum); TopHitsAggregationBuilder tophits = AggregationBuilders.topHits("docs") .size(maxURLsPerBucket).explain(false); // sort within a bucket if (StringUtils.isNotBlank(bucketSortField)) { FieldSortBuilder sorter = SortBuilders.fieldSort(bucketSortField) .order(SortOrder.ASC); tophits.sort(sorter); } aggregations.subAggregation(tophits); // sort between buckets if (StringUtils.isNotBlank(totalSortField)) { MinAggregationBuilder minBuilder = AggregationBuilders.min( "top_hit").field(totalSortField); aggregations.subAggregation(minBuilder); aggregations.order(Terms.Order.aggregation("top_hit", true)); } if (sample) { DiversifiedAggregationBuilder sab = new DiversifiedAggregationBuilder( "sample"); sab.field(partitionField).maxDocsPerValue(maxURLsPerBucket); sab.shardSize(maxURLsPerBucket * maxBucketNum); sab.subAggregation(aggregations); srb.addAggregation(sab); } else { srb.addAggregation(aggregations); } // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-preference.html // _shards:2,3 if (shardID != -1) { srb.setPreference("_shards:" + shardID); } // dump query to log LOG.debug("{} ES query {}", logIdprefix, srb.toString()); timeStartESQuery = System.currentTimeMillis(); isInESQuery.set(true); srb.execute(this); }
#vulnerable code @Override protected void populateBuffer() { if (lastDate == null) { lastDate = String.format(DATEFORMAT, new Date()); } LOG.info("{} Populating buffer with nextFetchDate <= {}", logIdprefix, lastDate); QueryBuilder rangeQueryBuilder = QueryBuilders.rangeQuery( "nextFetchDate").lte(lastDate); SearchRequestBuilder srb = client.prepareSearch(indexName) .setTypes(docType).setSearchType(SearchType.QUERY_THEN_FETCH) .setQuery(rangeQueryBuilder).setFrom(0).setSize(0) .setExplain(false); TermsAggregationBuilder aggregations = AggregationBuilders .terms("partition").field(partitionField).size(maxBucketNum); TopHitsAggregationBuilder tophits = AggregationBuilders.topHits("docs") .size(maxURLsPerBucket).explain(false); // sort within a bucket if (StringUtils.isNotBlank(bucketSortField)) { FieldSortBuilder sorter = SortBuilders.fieldSort(bucketSortField) .order(SortOrder.ASC); tophits.sort(sorter); } aggregations.subAggregation(tophits); // sort between buckets if (StringUtils.isNotBlank(totalSortField)) { MinAggregationBuilder minBuilder = AggregationBuilders.min( "top_hit").field(totalSortField); aggregations.subAggregation(minBuilder); aggregations.order(Terms.Order.aggregation("top_hit", true)); } if (sample) { DiversifiedAggregationBuilder sab = new DiversifiedAggregationBuilder( "sample"); sab.field(partitionField).maxDocsPerValue(maxURLsPerBucket); sab.shardSize(maxURLsPerBucket * maxBucketNum); sab.subAggregation(aggregations); srb.addAggregation(sab); } else { srb.addAggregation(aggregations); } // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-preference.html // _shards:2,3 if (shardID != -1) { srb.setPreference("_shards:" + shardID); } // dump query to log LOG.debug("{} ES query {}", logIdprefix, srb.toString()); timeStartESQuery = System.currentTimeMillis(); isInESQuery.set(true); srb.execute(this); } #location 12 #vulnerability type THREAD_SAFETY_VIOLATION
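The populateBuffer() pair differs mainly in what the shared field holds: the fixed version keeps a Date and produces the formatted string locally on each call, so no thread ever reads a half-updated, pre-formatted value. A small sketch of that snapshot-then-format idea with hypothetical names; SimpleDateFormat itself is not thread-safe, which is another reason to keep it method-local:

import java.text.SimpleDateFormat;
import java.util.Date;

// Hypothetical holder for the query window shared between the spout thread and
// the async callback; the method works on a local snapshot of the field.
public class QueryWindow {

    private volatile Date queryDate;          // shared, may be cleared by another thread

    public String nextQueryUpperBound() {
        Date snapshot = queryDate;            // single read of the shared field
        if (snapshot == null) {
            snapshot = new Date();
            queryDate = snapshot;
        }
        // format the local copy; the field itself never holds a formatted string
        SimpleDateFormat fmt = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSX");
        return fmt.format(snapshot);
    }

    public void reset() {
        queryDate = null;
    }
}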
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxInflightParamName, 1); maxBufferSize = ConfUtils.getInt(stormConf, ESStatusBufferSizeParamName, 100); randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName, true); maxSecSinceQueriedDate = ConfUtils.getInt(stormConf, ESMaxSecsSinceQueriedDateParamName, -1); sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName, "nextFetchDate"); super.open(stormConf, context, collector); partitioner = new URLPartitioner(); partitioner.configure(stormConf); context.registerMetric("beingProcessed", new IMetric() { @Override public Object getValueAndReset() { return beingProcessed.size(); } }, 10); }
#vulnerable code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName, "status"); docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName, "status"); maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxInflightParamName, 1); maxBufferSize = ConfUtils.getInt(stormConf, ESStatusBufferSizeParamName, 100); randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName, true); maxSecSinceQueriedDate = ConfUtils.getInt(stormConf, ESMaxSecsSinceQueriedDateParamName, -1); sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName, "nextFetchDate"); // one ES client per JVM synchronized (ElasticSearchSpout.class) { try { if (client == null) { client = ElasticSearchConnection.getClient(stormConf, ESBoltType); } } catch (Exception e1) { LOG.error("Can't connect to ElasticSearch", e1); throw new RuntimeException(e1); } } // if more than one instance is used we expect their number to be the // same as the number of shards int totalTasks = context .getComponentTasks(context.getThisComponentId()).size(); if (totalTasks > 1) { // determine the number of shards so that we can restrict the // search ClusterSearchShardsRequest request = new ClusterSearchShardsRequest( indexName); ClusterSearchShardsResponse shardresponse = client.admin() .cluster().searchShards(request).actionGet(); ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups(); if (totalTasks != shardgroups.length) { throw new RuntimeException( "Number of ES spout instances should be the same as number of shards (" + shardgroups.length + ") but is " + totalTasks); } shardID = shardgroups[context.getThisTaskIndex()].getShardId(); LOG.info("Assigned shard ID {}", shardID); } partitioner = new URLPartitioner(); partitioner.configure(stormConf); _collector = collector; this.eventCounter = context.registerMetric("counters", new MultiCountMetric(), 10); context.registerMetric("beingProcessed", new IMetric() { @Override public Object getValueAndReset() { return beingProcessed.size(); } }, 10); context.registerMetric("buffer_size", new IMetric() { @Override public Object getValueAndReset() { return buffer.size(); } }, 10); } #location 51 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void onResponse(SearchResponse response) { long timeTaken = System.currentTimeMillis() - timeLastQuery; Aggregations aggregs = response.getAggregations(); if (aggregs == null) { isInQuery.set(false); return; } SingleBucketAggregation sample = aggregs.get("sample"); if (sample != null) { aggregs = sample.getAggregations(); } Terms agg = aggregs.get("partition"); int numhits = 0; int numBuckets = 0; int alreadyprocessed = 0; Date mostRecentDateFound = null; SimpleDateFormat formatter = new SimpleDateFormat( "yyyy-MM-dd'T'HH:mm:ss.SSSX"); synchronized (buffer) { // For each entry Iterator<Terms.Bucket> iterator = (Iterator<Bucket>) agg .getBuckets().iterator(); while (iterator.hasNext()) { Terms.Bucket entry = iterator.next(); String key = (String) entry.getKey(); // bucket key long docCount = entry.getDocCount(); // Doc count int hitsForThisBucket = 0; // filter results so that we don't include URLs we are already // being processed TopHits topHits = entry.getAggregations().get("docs"); for (SearchHit hit : topHits.getHits().getHits()) { hitsForThisBucket++; Map<String, Object> keyValues = hit.getSourceAsMap(); String url = (String) keyValues.get("url"); LOG.debug("{} -> id [{}], _source [{}]", logIdprefix, hit.getId(), hit.getSourceAsString()); // consider only the first document of the last bucket // for optimising the nextFetchDate if (hitsForThisBucket == 1 && !iterator.hasNext()) { String strDate = (String) keyValues .get("nextFetchDate"); try { mostRecentDateFound = formatter.parse(strDate); } catch (ParseException e) { throw new RuntimeException("can't parse date :" + strDate); } } // is already being processed - skip it! if (beingProcessed.containsKey(url)) { alreadyprocessed++; continue; } Metadata metadata = fromKeyValues(keyValues); buffer.add(new Values(url, metadata)); } if (hitsForThisBucket > 0) numBuckets++; numhits += hitsForThisBucket; LOG.debug("{} key [{}], hits[{}], doc_count [{}]", logIdprefix, key, hitsForThisBucket, docCount, alreadyprocessed); } // Shuffle the URLs so that we don't get blocks of URLs from the // same // host or domain Collections.shuffle((List) buffer); } LOG.info( "{} ES query returned {} hits from {} buckets in {} msec with {} already being processed", logIdprefix, numhits, numBuckets, timeTaken, alreadyprocessed); queryTimes.addMeasurement(timeTaken); eventCounter.scope("already_being_processed").incrBy(alreadyprocessed); eventCounter.scope("ES_queries").incrBy(1); eventCounter.scope("ES_docs").incrBy(numhits); // optimise the nextFetchDate by getting the most recent value // returned in the query and add to it, unless the previous value is // within n mins in which case we'll keep it if (mostRecentDateFound != null && recentDateIncrease >= 0) { Calendar potentialNewDate = Calendar.getInstance(); potentialNewDate.setTime(mostRecentDateFound); potentialNewDate.add(Calendar.MINUTE, recentDateIncrease); Date oldDate = null; // check boundaries if (this.recentDateMinGap > 0) { Calendar low = Calendar.getInstance(); low.setTime(queryDate); low.add(Calendar.MINUTE, -recentDateMinGap); Calendar high = Calendar.getInstance(); high.setTime(queryDate); high.add(Calendar.MINUTE, recentDateMinGap); if (high.before(potentialNewDate) || low.after(potentialNewDate)) { oldDate = queryDate; } } else { oldDate = queryDate; } if (oldDate != null) { queryDate = potentialNewDate.getTime(); LOG.info( "{} lastDate changed from {} to {} based on mostRecentDateFound {}", logIdprefix, oldDate, queryDate, mostRecentDateFound); } else { LOG.info( 
"{} lastDate kept at {} based on mostRecentDateFound {}", logIdprefix, queryDate, mostRecentDateFound); } } // reset the value for next fetch date if the previous one is too old if (resetFetchDateAfterNSecs != -1) { Instant changeNeededOn = Instant.ofEpochMilli(lastTimeResetToNOW .toEpochMilli() + (resetFetchDateAfterNSecs * 1000)); if (Instant.now().isAfter(changeNeededOn)) { LOG.info( "{} lastDate set to null based on resetFetchDateAfterNSecs {}", logIdprefix, resetFetchDateAfterNSecs); queryDate = null; } } // change the date if we don't get any results at all if (numBuckets == 0) { queryDate = null; } // remove lock isInQuery.set(false); }
#vulnerable code @Override public void onResponse(SearchResponse response) { long timeTaken = System.currentTimeMillis() - timeLastQuery; Aggregations aggregs = response.getAggregations(); if (aggregs == null) { isInQuery.set(false); return; } SingleBucketAggregation sample = aggregs.get("sample"); if (sample != null) { aggregs = sample.getAggregations(); } Terms agg = aggregs.get("partition"); int numhits = 0; int numBuckets = 0; int alreadyprocessed = 0; Date mostRecentDateFound = null; SimpleDateFormat formatter = new SimpleDateFormat( "yyyy-MM-dd'T'HH:mm:ss.SSSX"); synchronized (buffer) { // For each entry Iterator<Terms.Bucket> iterator = (Iterator<Bucket>) agg .getBuckets().iterator(); while (iterator.hasNext()) { Terms.Bucket entry = iterator.next(); String key = (String) entry.getKey(); // bucket key long docCount = entry.getDocCount(); // Doc count int hitsForThisBucket = 0; // filter results so that we don't include URLs we are already // being processed TopHits topHits = entry.getAggregations().get("docs"); for (SearchHit hit : topHits.getHits().getHits()) { hitsForThisBucket++; Map<String, Object> keyValues = hit.getSourceAsMap(); String url = (String) keyValues.get("url"); LOG.debug("{} -> id [{}], _source [{}]", logIdprefix, hit.getId(), hit.getSourceAsString()); // consider only the first document of the last bucket // for optimising the nextFetchDate if (hitsForThisBucket == 1 && !iterator.hasNext()) { String strDate = (String) keyValues .get("nextFetchDate"); try { mostRecentDateFound = formatter.parse(strDate); } catch (ParseException e) { throw new RuntimeException("can't parse date :" + strDate); } } // is already being processed - skip it! if (beingProcessed.containsKey(url)) { alreadyprocessed++; continue; } Metadata metadata = fromKeyValues(keyValues); buffer.add(new Values(url, metadata)); } if (hitsForThisBucket > 0) numBuckets++; numhits += hitsForThisBucket; LOG.debug("{} key [{}], hits[{}], doc_count [{}]", logIdprefix, key, hitsForThisBucket, docCount, alreadyprocessed); } // Shuffle the URLs so that we don't get blocks of URLs from the // same // host or domain Collections.shuffle((List) buffer); } LOG.info( "{} ES query returned {} hits from {} buckets in {} msec with {} already being processed", logIdprefix, numhits, numBuckets, timeTaken, alreadyprocessed); queryTimes.addMeasurement(timeTaken); eventCounter.scope("already_being_processed").incrBy(alreadyprocessed); eventCounter.scope("ES_queries").incrBy(1); eventCounter.scope("ES_docs").incrBy(numhits); // reset the value for next fetch date if the previous one is too old if (resetFetchDateAfterNSecs != -1) { Calendar diffCal = Calendar.getInstance(); diffCal.setTime(lastDate); diffCal.add(Calendar.SECOND, resetFetchDateAfterNSecs); // compare to now if (diffCal.before(Calendar.getInstance())) { LOG.info( "{} lastDate set to null based on resetFetchDateAfterNSecs {}", logIdprefix, resetFetchDateAfterNSecs); lastDate = null; } } // optimise the nextFetchDate by getting the most recent value // returned in the query and add to it, unless the previous value is // within n mins in which case we'll keep it else if (mostRecentDateFound != null && recentDateIncrease >= 0) { Calendar potentialNewDate = Calendar.getInstance(); potentialNewDate.setTime(mostRecentDateFound); potentialNewDate.add(Calendar.MINUTE, recentDateIncrease); Date oldDate = null; // check boundaries if (this.recentDateMinGap > 0) { Calendar low = Calendar.getInstance(); low.setTime(lastDate); low.add(Calendar.MINUTE, -recentDateMinGap); 
Calendar high = Calendar.getInstance(); high.setTime(lastDate); high.add(Calendar.MINUTE, recentDateMinGap); if (high.before(potentialNewDate) || low.after(potentialNewDate)) { oldDate = lastDate; } } else { oldDate = lastDate; } if (oldDate != null) { lastDate = potentialNewDate.getTime(); LOG.info( "{} lastDate changed from {} to {} based on mostRecentDateFound {}", logIdprefix, oldDate, lastDate, mostRecentDateFound); } else { LOG.info( "{} lastDate kept at {} based on mostRecentDateFound {}", logIdprefix, lastDate, mostRecentDateFound); } } // change the date if we don't get any results at all if (numBuckets == 0) { lastDate = null; } // remove lock isInQuery.set(false); } #location 88 #vulnerability type THREAD_SAFETY_VIOLATION
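The onResponse() pair above moves the "reset the fetch date" decision off the shared, nullable Date and onto an Instant recorded when the window was last reset; the vulnerable version calls Calendar.setTime(lastDate) inside the async callback, where another thread may already have set the field to null. A minimal sketch of that policy, using hypothetical field names:

import java.time.Instant;

// Hypothetical reset policy: the decision is based on an immutable Instant
// captured when the window was opened, never on the mutable shared Date.
public class ResetPolicy {

    private final long resetAfterSecs;
    private volatile Instant lastResetToNow = Instant.now();

    public ResetPolicy(long resetAfterSecs) {
        this.resetAfterSecs = resetAfterSecs;
    }

    public void markResetToNow() {
        lastResetToNow = Instant.now();
    }

    /** True when enough time has passed since the last reset. */
    public boolean shouldReset() {
        if (resetAfterSecs < 0) {
            return false;                    // feature disabled
        }
        Instant deadline = lastResetToNow.plusSeconds(resetAfterSecs);
        return Instant.now().isAfter(deadline);
    }
}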
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void execute(Tuple input) { // triggered by the arrival of a tuple // be it a tick or normal one flushQueues(); if (isTickTuple(input)) { _collector.ack(input); return; } CountMetric metric = metricGauge.scope("activethreads"); metric.getValueAndReset(); metric.incrBy(this.activeThreads.get()); metric = metricGauge.scope("in queues"); metric.getValueAndReset(); metric.incrBy(this.fetchQueues.inQueues.get()); metric = metricGauge.scope("queues"); metric.getValueAndReset(); metric.incrBy(this.fetchQueues.queues.size()); LOG.info("[Fetcher #" + taskIndex + "] Threads : " + this.activeThreads.get() + "\tqueues : " + this.fetchQueues.queues.size() + "\tin_queues : " + this.fetchQueues.inQueues.get()); String url = input.getStringByField("url"); // check whether this tuple has a url field if (url == null) { LOG.info("[Fetcher #" + taskIndex + "] Missing url field for tuple " + input); // ignore silently _collector.ack(input); return; } fetchQueues.addFetchItem(input); }
#vulnerable code @Override public void execute(Tuple input) { // main thread in charge of acking and failing // see // https://github.com/nathanmarz/storm/wiki/Troubleshooting#nullpointerexception-from-deep-inside-storm int acked = 0; int failed = 0; int emitted = 0; // emit with or without anchors // before acking synchronized (emitQueue) { for (Object[] toemit : this.emitQueue) { String streamID = (String) toemit[0]; Tuple anchor = (Tuple) toemit[1]; Values vals = (Values) toemit[2]; if (anchor == null) _collector.emit(streamID, vals); else _collector.emit(streamID, Arrays.asList(anchor), vals); } emitted = emitQueue.size(); emitQueue.clear(); } // have a tick tuple to make sure we don't get starved synchronized (ackQueue) { for (Tuple toack : this.ackQueue) { _collector.ack(toack); } acked = ackQueue.size(); ackQueue.clear(); } synchronized (failQueue) { for (Tuple toack : this.failQueue) { _collector.fail(toack); } failed = failQueue.size(); failQueue.clear(); } if (acked + failed + emitted > 0) LOG.info("[Fetcher #" + taskIndex + "] Acked : " + acked + "\tFailed : " + failed + "\tEmitted : " + emitted); if (isTickTuple(input)) { _collector.ack(input); return; } CountMetric metric = metricGauge.scope("activethreads"); metric.getValueAndReset(); metric.incrBy(this.activeThreads.get()); metric = metricGauge.scope("in queues"); metric.getValueAndReset(); metric.incrBy(this.fetchQueues.inQueues.get()); metric = metricGauge.scope("queues"); metric.getValueAndReset(); metric.incrBy(this.fetchQueues.queues.size()); LOG.info("[Fetcher #" + taskIndex + "] Threads : " + this.activeThreads.get() + "\tqueues : " + this.fetchQueues.queues.size() + "\tin_queues : " + this.fetchQueues.inQueues.get()); String url = input.getStringByField("url"); // check whether this tuple has a url field if (url == null) { LOG.info("[Fetcher #" + taskIndex + "] Missing url field for tuple " + input); // ignore silently _collector.ack(input); return; } fetchQueues.addFetchItem(input); } #location 81 #vulnerability type THREAD_SAFETY_VIOLATION
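The execute() pair above collapses the three synchronized drain loops into a single flushQueues() call. The underlying pattern is to swap the shared list for a fresh one while holding the lock and do the slow collector work afterwards, so worker threads appending new entries are blocked only for the swap. A generic sketch with hypothetical names:

import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;

// Hypothetical queue shared between fetcher threads (producers) and the bolt's
// executor thread (consumer); the swap keeps the critical section tiny.
public class DrainableQueue<T> {

    private List<T> pending = new ArrayList<>();

    public void add(T item) {
        synchronized (this) {
            pending.add(item);
        }
    }

    public void drainTo(Consumer<T> action) {
        List<T> toProcess;
        synchronized (this) {
            toProcess = pending;
            pending = new ArrayList<>();     // queue is empty again for producers
        }
        for (T item : toProcess) {
            action.accept(item);             // slow work happens lock-free
        }
    }
}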
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void execute(Tuple tuple) { String url = tuple.getStringByField("url"); // Distinguish the value used for indexing // from the one used for the status String normalisedurl = valueForURL(tuple); LOG.info("Indexing {} as {}", url, normalisedurl); Metadata metadata = (Metadata) tuple.getValueByField("metadata"); String text = tuple.getStringByField("text"); boolean keep = filterDocument(metadata); if (!keep) { LOG.info("Filtered {}", url); eventCounter.scope("Filtered").incrBy(1); // treat it as successfully processed even if // we do not index it _collector.emit(StatusStreamName, tuple, new Values(url, metadata, Status.FETCHED)); _collector.ack(tuple); return; } String docID = org.apache.commons.codec.digest.DigestUtils .sha256Hex(normalisedurl); try { XContentBuilder builder = jsonBuilder().startObject(); // display text of the document? if (fieldNameForText() != null) { builder.field(fieldNameForText(), trimText(text)); } // send URL as field? if (fieldNameForURL() != null) { builder.field(fieldNameForURL(), normalisedurl); } // which metadata to display? Map<String, String[]> keyVals = filterMetadata(metadata); Iterator<String> iterator = keyVals.keySet().iterator(); while (iterator.hasNext()) { String fieldName = iterator.next(); String[] values = keyVals.get(fieldName); if (values.length == 1) { builder.field(fieldName, values[0]); } else if (values.length > 1) { builder.array(fieldName, values); } } builder.endObject(); String sha256hex = org.apache.commons.codec.digest.DigestUtils .sha256Hex(normalisedurl); IndexRequest indexRequest = new IndexRequest(getIndexName(metadata)) .source(builder).id(sha256hex); DocWriteRequest.OpType optype = DocWriteRequest.OpType.INDEX; if (create) { optype = DocWriteRequest.OpType.CREATE; } indexRequest.opType(optype); if (pipeline != null) { indexRequest.setPipeline(pipeline); } connection.getProcessor().add(indexRequest); eventCounter.scope("Indexed").incrBy(1); perSecMetrics.scope("Indexed").update(1); synchronized (waitAck) { waitAck.put(docID, tuple); } } catch (IOException e) { LOG.error("Error building document for ES", e); // do not send to status stream so that it gets replayed _collector.fail(tuple); if (docID != null) { synchronized (waitAck) { waitAck.invalidate(docID); } } } }
#vulnerable code @Override public void execute(Tuple tuple) { String url = tuple.getStringByField("url"); // Distinguish the value used for indexing // from the one used for the status String normalisedurl = valueForURL(tuple); LOG.info("Indexing {} as {}", url, normalisedurl); Metadata metadata = (Metadata) tuple.getValueByField("metadata"); String text = tuple.getStringByField("text"); boolean keep = filterDocument(metadata); if (!keep) { LOG.info("Filtered {}", url); eventCounter.scope("Filtered").incrBy(1); // treat it as successfully processed even if // we do not index it _collector.emit(StatusStreamName, tuple, new Values(url, metadata, Status.FETCHED)); _collector.ack(tuple); return; } String docID = org.apache.commons.codec.digest.DigestUtils .sha256Hex(normalisedurl); try { XContentBuilder builder = jsonBuilder().startObject(); // display text of the document? if (fieldNameForText() != null) { builder.field(fieldNameForText(), trimText(text)); } // send URL as field? if (fieldNameForURL() != null) { builder.field(fieldNameForURL(), normalisedurl); } // which metadata to display? Map<String, String[]> keyVals = filterMetadata(metadata); Iterator<String> iterator = keyVals.keySet().iterator(); while (iterator.hasNext()) { String fieldName = iterator.next(); String[] values = keyVals.get(fieldName); if (values.length == 1) { builder.field(fieldName, values[0]); } else if (values.length > 1) { builder.array(fieldName, values); } } builder.endObject(); IndexRequest indexRequest = new IndexRequest( getIndexName(metadata), docType, docID).source(builder); DocWriteRequest.OpType optype = DocWriteRequest.OpType.INDEX; if (create) { optype = DocWriteRequest.OpType.CREATE; } indexRequest.opType(optype); if (pipeline != null) { indexRequest.setPipeline(pipeline); } connection.getProcessor().add(indexRequest); eventCounter.scope("Indexed").incrBy(1); perSecMetrics.scope("Indexed").update(1); synchronized (waitAck) { waitAck.put(docID, tuple); } } catch (IOException e) { LOG.error("Error building document for ES", e); // do not send to status stream so that it gets replayed _collector.fail(tuple); if (docID != null) { synchronized (waitAck) { waitAck.invalidate(docID); } } } } #location 60 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxInflightParamName, 1); maxBufferSize = ConfUtils.getInt(stormConf, ESStatusBufferSizeParamName, 100); randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName, true); maxSecSinceQueriedDate = ConfUtils.getInt(stormConf, ESMaxSecsSinceQueriedDateParamName, -1); sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName, "nextFetchDate"); super.open(stormConf, context, collector); partitioner = new URLPartitioner(); partitioner.configure(stormConf); context.registerMetric("beingProcessed", new IMetric() { @Override public Object getValueAndReset() { return beingProcessed.size(); } }, 10); }
#vulnerable code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName, "status"); docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName, "status"); maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxInflightParamName, 1); maxBufferSize = ConfUtils.getInt(stormConf, ESStatusBufferSizeParamName, 100); randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName, true); maxSecSinceQueriedDate = ConfUtils.getInt(stormConf, ESMaxSecsSinceQueriedDateParamName, -1); sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName, "nextFetchDate"); // one ES client per JVM synchronized (ElasticSearchSpout.class) { try { if (client == null) { client = ElasticSearchConnection.getClient(stormConf, ESBoltType); } } catch (Exception e1) { LOG.error("Can't connect to ElasticSearch", e1); throw new RuntimeException(e1); } } // if more than one instance is used we expect their number to be the // same as the number of shards int totalTasks = context .getComponentTasks(context.getThisComponentId()).size(); if (totalTasks > 1) { // determine the number of shards so that we can restrict the // search ClusterSearchShardsRequest request = new ClusterSearchShardsRequest( indexName); ClusterSearchShardsResponse shardresponse = client.admin() .cluster().searchShards(request).actionGet(); ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups(); if (totalTasks != shardgroups.length) { throw new RuntimeException( "Number of ES spout instances should be the same as number of shards (" + shardgroups.length + ") but is " + totalTasks); } shardID = shardgroups[context.getThisTaskIndex()].getShardId(); LOG.info("Assigned shard ID {}", shardID); } partitioner = new URLPartitioner(); partitioner.configure(stormConf); _collector = collector; this.eventCounter = context.registerMetric("counters", new MultiCountMetric(), 10); context.registerMetric("beingProcessed", new IMetric() { @Override public Object getValueAndReset() { return beingProcessed.size(); } }, 10); context.registerMetric("buffer_size", new IMetric() { @Override public Object getValueAndReset() { return buffer.size(); } }, 10); } #location 15 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { partitionField = ConfUtils.getString(stormConf, ESStatusRoutingFieldParamName); bucketSortField = ConfUtils.getString(stormConf, ESStatusBucketSortFieldParamName, bucketSortField); totalSortField = ConfUtils.getString(stormConf, ESStatusGlobalSortFieldParamName); maxURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxURLsParamName, 1); maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName, 10); minDelayBetweenQueries = ConfUtils.getLong(stormConf, ESStatusMinDelayParamName, 2000); super.open(stormConf, context, collector); }
#vulnerable code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName, "status"); docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName, "status"); partitionField = ConfUtils.getString(stormConf, ESStatusRoutingFieldParamName); bucketSortField = ConfUtils.getString(stormConf, ESStatusBucketSortFieldParamName, bucketSortField); totalSortField = ConfUtils.getString(stormConf, ESStatusGlobalSortFieldParamName); maxURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxURLsParamName, 1); maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName, 10); minDelayBetweenQueries = ConfUtils.getLong(stormConf, ESStatusMinDelayParamName, 2000); // one ES client per JVM synchronized (AggregationSpout.class) { try { if (client == null) { client = ElasticSearchConnection.getClient(stormConf, ESBoltType); } } catch (Exception e1) { LOG.error("Can't connect to ElasticSearch", e1); throw new RuntimeException(e1); } } // if more than one instance is used we expect their number to be the // same as the number of shards int totalTasks = context .getComponentTasks(context.getThisComponentId()).size(); if (totalTasks > 1) { logIdprefix = "[" + context.getThisComponentId() + " #" + context.getThisTaskIndex() + "] "; // determine the number of shards so that we can restrict the // search ClusterSearchShardsRequest request = new ClusterSearchShardsRequest( indexName); ClusterSearchShardsResponse shardresponse = client.admin() .cluster().searchShards(request).actionGet(); ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups(); if (totalTasks != shardgroups.length) { throw new RuntimeException( "Number of ES spout instances should be the same as number of shards (" + shardgroups.length + ") but is " + totalTasks); } shardID = shardgroups[context.getThisTaskIndex()].getShardId(); LOG.info("{} assigned shard ID {}", logIdprefix, shardID); } _collector = collector; this.eventCounter = context.registerMetric("counters", new MultiCountMetric(), 10); context.registerMetric("buffer_size", new IMetric() { @Override public Object getValueAndReset() { return buffer.size(); } }, 10); } #location 13 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { partitionField = ConfUtils.getString(stormConf, ESStatusRoutingFieldParamName); bucketSortField = ConfUtils.getString(stormConf, ESStatusBucketSortFieldParamName, bucketSortField); totalSortField = ConfUtils.getString(stormConf, ESStatusGlobalSortFieldParamName); maxURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxURLsParamName, 1); maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName, 10); minDelayBetweenQueries = ConfUtils.getLong(stormConf, ESStatusMinDelayParamName, 2000); super.open(stormConf, context, collector); }
#vulnerable code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName, "status"); docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName, "status"); partitionField = ConfUtils.getString(stormConf, ESStatusRoutingFieldParamName); bucketSortField = ConfUtils.getString(stormConf, ESStatusBucketSortFieldParamName, bucketSortField); totalSortField = ConfUtils.getString(stormConf, ESStatusGlobalSortFieldParamName); maxURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxURLsParamName, 1); maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName, 10); minDelayBetweenQueries = ConfUtils.getLong(stormConf, ESStatusMinDelayParamName, 2000); // one ES client per JVM synchronized (AggregationSpout.class) { try { if (client == null) { client = ElasticSearchConnection.getClient(stormConf, ESBoltType); } } catch (Exception e1) { LOG.error("Can't connect to ElasticSearch", e1); throw new RuntimeException(e1); } } // if more than one instance is used we expect their number to be the // same as the number of shards int totalTasks = context .getComponentTasks(context.getThisComponentId()).size(); if (totalTasks > 1) { logIdprefix = "[" + context.getThisComponentId() + " #" + context.getThisTaskIndex() + "] "; // determine the number of shards so that we can restrict the // search ClusterSearchShardsRequest request = new ClusterSearchShardsRequest( indexName); ClusterSearchShardsResponse shardresponse = client.admin() .cluster().searchShards(request).actionGet(); ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups(); if (totalTasks != shardgroups.length) { throw new RuntimeException( "Number of ES spout instances should be the same as number of shards (" + shardgroups.length + ") but is " + totalTasks); } shardID = shardgroups[context.getThisTaskIndex()].getShardId(); LOG.info("{} assigned shard ID {}", logIdprefix, shardID); } _collector = collector; this.eventCounter = context.registerMetric("counters", new MultiCountMetric(), 10); context.registerMetric("buffer_size", new IMetric() { @Override public Object getValueAndReset() { return buffer.size(); } }, 10); } #location 64 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override protected void populateBuffer() { if (queryDate == null) { queryDate = new Date(); lastTimeResetToNOW = Instant.now(); } String formattedLastDate = ISODateTimeFormat.dateTimeNoMillis().print( queryDate.getTime()); LOG.info("{} Populating buffer with nextFetchDate <= {}", logIdprefix, formattedLastDate); QueryBuilder queryBuilder = QueryBuilders.rangeQuery("nextFetchDate") .lte(formattedLastDate); if (filterQuery != null) { queryBuilder = boolQuery().must(queryBuilder).filter( QueryBuilders.queryStringQuery(filterQuery)); } SearchRequest request = new SearchRequest(indexName).types(docType) .searchType(SearchType.QUERY_THEN_FETCH); SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); sourceBuilder.query(queryBuilder); sourceBuilder.from(0); sourceBuilder.size(0); sourceBuilder.explain(false); sourceBuilder.trackTotalHits(false); TermsAggregationBuilder aggregations = AggregationBuilders .terms("partition").field(partitionField).size(maxBucketNum); TopHitsAggregationBuilder tophits = AggregationBuilders.topHits("docs") .size(maxURLsPerBucket).explain(false); // sort within a bucket if (StringUtils.isNotBlank(bucketSortField)) { FieldSortBuilder sorter = SortBuilders.fieldSort(bucketSortField) .order(SortOrder.ASC); tophits.sort(sorter); } aggregations.subAggregation(tophits); // sort between buckets if (StringUtils.isNotBlank(totalSortField)) { MinAggregationBuilder minBuilder = AggregationBuilders.min( "top_hit").field(totalSortField); aggregations.subAggregation(minBuilder); aggregations.order(BucketOrder.aggregation("top_hit", true)); } if (sample) { DiversifiedAggregationBuilder sab = new DiversifiedAggregationBuilder( "sample"); sab.field(partitionField).maxDocsPerValue(maxURLsPerBucket); sab.shardSize(maxURLsPerBucket * maxBucketNum); sab.subAggregation(aggregations); sourceBuilder.aggregation(sab); } else { sourceBuilder.aggregation(aggregations); } request.source(sourceBuilder); // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-preference.html // _shards:2,3 if (shardID != -1) { request.preference("_shards:" + shardID); } // dump query to log LOG.debug("{} ES query {}", logIdprefix, request.toString()); isInQuery.set(true); client.searchAsync(request, this); }
#vulnerable code @Override protected void populateBuffer() { if (lastDate == null) { lastDate = new Date(); } String formattedLastDate = ISODateTimeFormat.dateTimeNoMillis().print( lastDate.getTime()); LOG.info("{} Populating buffer with nextFetchDate <= {}", logIdprefix, formattedLastDate); QueryBuilder queryBuilder = QueryBuilders.rangeQuery("nextFetchDate") .lte(formattedLastDate); if (filterQuery != null) { queryBuilder = boolQuery().must(queryBuilder).filter( QueryBuilders.queryStringQuery(filterQuery)); } SearchRequest request = new SearchRequest(indexName).types(docType) .searchType(SearchType.QUERY_THEN_FETCH); SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); sourceBuilder.query(queryBuilder); sourceBuilder.from(0); sourceBuilder.size(0); sourceBuilder.explain(false); sourceBuilder.trackTotalHits(false); TermsAggregationBuilder aggregations = AggregationBuilders .terms("partition").field(partitionField).size(maxBucketNum); TopHitsAggregationBuilder tophits = AggregationBuilders.topHits("docs") .size(maxURLsPerBucket).explain(false); // sort within a bucket if (StringUtils.isNotBlank(bucketSortField)) { FieldSortBuilder sorter = SortBuilders.fieldSort(bucketSortField) .order(SortOrder.ASC); tophits.sort(sorter); } aggregations.subAggregation(tophits); // sort between buckets if (StringUtils.isNotBlank(totalSortField)) { MinAggregationBuilder minBuilder = AggregationBuilders.min( "top_hit").field(totalSortField); aggregations.subAggregation(minBuilder); aggregations.order(BucketOrder.aggregation("top_hit", true)); } if (sample) { DiversifiedAggregationBuilder sab = new DiversifiedAggregationBuilder( "sample"); sab.field(partitionField).maxDocsPerValue(maxURLsPerBucket); sab.shardSize(maxURLsPerBucket * maxBucketNum); sab.subAggregation(aggregations); sourceBuilder.aggregation(sab); } else { sourceBuilder.aggregation(aggregations); } request.source(sourceBuilder); // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-preference.html // _shards:2,3 if (shardID != -1) { request.preference("_shards:" + shardID); } // dump query to log LOG.debug("{} ES query {}", logIdprefix, request.toString()); isInQuery.set(true); client.searchAsync(request, this); } #location 9 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code private List<RegexRule> readRules(String rulesFile) { List<RegexRule> rules = new ArrayList<RegexRule>(); try { InputStream regexStream = getClass().getClassLoader() .getResourceAsStream(rulesFile); Reader reader = new InputStreamReader(regexStream, StandardCharsets.UTF_8); BufferedReader in = new BufferedReader(reader); String line; while ((line = in.readLine()) != null) { if (line.length() == 0) { continue; } char first = line.charAt(0); boolean sign = false; switch (first) { case '+': sign = true; break; case '-': sign = false; break; case ' ': case '\n': case '#': // skip blank & comment lines continue; default: throw new IOException("Invalid first character: " + line); } String regex = line.substring(1); LOG.trace("Adding rule [{}]", regex); RegexRule rule = createRule(sign, regex); rules.add(rule); } } catch (IOException e) { LOG.error("There was an error reading the default-regex-filters file"); e.printStackTrace(); } return rules; }
#vulnerable code private List<RegexRule> readRules(String rulesFile) { List<RegexRule> rules = new ArrayList<RegexRule>(); try { InputStream regexStream = getClass().getClassLoader() .getResourceAsStream(rulesFile); Reader reader = new InputStreamReader(regexStream, "UTF-8"); BufferedReader in = new BufferedReader(reader); String line; while ((line = in.readLine()) != null) { if (line.length() == 0) { continue; } char first = line.charAt(0); boolean sign = false; switch (first) { case '+': sign = true; break; case '-': sign = false; break; case ' ': case '\n': case '#': // skip blank & comment lines continue; default: throw new IOException("Invalid first character: " + line); } String regex = line.substring(1); LOG.trace("Adding rule [{}]", regex); RegexRule rule = createRule(sign, regex); rules.add(rule); } } catch (IOException e) { LOG.error("There was an error reading the default-regex-filters file"); e.printStackTrace(); } return rules; } #location 39 #vulnerability type RESOURCE_LEAK
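The readRules() pair above is the one resource-leak case in this batch: the vulnerable version never closes the BufferedReader when an IOException is thrown mid-loop. A compact sketch of the same loop written with try-with-resources, which closes the reader on every exit path (hypothetical rule representation and resource name):

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;

// Hypothetical reader of +/- prefixed regex rules; try-with-resources guarantees
// the stream and reader are closed even if a line fails to parse.
public class RuleFileReader {

    public List<String> read(String resourceName) {
        List<String> rules = new ArrayList<>();
        InputStream stream = getClass().getClassLoader()
                .getResourceAsStream(resourceName);
        if (stream == null) {
            return rules;                       // resource missing, nothing to read
        }
        try (BufferedReader in = new BufferedReader(
                new InputStreamReader(stream, StandardCharsets.UTF_8))) {
            String line;
            while ((line = in.readLine()) != null) {
                if (line.isEmpty() || line.charAt(0) == '#') {
                    continue;                   // skip blanks and comments
                }
                rules.add(line);                // keep the +/- prefix for the caller
            }
        } catch (IOException e) {
            throw new RuntimeException("Error reading " + resourceName, e);
        }
        return rules;
    }
}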
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { partitionField = ConfUtils.getString(stormConf, ESStatusRoutingFieldParamName); bucketSortField = ConfUtils.getString(stormConf, ESStatusBucketSortFieldParamName, bucketSortField); totalSortField = ConfUtils.getString(stormConf, ESStatusGlobalSortFieldParamName); maxURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxURLsParamName, 1); maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName, 10); minDelayBetweenQueries = ConfUtils.getLong(stormConf, ESStatusMinDelayParamName, 2000); super.open(stormConf, context, collector); }
#vulnerable code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName, "status"); docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName, "status"); partitionField = ConfUtils.getString(stormConf, ESStatusRoutingFieldParamName); bucketSortField = ConfUtils.getString(stormConf, ESStatusBucketSortFieldParamName, bucketSortField); totalSortField = ConfUtils.getString(stormConf, ESStatusGlobalSortFieldParamName); maxURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxURLsParamName, 1); maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName, 10); minDelayBetweenQueries = ConfUtils.getLong(stormConf, ESStatusMinDelayParamName, 2000); // one ES client per JVM synchronized (AggregationSpout.class) { try { if (client == null) { client = ElasticSearchConnection.getClient(stormConf, ESBoltType); } } catch (Exception e1) { LOG.error("Can't connect to ElasticSearch", e1); throw new RuntimeException(e1); } } // if more than one instance is used we expect their number to be the // same as the number of shards int totalTasks = context .getComponentTasks(context.getThisComponentId()).size(); if (totalTasks > 1) { logIdprefix = "[" + context.getThisComponentId() + " #" + context.getThisTaskIndex() + "] "; // determine the number of shards so that we can restrict the // search ClusterSearchShardsRequest request = new ClusterSearchShardsRequest( indexName); ClusterSearchShardsResponse shardresponse = client.admin() .cluster().searchShards(request).actionGet(); ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups(); if (totalTasks != shardgroups.length) { throw new RuntimeException( "Number of ES spout instances should be the same as number of shards (" + shardgroups.length + ") but is " + totalTasks); } shardID = shardgroups[context.getThisTaskIndex()].getShardId(); LOG.info("{} assigned shard ID {}", logIdprefix, shardID); } _collector = collector; this.eventCounter = context.registerMetric("counters", new MultiCountMetric(), 10); context.registerMetric("buffer_size", new IMetric() { @Override public Object getValueAndReset() { return buffer.size(); } }, 10); } #location 60 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void onResponse(SearchResponse response) { long timeTaken = System.currentTimeMillis() - timeLastQuery; SearchHit[] hits = response.getHits().getHits(); int numBuckets = hits.length; int alreadyprocessed = 0; int numDocs = 0; synchronized (buffer) { for (SearchHit hit : hits) { Map<String, SearchHits> innerHits = hit.getInnerHits(); // wanted just one per bucket : no inner hits if (innerHits == null) { numDocs++; if (!addHitToBuffer(hit)) { alreadyprocessed++; } continue; } // more than one per bucket SearchHits inMyBucket = innerHits.get("urls_per_bucket"); for (SearchHit subHit : inMyBucket.getHits()) { numDocs++; if (!addHitToBuffer(subHit)) { alreadyprocessed++; } } } // Shuffle the URLs so that we don't get blocks of URLs from the // same host or domain if (numBuckets != numDocs) { Collections.shuffle((List) buffer); } } queryTimes.addMeasurement(timeTaken); // could be derived from the count of query times above eventCounter.scope("ES_queries").incrBy(1); eventCounter.scope("ES_docs").incrBy(numDocs); eventCounter.scope("already_being_processed").incrBy(alreadyprocessed); LOG.info( "{} ES query returned {} hits from {} buckets in {} msec with {} already being processed", logIdprefix, numDocs, numBuckets, timeTaken, alreadyprocessed); // reset the value for next fetch date if the previous one is too old if (resetFetchDateAfterNSecs != -1) { Instant changeNeededOn = Instant.ofEpochMilli(lastTimeResetToNOW .toEpochMilli() + (resetFetchDateAfterNSecs * 1000)); if (Instant.now().isAfter(changeNeededOn)) { LOG.info("lastDate reset based on resetFetchDateAfterNSecs {}", resetFetchDateAfterNSecs); queryDate = null; lastStartOffset = 0; } } // no more results? if (numBuckets == 0) { queryDate = null; lastStartOffset = 0; } // still got some results but paging won't help else if (numBuckets < maxBucketNum) { lastStartOffset = 0; } else { lastStartOffset += numBuckets; } // remove lock isInQuery.set(false); }
#vulnerable code @Override public void onResponse(SearchResponse response) { long timeTaken = System.currentTimeMillis() - timeLastQuery; SearchHit[] hits = response.getHits().getHits(); int numBuckets = hits.length; // reset the value for next fetch date if the previous one is too old if (resetFetchDateAfterNSecs != -1) { Calendar diffCal = Calendar.getInstance(); diffCal.setTime(lastDate); diffCal.add(Calendar.SECOND, resetFetchDateAfterNSecs); // compare to now if (diffCal.before(Calendar.getInstance())) { LOG.info( "{} lastDate set to null based on resetFetchDateAfterNSecs {}", logIdprefix, resetFetchDateAfterNSecs); lastDate = null; lastStartOffset = 0; } } int alreadyprocessed = 0; int numDocs = 0; synchronized (buffer) { for (SearchHit hit : hits) { Map<String, SearchHits> innerHits = hit.getInnerHits(); // wanted just one per bucket : no inner hits if (innerHits == null) { numDocs++; if (!addHitToBuffer(hit)) { alreadyprocessed++; } continue; } // more than one per bucket SearchHits inMyBucket = innerHits.get("urls_per_bucket"); for (SearchHit subHit : inMyBucket.getHits()) { numDocs++; if (!addHitToBuffer(subHit)) { alreadyprocessed++; } } } // Shuffle the URLs so that we don't get blocks of URLs from the // same host or domain if (numBuckets != numDocs) { Collections.shuffle((List) buffer); } } queryTimes.addMeasurement(timeTaken); // could be derived from the count of query times above eventCounter.scope("ES_queries").incrBy(1); eventCounter.scope("ES_docs").incrBy(numDocs); eventCounter.scope("already_being_processed").incrBy(alreadyprocessed); LOG.info( "{} ES query returned {} hits from {} buckets in {} msec with {} already being processed", logIdprefix, numDocs, numBuckets, timeTaken, alreadyprocessed); // no more results? if (numBuckets == 0) { lastDate = null; lastStartOffset = 0; } // still got some results but paging won't help else if (numBuckets < maxBucketNum) { lastStartOffset = 0; } else { lastStartOffset += numBuckets; } // remove lock isInQuery.set(false); } #location 18 #vulnerability type THREAD_SAFETY_VIOLATION
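Both onResponse() variants above do all of the buffer work, adding hits and shuffling, inside one synchronized (buffer) block, so whichever thread polls the buffer sees either the complete shuffled batch or nothing. A stripped-down sketch of that idea with hypothetical names:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

// Hypothetical buffer shared between the search callback (producer) and the
// spout's nextTuple loop (consumer).
public class SharedBuffer<T> {

    private final List<T> buffer = new ArrayList<>();

    public void addBatch(List<T> hits) {
        synchronized (buffer) {
            buffer.addAll(hits);
            Collections.shuffle(buffer);     // still under the same lock
        }
    }

    public T poll() {
        synchronized (buffer) {
            return buffer.isEmpty() ? null : buffer.remove(0);
        }
    }
}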
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { partitionField = ConfUtils.getString(stormConf, ESStatusRoutingFieldParamName); bucketSortField = ConfUtils.getString(stormConf, ESStatusBucketSortFieldParamName, bucketSortField); totalSortField = ConfUtils.getString(stormConf, ESStatusGlobalSortFieldParamName); maxURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxURLsParamName, 1); maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName, 10); minDelayBetweenQueries = ConfUtils.getLong(stormConf, ESStatusMinDelayParamName, 2000); super.open(stormConf, context, collector); }
#vulnerable code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName, "status"); docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName, "status"); partitionField = ConfUtils.getString(stormConf, ESStatusRoutingFieldParamName); bucketSortField = ConfUtils.getString(stormConf, ESStatusBucketSortFieldParamName, bucketSortField); totalSortField = ConfUtils.getString(stormConf, ESStatusGlobalSortFieldParamName); maxURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxURLsParamName, 1); maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName, 10); minDelayBetweenQueries = ConfUtils.getLong(stormConf, ESStatusMinDelayParamName, 2000); // one ES client per JVM synchronized (AggregationSpout.class) { try { if (client == null) { client = ElasticSearchConnection.getClient(stormConf, ESBoltType); } } catch (Exception e1) { LOG.error("Can't connect to ElasticSearch", e1); throw new RuntimeException(e1); } } // if more than one instance is used we expect their number to be the // same as the number of shards int totalTasks = context .getComponentTasks(context.getThisComponentId()).size(); if (totalTasks > 1) { logIdprefix = "[" + context.getThisComponentId() + " #" + context.getThisTaskIndex() + "] "; // determine the number of shards so that we can restrict the // search ClusterSearchShardsRequest request = new ClusterSearchShardsRequest( indexName); ClusterSearchShardsResponse shardresponse = client.admin() .cluster().searchShards(request).actionGet(); ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups(); if (totalTasks != shardgroups.length) { throw new RuntimeException( "Number of ES spout instances should be the same as number of shards (" + shardgroups.length + ") but is " + totalTasks); } shardID = shardgroups[context.getThisTaskIndex()].getShardId(); LOG.info("{} assigned shard ID {}", logIdprefix, shardID); } _collector = collector; this.eventCounter = context.registerMetric("counters", new MultiCountMetric(), 10); context.registerMetric("buffer_size", new IMetric() { @Override public Object getValueAndReset() { return buffer.size(); } }, 10); } #location 16 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxInflightParamName, 1); maxBufferSize = ConfUtils.getInt(stormConf, ESStatusBufferSizeParamName, 100); randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName, true); maxSecSinceQueriedDate = ConfUtils.getInt(stormConf, ESMaxSecsSinceQueriedDateParamName, -1); sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName, "nextFetchDate"); super.open(stormConf, context, collector); partitioner = new URLPartitioner(); partitioner.configure(stormConf); context.registerMetric("beingProcessed", new IMetric() { @Override public Object getValueAndReset() { return beingProcessed.size(); } }, 10); }
#vulnerable code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName, "status"); docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName, "status"); maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxInflightParamName, 1); maxBufferSize = ConfUtils.getInt(stormConf, ESStatusBufferSizeParamName, 100); randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName, true); maxSecSinceQueriedDate = ConfUtils.getInt(stormConf, ESMaxSecsSinceQueriedDateParamName, -1); sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName, "nextFetchDate"); // one ES client per JVM synchronized (ElasticSearchSpout.class) { try { if (client == null) { client = ElasticSearchConnection.getClient(stormConf, ESBoltType); } } catch (Exception e1) { LOG.error("Can't connect to ElasticSearch", e1); throw new RuntimeException(e1); } } // if more than one instance is used we expect their number to be the // same as the number of shards int totalTasks = context .getComponentTasks(context.getThisComponentId()).size(); if (totalTasks > 1) { // determine the number of shards so that we can restrict the // search ClusterSearchShardsRequest request = new ClusterSearchShardsRequest( indexName); ClusterSearchShardsResponse shardresponse = client.admin() .cluster().searchShards(request).actionGet(); ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups(); if (totalTasks != shardgroups.length) { throw new RuntimeException( "Number of ES spout instances should be the same as number of shards (" + shardgroups.length + ") but is " + totalTasks); } shardID = shardgroups[context.getThisTaskIndex()].getShardId(); LOG.info("Assigned shard ID {}", shardID); } partitioner = new URLPartitioner(); partitioner.configure(stormConf); _collector = collector; this.eventCounter = context.registerMetric("counters", new MultiCountMetric(), 10); context.registerMetric("beingProcessed", new IMetric() { @Override public Object getValueAndReset() { return beingProcessed.size(); } }, 10); context.registerMetric("buffer_size", new IMetric() { @Override public Object getValueAndReset() { return buffer.size(); } }, 10); } #location 7 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxInflightParamName, 1); maxBufferSize = ConfUtils.getInt(stormConf, ESStatusBufferSizeParamName, 100); randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName, true); maxSecSinceQueriedDate = ConfUtils.getInt(stormConf, ESMaxSecsSinceQueriedDateParamName, -1); sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName, "nextFetchDate"); super.open(stormConf, context, collector); partitioner = new URLPartitioner(); partitioner.configure(stormConf); context.registerMetric("beingProcessed", new IMetric() { @Override public Object getValueAndReset() { return beingProcessed.size(); } }, 10); }
#vulnerable code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName, "status"); docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName, "status"); maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxInflightParamName, 1); maxBufferSize = ConfUtils.getInt(stormConf, ESStatusBufferSizeParamName, 100); randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName, true); maxSecSinceQueriedDate = ConfUtils.getInt(stormConf, ESMaxSecsSinceQueriedDateParamName, -1); sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName, "nextFetchDate"); // one ES client per JVM synchronized (ElasticSearchSpout.class) { try { if (client == null) { client = ElasticSearchConnection.getClient(stormConf, ESBoltType); } } catch (Exception e1) { LOG.error("Can't connect to ElasticSearch", e1); throw new RuntimeException(e1); } } // if more than one instance is used we expect their number to be the // same as the number of shards int totalTasks = context .getComponentTasks(context.getThisComponentId()).size(); if (totalTasks > 1) { // determine the number of shards so that we can restrict the // search ClusterSearchShardsRequest request = new ClusterSearchShardsRequest( indexName); ClusterSearchShardsResponse shardresponse = client.admin() .cluster().searchShards(request).actionGet(); ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups(); if (totalTasks != shardgroups.length) { throw new RuntimeException( "Number of ES spout instances should be the same as number of shards (" + shardgroups.length + ") but is " + totalTasks); } shardID = shardgroups[context.getThisTaskIndex()].getShardId(); LOG.info("Assigned shard ID {}", shardID); } partitioner = new URLPartitioner(); partitioner.configure(stormConf); _collector = collector; this.eventCounter = context.registerMetric("counters", new MultiCountMetric(), 10); context.registerMetric("beingProcessed", new IMetric() { @Override public Object getValueAndReset() { return beingProcessed.size(); } }, 10); context.registerMetric("buffer_size", new IMetric() { @Override public Object getValueAndReset() { return buffer.size(); } }, 10); } #location 55 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void filter(String url, byte[] content, DocumentFragment doc, ParseResult parse) { // check whether the metadata already contains a lang value // in which case we normalise its value and use it Metadata m = parse.get(url).getMetadata(); String extractedValue = m.getFirstValue(extractedKeyName); if (StringUtils.isNotBlank(extractedValue) && extractedValue.length() > 1) { extractedValue = extractedValue.substring(0, 2).toLowerCase( Locale.ENGLISH); LOG.info("Lang: {} extracted from page for {}", extractedValue, url); m.setValue(mdKey, extractedValue); return; } String text = parse.get(url).getText(); if (StringUtils.isBlank(text)) { return; } TextObject textObject = textObjectFactory.forText(text); synchronized (languageDetector) { List<DetectedLanguage> probs = languageDetector .getProbabilities(textObject); if (probs == null || probs.size() == 0) { return; } for (DetectedLanguage lang : probs) { if (lang.getProbability() >= minProb) { String code = lang.getLocale().getLanguage(); parse.get(url).getMetadata().addValue(mdKey, code); } } } }
#vulnerable code @Override public void filter(String url, byte[] content, DocumentFragment doc, ParseResult parse) { // check whether the metadata already contains a lang value // in which case we might want to skip if (mdSkip != null) { String existingVal = parse.get(url).getMetadata() .getFirstValue(mdSkip); if (StringUtils.isNotBlank(existingVal)) { return; } } String text = parse.get(url).getText(); if (StringUtils.isBlank(text)) { return; } TextObject textObject = textObjectFactory.forText(text); synchronized (languageDetector) { List<DetectedLanguage> probs = languageDetector .getProbabilities(textObject); if (probs == null || probs.size() == 0) { return; } for (DetectedLanguage lang : probs) { if (lang.getProbability() >= minProb) { String code = lang.getLocale().getLanguage(); parse.get(url).getMetadata().addValue(mdKey, code); } } } } #location 9 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void onResponse(SearchResponse response) { long timeTaken = System.currentTimeMillis() - timeLastQuery; Aggregations aggregs = response.getAggregations(); if (aggregs == null) { isInQuery.set(false); return; } SingleBucketAggregation sample = aggregs.get("sample"); if (sample != null) { aggregs = sample.getAggregations(); } Terms agg = aggregs.get("partition"); int numhits = 0; int numBuckets = 0; int alreadyprocessed = 0; Date mostRecentDateFound = null; SimpleDateFormat formatter = new SimpleDateFormat( "yyyy-MM-dd'T'HH:mm:ss.SSSX"); synchronized (buffer) { // For each entry Iterator<Terms.Bucket> iterator = (Iterator<Bucket>) agg .getBuckets().iterator(); while (iterator.hasNext()) { Terms.Bucket entry = iterator.next(); String key = (String) entry.getKey(); // bucket key long docCount = entry.getDocCount(); // Doc count int hitsForThisBucket = 0; // filter results so that we don't include URLs we are already // being processed TopHits topHits = entry.getAggregations().get("docs"); for (SearchHit hit : topHits.getHits().getHits()) { hitsForThisBucket++; Map<String, Object> keyValues = hit.getSourceAsMap(); String url = (String) keyValues.get("url"); LOG.debug("{} -> id [{}], _source [{}]", logIdprefix, hit.getId(), hit.getSourceAsString()); // consider only the first document of the last bucket // for optimising the nextFetchDate if (hitsForThisBucket == 1 && !iterator.hasNext()) { String strDate = (String) keyValues .get("nextFetchDate"); try { mostRecentDateFound = formatter.parse(strDate); } catch (ParseException e) { throw new RuntimeException("can't parse date :" + strDate); } } // is already being processed - skip it! if (beingProcessed.containsKey(url)) { alreadyprocessed++; continue; } Metadata metadata = fromKeyValues(keyValues); buffer.add(new Values(url, metadata)); } if (hitsForThisBucket > 0) numBuckets++; numhits += hitsForThisBucket; LOG.debug("{} key [{}], hits[{}], doc_count [{}]", logIdprefix, key, hitsForThisBucket, docCount, alreadyprocessed); } // Shuffle the URLs so that we don't get blocks of URLs from the // same // host or domain Collections.shuffle((List) buffer); } LOG.info( "{} ES query returned {} hits from {} buckets in {} msec with {} already being processed", logIdprefix, numhits, numBuckets, timeTaken, alreadyprocessed); queryTimes.addMeasurement(timeTaken); eventCounter.scope("already_being_processed").incrBy(alreadyprocessed); eventCounter.scope("ES_queries").incrBy(1); eventCounter.scope("ES_docs").incrBy(numhits); // reset the value for next fetch date if the previous one is too old if (resetFetchDateAfterNSecs != -1) { Calendar diffCal = Calendar.getInstance(); diffCal.setTime(lastDate); diffCal.add(Calendar.SECOND, resetFetchDateAfterNSecs); // compare to now if (diffCal.before(Calendar.getInstance())) { LOG.info( "{} lastDate set to null based on resetFetchDateAfterNSecs {}", logIdprefix, resetFetchDateAfterNSecs); lastDate = null; } } // optimise the nextFetchDate by getting the most recent value // returned in the query and add to it, unless the previous value is // within n mins in which case we'll keep it else if (mostRecentDateFound != null && recentDateIncrease >= 0) { Calendar potentialNewDate = Calendar.getInstance(); potentialNewDate.setTime(mostRecentDateFound); potentialNewDate.add(Calendar.MINUTE, recentDateIncrease); Date oldDate = null; // check boundaries if (this.recentDateMinGap > 0) { Calendar low = Calendar.getInstance(); low.setTime(lastDate); low.add(Calendar.MINUTE, -recentDateMinGap); Calendar high = Calendar.getInstance(); high.setTime(lastDate); high.add(Calendar.MINUTE, recentDateMinGap); if (high.before(potentialNewDate) || low.after(potentialNewDate)) { oldDate = lastDate; } } else { oldDate = lastDate; } if (oldDate != null) { lastDate = potentialNewDate.getTime(); LOG.info( "{} lastDate changed from {} to {} based on mostRecentDateFound {}", logIdprefix, oldDate, lastDate, mostRecentDateFound); } else { LOG.info( "{} lastDate kept at {} based on mostRecentDateFound {}", logIdprefix, lastDate, mostRecentDateFound); } } // change the date if we don't get any results at all if (numBuckets == 0) { lastDate = null; } // remove lock isInQuery.set(false); }
#vulnerable code @Override public void onResponse(SearchResponse response) { long timeTaken = System.currentTimeMillis() - timeLastQuery; Aggregations aggregs = response.getAggregations(); if (aggregs == null) { isInQuery.set(false); return; } SingleBucketAggregation sample = aggregs.get("sample"); if (sample != null) { aggregs = sample.getAggregations(); } Terms agg = aggregs.get("partition"); int numhits = 0; int numBuckets = 0; int alreadyprocessed = 0; Date mostRecentDateFound = null; SimpleDateFormat formatter = new SimpleDateFormat( "yyyy-MM-dd'T'HH:mm:ss.SSSX"); synchronized (buffer) { // For each entry Iterator<Terms.Bucket> iterator = (Iterator<Bucket>) agg .getBuckets().iterator(); while (iterator.hasNext()) { Terms.Bucket entry = iterator.next(); String key = (String) entry.getKey(); // bucket key long docCount = entry.getDocCount(); // Doc count int hitsForThisBucket = 0; // filter results so that we don't include URLs we are already // being processed TopHits topHits = entry.getAggregations().get("docs"); for (SearchHit hit : topHits.getHits().getHits()) { hitsForThisBucket++; Map<String, Object> keyValues = hit.getSourceAsMap(); String url = (String) keyValues.get("url"); LOG.debug("{} -> id [{}], _source [{}]", logIdprefix, hit.getId(), hit.getSourceAsString()); // consider only the first document of the last bucket // for optimising the nextFetchDate if (hitsForThisBucket == 1 && !iterator.hasNext()) { String strDate = (String) keyValues .get("nextFetchDate"); try { mostRecentDateFound = formatter.parse(strDate); } catch (ParseException e) { throw new RuntimeException("can't parse date :" + strDate); } } // is already being processed - skip it! if (beingProcessed.containsKey(url)) { alreadyprocessed++; continue; } Metadata metadata = fromKeyValues(keyValues); buffer.add(new Values(url, metadata)); } if (hitsForThisBucket > 0) numBuckets++; numhits += hitsForThisBucket; LOG.debug("{} key [{}], hits[{}], doc_count [{}]", logIdprefix, key, hitsForThisBucket, docCount, alreadyprocessed); } // Shuffle the URLs so that we don't get blocks of URLs from the // same // host or domain Collections.shuffle((List) buffer); } LOG.info( "{} ES query returned {} hits from {} buckets in {} msec with {} already being processed", logIdprefix, numhits, numBuckets, timeTaken, alreadyprocessed); esQueryTimes.addMeasurement(timeTaken); eventCounter.scope("already_being_processed").incrBy(alreadyprocessed); eventCounter.scope("ES_queries").incrBy(1); eventCounter.scope("ES_docs").incrBy(numhits); // reset the value for next fetch date if the previous one is too old if (resetFetchDateAfterNSecs != -1) { Calendar diffCal = Calendar.getInstance(); diffCal.setTime(lastDate); diffCal.add(Calendar.SECOND, resetFetchDateAfterNSecs); // compare to now if (diffCal.before(Calendar.getInstance())) { LOG.info( "{} lastDate set to null based on resetFetchDateAfterNSecs {}", logIdprefix, resetFetchDateAfterNSecs); lastDate = null; } } // optimise the nextFetchDate by getting the most recent value // returned in the query and add to it, unless the previous value is // within n mins in which case we'll keep it else if (mostRecentDateFound != null && recentDateIncrease >= 0) { Calendar potentialNewDate = Calendar.getInstance(); potentialNewDate.setTime(mostRecentDateFound); potentialNewDate.add(Calendar.MINUTE, recentDateIncrease); Date oldDate = null; // check boundaries if (this.recentDateMinGap > 0) { Calendar low = Calendar.getInstance(); low.setTime(lastDate); low.add(Calendar.MINUTE, -recentDateMinGap); Calendar high = Calendar.getInstance(); high.setTime(lastDate); high.add(Calendar.MINUTE, recentDateMinGap); if (high.before(potentialNewDate) || low.after(potentialNewDate)) { oldDate = lastDate; } } else { oldDate = lastDate; } if (oldDate != null) { lastDate = potentialNewDate.getTime(); LOG.info( "{} lastDate changed from {} to {} based on mostRecentDateFound {}", logIdprefix, oldDate, lastDate, mostRecentDateFound); } else { LOG.info( "{} lastDate kept at {} based on mostRecentDateFound {}", logIdprefix, lastDate, mostRecentDateFound); } } // change the date if we don't get any results at all if (numBuckets == 0) { lastDate = null; } // remove lock isInQuery.set(false); } #location 92 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void onResponse(SearchResponse response) { long timeTaken = System.currentTimeMillis() - timeLastQuery; Aggregations aggregs = response.getAggregations(); if (aggregs == null) { isInQuery.set(false); return; } SingleBucketAggregation sample = aggregs.get("sample"); if (sample != null) { aggregs = sample.getAggregations(); } Terms agg = aggregs.get("partition"); int numhits = 0; int numBuckets = 0; int alreadyprocessed = 0; Date mostRecentDateFound = null; SimpleDateFormat formatter = new SimpleDateFormat( "yyyy-MM-dd'T'HH:mm:ss.SSSX"); synchronized (buffer) { // For each entry Iterator<Terms.Bucket> iterator = (Iterator<Bucket>) agg .getBuckets().iterator(); while (iterator.hasNext()) { Terms.Bucket entry = iterator.next(); String key = (String) entry.getKey(); // bucket key long docCount = entry.getDocCount(); // Doc count int hitsForThisBucket = 0; // filter results so that we don't include URLs we are already // being processed TopHits topHits = entry.getAggregations().get("docs"); for (SearchHit hit : topHits.getHits().getHits()) { hitsForThisBucket++; Map<String, Object> keyValues = hit.getSourceAsMap(); String url = (String) keyValues.get("url"); LOG.debug("{} -> id [{}], _source [{}]", logIdprefix, hit.getId(), hit.getSourceAsString()); // consider only the first document of the last bucket // for optimising the nextFetchDate if (hitsForThisBucket == 1 && !iterator.hasNext()) { String strDate = (String) keyValues .get("nextFetchDate"); try { mostRecentDateFound = formatter.parse(strDate); } catch (ParseException e) { throw new RuntimeException("can't parse date :" + strDate); } } // is already being processed - skip it! if (beingProcessed.containsKey(url)) { alreadyprocessed++; continue; } Metadata metadata = fromKeyValues(keyValues); buffer.add(new Values(url, metadata)); } if (hitsForThisBucket > 0) numBuckets++; numhits += hitsForThisBucket; LOG.debug("{} key [{}], hits[{}], doc_count [{}]", logIdprefix, key, hitsForThisBucket, docCount, alreadyprocessed); } // Shuffle the URLs so that we don't get blocks of URLs from the // same // host or domain Collections.shuffle((List) buffer); } LOG.info( "{} ES query returned {} hits from {} buckets in {} msec with {} already being processed", logIdprefix, numhits, numBuckets, timeTaken, alreadyprocessed); queryTimes.addMeasurement(timeTaken); eventCounter.scope("already_being_processed").incrBy(alreadyprocessed); eventCounter.scope("ES_queries").incrBy(1); eventCounter.scope("ES_docs").incrBy(numhits); // optimise the nextFetchDate by getting the most recent value // returned in the query and add to it, unless the previous value is // within n mins in which case we'll keep it if (mostRecentDateFound != null && recentDateIncrease >= 0) { Calendar potentialNewDate = Calendar.getInstance(); potentialNewDate.setTime(mostRecentDateFound); potentialNewDate.add(Calendar.MINUTE, recentDateIncrease); Date oldDate = null; // check boundaries if (this.recentDateMinGap > 0) { Calendar low = Calendar.getInstance(); low.setTime(queryDate); low.add(Calendar.MINUTE, -recentDateMinGap); Calendar high = Calendar.getInstance(); high.setTime(queryDate); high.add(Calendar.MINUTE, recentDateMinGap); if (high.before(potentialNewDate) || low.after(potentialNewDate)) { oldDate = queryDate; } } else { oldDate = queryDate; } if (oldDate != null) { queryDate = potentialNewDate.getTime(); LOG.info( "{} lastDate changed from {} to {} based on mostRecentDateFound {}", logIdprefix, oldDate, queryDate, mostRecentDateFound); } else { LOG.info( "{} lastDate kept at {} based on mostRecentDateFound {}", logIdprefix, queryDate, mostRecentDateFound); } } // reset the value for next fetch date if the previous one is too old if (resetFetchDateAfterNSecs != -1) { Instant changeNeededOn = Instant.ofEpochMilli(lastTimeResetToNOW .toEpochMilli() + (resetFetchDateAfterNSecs * 1000)); if (Instant.now().isAfter(changeNeededOn)) { LOG.info( "{} lastDate set to null based on resetFetchDateAfterNSecs {}", logIdprefix, resetFetchDateAfterNSecs); queryDate = null; } } // change the date if we don't get any results at all if (numBuckets == 0) { queryDate = null; } // remove lock isInQuery.set(false); }
#vulnerable code @Override public void onResponse(SearchResponse response) { long timeTaken = System.currentTimeMillis() - timeLastQuery; Aggregations aggregs = response.getAggregations(); if (aggregs == null) { isInQuery.set(false); return; } SingleBucketAggregation sample = aggregs.get("sample"); if (sample != null) { aggregs = sample.getAggregations(); } Terms agg = aggregs.get("partition"); int numhits = 0; int numBuckets = 0; int alreadyprocessed = 0; Date mostRecentDateFound = null; SimpleDateFormat formatter = new SimpleDateFormat( "yyyy-MM-dd'T'HH:mm:ss.SSSX"); synchronized (buffer) { // For each entry Iterator<Terms.Bucket> iterator = (Iterator<Bucket>) agg .getBuckets().iterator(); while (iterator.hasNext()) { Terms.Bucket entry = iterator.next(); String key = (String) entry.getKey(); // bucket key long docCount = entry.getDocCount(); // Doc count int hitsForThisBucket = 0; // filter results so that we don't include URLs we are already // being processed TopHits topHits = entry.getAggregations().get("docs"); for (SearchHit hit : topHits.getHits().getHits()) { hitsForThisBucket++; Map<String, Object> keyValues = hit.getSourceAsMap(); String url = (String) keyValues.get("url"); LOG.debug("{} -> id [{}], _source [{}]", logIdprefix, hit.getId(), hit.getSourceAsString()); // consider only the first document of the last bucket // for optimising the nextFetchDate if (hitsForThisBucket == 1 && !iterator.hasNext()) { String strDate = (String) keyValues .get("nextFetchDate"); try { mostRecentDateFound = formatter.parse(strDate); } catch (ParseException e) { throw new RuntimeException("can't parse date :" + strDate); } } // is already being processed - skip it! if (beingProcessed.containsKey(url)) { alreadyprocessed++; continue; } Metadata metadata = fromKeyValues(keyValues); buffer.add(new Values(url, metadata)); } if (hitsForThisBucket > 0) numBuckets++; numhits += hitsForThisBucket; LOG.debug("{} key [{}], hits[{}], doc_count [{}]", logIdprefix, key, hitsForThisBucket, docCount, alreadyprocessed); } // Shuffle the URLs so that we don't get blocks of URLs from the // same // host or domain Collections.shuffle((List) buffer); } LOG.info( "{} ES query returned {} hits from {} buckets in {} msec with {} already being processed", logIdprefix, numhits, numBuckets, timeTaken, alreadyprocessed); queryTimes.addMeasurement(timeTaken); eventCounter.scope("already_being_processed").incrBy(alreadyprocessed); eventCounter.scope("ES_queries").incrBy(1); eventCounter.scope("ES_docs").incrBy(numhits); // reset the value for next fetch date if the previous one is too old if (resetFetchDateAfterNSecs != -1) { Calendar diffCal = Calendar.getInstance(); diffCal.setTime(lastDate); diffCal.add(Calendar.SECOND, resetFetchDateAfterNSecs); // compare to now if (diffCal.before(Calendar.getInstance())) { LOG.info( "{} lastDate set to null based on resetFetchDateAfterNSecs {}", logIdprefix, resetFetchDateAfterNSecs); lastDate = null; } } // optimise the nextFetchDate by getting the most recent value // returned in the query and add to it, unless the previous value is // within n mins in which case we'll keep it else if (mostRecentDateFound != null && recentDateIncrease >= 0) { Calendar potentialNewDate = Calendar.getInstance(); potentialNewDate.setTime(mostRecentDateFound); potentialNewDate.add(Calendar.MINUTE, recentDateIncrease); Date oldDate = null; // check boundaries if (this.recentDateMinGap > 0) { Calendar low = Calendar.getInstance(); low.setTime(lastDate); low.add(Calendar.MINUTE, -recentDateMinGap); Calendar high = Calendar.getInstance(); high.setTime(lastDate); high.add(Calendar.MINUTE, recentDateMinGap); if (high.before(potentialNewDate) || low.after(potentialNewDate)) { oldDate = lastDate; } } else { oldDate = lastDate; } if (oldDate != null) { lastDate = potentialNewDate.getTime(); LOG.info( "{} lastDate changed from {} to {} based on mostRecentDateFound {}", logIdprefix, oldDate, lastDate, mostRecentDateFound); } else { LOG.info( "{} lastDate kept at {} based on mostRecentDateFound {}", logIdprefix, lastDate, mostRecentDateFound); } } // change the date if we don't get any results at all if (numBuckets == 0) { lastDate = null; } // remove lock isInQuery.set(false); } #location 107 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { partitionField = ConfUtils.getString(stormConf, ESStatusRoutingFieldParamName); bucketSortField = ConfUtils.getString(stormConf, ESStatusBucketSortFieldParamName, bucketSortField); totalSortField = ConfUtils.getString(stormConf, ESStatusGlobalSortFieldParamName); maxURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxURLsParamName, 1); maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName, 10); minDelayBetweenQueries = ConfUtils.getLong(stormConf, ESStatusMinDelayParamName, 2000); super.open(stormConf, context, collector); }
#vulnerable code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName, "status"); docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName, "status"); partitionField = ConfUtils.getString(stormConf, ESStatusRoutingFieldParamName); bucketSortField = ConfUtils.getString(stormConf, ESStatusBucketSortFieldParamName, bucketSortField); totalSortField = ConfUtils.getString(stormConf, ESStatusGlobalSortFieldParamName); maxURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxURLsParamName, 1); maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName, 10); minDelayBetweenQueries = ConfUtils.getLong(stormConf, ESStatusMinDelayParamName, 2000); // one ES client per JVM synchronized (AggregationSpout.class) { try { if (client == null) { client = ElasticSearchConnection.getClient(stormConf, ESBoltType); } } catch (Exception e1) { LOG.error("Can't connect to ElasticSearch", e1); throw new RuntimeException(e1); } } // if more than one instance is used we expect their number to be the // same as the number of shards int totalTasks = context .getComponentTasks(context.getThisComponentId()).size(); if (totalTasks > 1) { logIdprefix = "[" + context.getThisComponentId() + " #" + context.getThisTaskIndex() + "] "; // determine the number of shards so that we can restrict the // search ClusterSearchShardsRequest request = new ClusterSearchShardsRequest( indexName); ClusterSearchShardsResponse shardresponse = client.admin() .cluster().searchShards(request).actionGet(); ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups(); if (totalTasks != shardgroups.length) { throw new RuntimeException( "Number of ES spout instances should be the same as number of shards (" + shardgroups.length + ") but is " + totalTasks); } shardID = shardgroups[context.getThisTaskIndex()].getShardId(); LOG.info("{} assigned shard ID {}", logIdprefix, shardID); } _collector = collector; this.eventCounter = context.registerMetric("counters", new MultiCountMetric(), 10); context.registerMetric("buffer_size", new IMetric() { @Override public Object getValueAndReset() { return buffer.size(); } }, 10); } #location 24 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code public static Client getClient(Map stormConf, String boltType) { List<String> hosts = new LinkedList<>(); Object addresses = stormConf.get("es." + boltType + ".addresses"); // list if (addresses instanceof PersistentVector) { hosts.addAll((PersistentVector) addresses); } // single value? else { hosts.add(addresses.toString()); } String clustername = ConfUtils.getString(stormConf, "es." + boltType + ".cluster.name", "elasticsearch"); // Use Node client if no host is specified // ES will try to find the cluster automatically // and join it if (hosts.size() == 0) { Node node = org.elasticsearch.node.NodeBuilder .nodeBuilder() .settings( ImmutableSettings.settingsBuilder().put( "http.enabled", false)) .clusterName(clustername).client(true).node(); return node.client(); } // if a transport address has been specified // use the transport client - even if it is localhost Settings settings = ImmutableSettings.settingsBuilder() .put("cluster.name", clustername).build(); TransportClient tc = new TransportClient(settings); for (String host : hosts) { String[] hostPort = host.split(":"); // no port specified? use default one int port = 9300; if (hostPort.length == 2) { port = Integer.parseInt(hostPort[1].trim()); } InetSocketTransportAddress ista = new InetSocketTransportAddress( hostPort[0].trim(), port); tc.addTransportAddress(ista); } return tc; }
#vulnerable code public static Client getClient(Map stormConf, String boltType) { String host = ConfUtils.getString(stormConf, "es." + boltType + ".hostname"); String clustername = ConfUtils.getString(stormConf, "es." + boltType + ".cluster.name", "elasticsearch"); // Use Node client if no host is specified // ES will try to find the cluster automatically // and join it if (StringUtils.isBlank(host)) { Node node = org.elasticsearch.node.NodeBuilder .nodeBuilder() .settings( ImmutableSettings.settingsBuilder().put( "http.enabled", false)) .clusterName(clustername).client(true).node(); return node.client(); } // if a transport address has been specified // use the transport client - even if it is localhost Settings settings = ImmutableSettings.settingsBuilder() .put("cluster.name", clustername).build(); return new TransportClient(settings) .addTransportAddress(new InetSocketTransportAddress(host, 9300)); } #location 26 #vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void store(String url, Status status, Metadata metadata, Date nextFetch) throws Exception { // the mysql insert statement String query = tableName + " (url, status, nextfetchdate, metadata, bucket, host)" + " values (?, ?, ?, ?, ?, ?)"; StringBuffer mdAsString = new StringBuffer(); for (String mdKey : metadata.keySet()) { String[] vals = metadata.getValues(mdKey); for (String v : vals) { mdAsString.append("\t").append(mdKey).append("=").append(v); } } int partition = 0; String partitionKey = partitioner.getPartition(url, metadata); if (maxNumBuckets > 1) { // determine which shard to send to based on the host / domain / IP partition = Math.abs(partitionKey.hashCode() % maxNumBuckets); } // create in table if does not already exist if (status.equals(Status.DISCOVERED)) { query = "INSERT IGNORE INTO " + query; } else query = "REPLACE INTO " + query; PreparedStatement preparedStmt = connection.prepareStatement(query); preparedStmt.setString(1, url); preparedStmt.setString(2, status.toString()); preparedStmt.setObject(3, nextFetch); preparedStmt.setString(4, mdAsString.toString()); preparedStmt.setInt(5, partition); preparedStmt.setString(6, partitionKey); long start = System.currentTimeMillis(); // execute the preparedstatement preparedStmt.execute(); eventCounter.scope("sql_query_number").incrBy(1); averagedMetrics.scope("sql_execute_time").update( System.currentTimeMillis() - start); preparedStmt.close(); }
#vulnerable code @Override public void store(String url, Status status, Metadata metadata, Date nextFetch) throws Exception { // the mysql insert statement String query = tableName + " (url, status, nextfetchdate, metadata, bucket)" + " values (?, ?, ?, ?, ?)"; StringBuffer mdAsString = new StringBuffer(); for (String mdKey : metadata.keySet()) { String[] vals = metadata.getValues(mdKey); for (String v : vals) { mdAsString.append("\t").append(mdKey).append("=").append(v); } } int partition = 0; if (maxNumBuckets > 1) { // determine which queue to send to based on the host / domain / IP String partitionKey = partitioner.getPartition(url, metadata); partition = Math.abs(partitionKey.hashCode() % maxNumBuckets); } // create in table if does not already exist if (status.equals(Status.DISCOVERED)) { query = "INSERT IGNORE INTO " + query; } else query = "REPLACE INTO " + query; PreparedStatement preparedStmt = connection.prepareStatement(query); preparedStmt.setString(1, url); preparedStmt.setString(2, status.toString()); preparedStmt.setObject(3, nextFetch); preparedStmt.setString(4, mdAsString.toString()); preparedStmt.setInt(5, partition); long start = System.currentTimeMillis(); // execute the preparedstatement preparedStmt.execute(); eventCounter.scope("sql_query_number").incrBy(1); averagedMetrics.scope("sql_execute_time").update( System.currentTimeMillis() - start); preparedStmt.close(); } #location 22 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void nextTuple() { // inactive? if (active == false) return; // have anything in the buffer? if (!buffer.isEmpty()) { Values fields = buffer.remove(); String url = fields.get(0).toString(); beingProcessed.add(url); _collector.emit(fields, url); eventCounter.scope("emitted").incrBy(1); return; } // re-populate the buffer populateBuffer(); }
#vulnerable code @Override public void nextTuple() { // inactive? if (active == false) return; // have anything in the buffer? if (!buffer.isEmpty()) { Values fields = buffer.remove(); String url = fields.get(0).toString(); beingProcessed.add(url); this._collector.emit(fields, url); eventCounter.scope("emitted").incrBy(1); return; } // re-populate the buffer populateBuffer(); } #location 16 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxInflightParamName, 1); maxBufferSize = ConfUtils.getInt(stormConf, ESStatusBufferSizeParamName, 100); randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName, true); maxSecSinceQueriedDate = ConfUtils.getInt(stormConf, ESMaxSecsSinceQueriedDateParamName, -1); sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName, "nextFetchDate"); super.open(stormConf, context, collector); partitioner = new URLPartitioner(); partitioner.configure(stormConf); context.registerMetric("beingProcessed", new IMetric() { @Override public Object getValueAndReset() { return beingProcessed.size(); } }, 10); }
#vulnerable code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName, "status"); docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName, "status"); maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxInflightParamName, 1); maxBufferSize = ConfUtils.getInt(stormConf, ESStatusBufferSizeParamName, 100); randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName, true); maxSecSinceQueriedDate = ConfUtils.getInt(stormConf, ESMaxSecsSinceQueriedDateParamName, -1); sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName, "nextFetchDate"); // one ES client per JVM synchronized (ElasticSearchSpout.class) { try { if (client == null) { client = ElasticSearchConnection.getClient(stormConf, ESBoltType); } } catch (Exception e1) { LOG.error("Can't connect to ElasticSearch", e1); throw new RuntimeException(e1); } } // if more than one instance is used we expect their number to be the // same as the number of shards int totalTasks = context .getComponentTasks(context.getThisComponentId()).size(); if (totalTasks > 1) { // determine the number of shards so that we can restrict the // search ClusterSearchShardsRequest request = new ClusterSearchShardsRequest( indexName); ClusterSearchShardsResponse shardresponse = client.admin() .cluster().searchShards(request).actionGet(); ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups(); if (totalTasks != shardgroups.length) { throw new RuntimeException( "Number of ES spout instances should be the same as number of shards (" + shardgroups.length + ") but is " + totalTasks); } shardID = shardgroups[context.getThisTaskIndex()].getShardId(); LOG.info("Assigned shard ID {}", shardID); } partitioner = new URLPartitioner(); partitioner.configure(stormConf); _collector = collector; this.eventCounter = context.registerMetric("counters", new MultiCountMetric(), 10); context.registerMetric("beingProcessed", new IMetric() { @Override public Object getValueAndReset() { return beingProcessed.size(); } }, 10); context.registerMetric("buffer_size", new IMetric() { @Override public Object getValueAndReset() { return buffer.size(); } }, 10); } #location 43 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code private List<RegexRule> readRules(String rulesFile) { List<RegexRule> rules = new ArrayList<RegexRule>(); try { InputStream regexStream = getClass().getClassLoader() .getResourceAsStream(rulesFile); Reader reader = new InputStreamReader(regexStream, StandardCharsets.UTF_8); BufferedReader in = new BufferedReader(reader); String line; while ((line = in.readLine()) != null) { if (line.length() == 0) { continue; } char first = line.charAt(0); boolean sign = false; switch (first) { case '+': sign = true; break; case '-': sign = false; break; case ' ': case '\n': case '#': // skip blank & comment lines continue; default: throw new IOException("Invalid first character: " + line); } String regex = line.substring(1); LOG.trace("Adding rule [{}]", regex); RegexRule rule = createRule(sign, regex); rules.add(rule); } } catch (IOException e) { LOG.error("There was an error reading the default-regex-filters file"); e.printStackTrace(); } return rules; }
#vulnerable code private List<RegexRule> readRules(String rulesFile) { List<RegexRule> rules = new ArrayList<RegexRule>(); try { InputStream regexStream = getClass().getClassLoader() .getResourceAsStream(rulesFile); Reader reader = new InputStreamReader(regexStream, "UTF-8"); BufferedReader in = new BufferedReader(reader); String line; while ((line = in.readLine()) != null) { if (line.length() == 0) { continue; } char first = line.charAt(0); boolean sign = false; switch (first) { case '+': sign = true; break; case '-': sign = false; break; case ' ': case '\n': case '#': // skip blank & comment lines continue; default: throw new IOException("Invalid first character: " + line); } String regex = line.substring(1); LOG.trace("Adding rule [{}]", regex); RegexRule rule = createRule(sign, regex); rules.add(rule); } } catch (IOException e) { LOG.error("There was an error reading the default-regex-filters file"); e.printStackTrace(); } return rules; } #location 30 #vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code private List<RegexRule> readRules(String rulesFile) { List<RegexRule> rules = new ArrayList<RegexRule>(); try { InputStream regexStream = getClass().getClassLoader() .getResourceAsStream(rulesFile); Reader reader = new InputStreamReader(regexStream, StandardCharsets.UTF_8); BufferedReader in = new BufferedReader(reader); String line; while ((line = in.readLine()) != null) { if (line.length() == 0) { continue; } RegexRule rule = createRule(line); if (rule != null) { rules.add(rule); } } } catch (IOException e) { LOG.error("There was an error reading the default-regex-filters file"); e.printStackTrace(); } return rules; }
#vulnerable code private List<RegexRule> readRules(String rulesFile) { List<RegexRule> rules = new ArrayList<RegexRule>(); try { InputStream regexStream = getClass().getClassLoader() .getResourceAsStream(rulesFile); Reader reader = new InputStreamReader(regexStream, StandardCharsets.UTF_8); BufferedReader in = new BufferedReader(reader); String line; while ((line = in.readLine()) != null) { if (line.length() == 0) { continue; } char first = line.charAt(0); boolean sign = false; switch (first) { case '+': sign = true; break; case '-': sign = false; break; case ' ': case '\n': case '#': // skip blank & comment lines continue; default: throw new IOException("Invalid first character: " + line); } String regex = line.substring(1); LOG.trace("Adding rule [{}]", regex); RegexRule rule = createRule(sign, regex); rules.add(rule); } } catch (IOException e) { LOG.error("There was an error reading the default-regex-filters file"); e.printStackTrace(); } return rules; } #location 31 #vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxInflightParamName, 1); maxBufferSize = ConfUtils.getInt(stormConf, ESStatusBufferSizeParamName, 100); randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName, true); maxSecSinceQueriedDate = ConfUtils.getInt(stormConf, ESMaxSecsSinceQueriedDateParamName, -1); sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName, "nextFetchDate"); super.open(stormConf, context, collector); partitioner = new URLPartitioner(); partitioner.configure(stormConf); context.registerMetric("beingProcessed", new IMetric() { @Override public Object getValueAndReset() { return beingProcessed.size(); } }, 10); }
#vulnerable code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName, "status"); docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName, "status"); maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxInflightParamName, 1); maxBufferSize = ConfUtils.getInt(stormConf, ESStatusBufferSizeParamName, 100); randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName, true); maxSecSinceQueriedDate = ConfUtils.getInt(stormConf, ESMaxSecsSinceQueriedDateParamName, -1); sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName, "nextFetchDate"); // one ES client per JVM synchronized (ElasticSearchSpout.class) { try { if (client == null) { client = ElasticSearchConnection.getClient(stormConf, ESBoltType); } } catch (Exception e1) { LOG.error("Can't connect to ElasticSearch", e1); throw new RuntimeException(e1); } } // if more than one instance is used we expect their number to be the // same as the number of shards int totalTasks = context .getComponentTasks(context.getThisComponentId()).size(); if (totalTasks > 1) { // determine the number of shards so that we can restrict the // search ClusterSearchShardsRequest request = new ClusterSearchShardsRequest( indexName); ClusterSearchShardsResponse shardresponse = client.admin() .cluster().searchShards(request).actionGet(); ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups(); if (totalTasks != shardgroups.length) { throw new RuntimeException( "Number of ES spout instances should be the same as number of shards (" + shardgroups.length + ") but is " + totalTasks); } shardID = shardgroups[context.getThisTaskIndex()].getShardId(); LOG.info("Assigned shard ID {}", shardID); } partitioner = new URLPartitioner(); partitioner.configure(stormConf); _collector = collector; this.eventCounter = context.registerMetric("counters", new MultiCountMetric(), 10); context.registerMetric("beingProcessed", new IMetric() { @Override public Object getValueAndReset() { return beingProcessed.size(); } }, 10); context.registerMetric("buffer_size", new IMetric() { @Override public Object getValueAndReset() { return buffer.size(); } }, 10); } #location 60 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public Date schedule(Status status, Metadata metadata) { LOG.debug("Scheduling status: {}, metadata: {}", status, metadata); String signature = metadata.getFirstValue(SIGNATURE_KEY); String oldSignature = metadata.getFirstValue(SIGNATURE_OLD_KEY); if (status != Status.FETCHED) { // reset all metadata metadata.remove(SIGNATURE_MODIFIED_KEY); metadata.remove(FETCH_INTERVAL_KEY); metadata.remove(SIGNATURE_KEY); metadata.remove(SIGNATURE_OLD_KEY); // fall-back to DefaultScheduler return super.schedule(status, metadata); } Calendar now = Calendar.getInstance(Locale.ROOT); String signatureModified = metadata .getFirstValue(SIGNATURE_MODIFIED_KEY); boolean changed = false; final String modifiedTimeString = httpDateFormat.format(now.getTime()); if (metadata.getFirstValue("fetch.statusCode").equals("304")) { // HTTP 304 Not Modified // - no new signature calculated because no content fetched // - do not compare persisted signatures } else if (signature == null || oldSignature == null) { // no decision possible by signature comparison if // - document not parsed (intentionally or not) or // - signature not generated or // - old signature not copied // fall-back to DefaultScheduler LOG.debug("No signature for FETCHED page: {}", metadata); return super.schedule(status, metadata); } else if (signature.equals(oldSignature)) { // unchanged } else { // change detected by signature comparison changed = true; signatureModified = modifiedTimeString; if (setLastModified) { metadata.setValue(HttpHeaders.LAST_MODIFIED, modifiedTimeString); } } String fetchInterval = metadata.getFirstValue(FETCH_INTERVAL_KEY); int interval = defaultfetchInterval; if (fetchInterval != null) { interval = Integer.parseInt(fetchInterval); } else { // initialize from DefaultScheduler Optional<Integer> customInterval = super.checkCustomInterval( metadata, status); if (customInterval.isPresent()) { interval = customInterval.get(); } else { interval = defaultfetchInterval; } fetchInterval = Integer.toString(interval); } if (changed) { // shrink fetch interval (slow down decrementing if already close to // the minimum interval) interval = (int) ((1.0f - fetchIntervalDecRate) * interval + fetchIntervalDecRate * minFetchInterval); LOG.debug( "Signature has changed, fetchInterval decreased from {} to {}", fetchInterval, interval); } else { // no change or not modified, increase fetch interval interval = (int) (interval * (1.0f + fetchIntervalIncRate)); if (interval > maxFetchInterval) { interval = maxFetchInterval; } LOG.debug("Unchanged, fetchInterval increased from {} to {}", fetchInterval, interval); // remove old signature (do not keep same signature twice) metadata.remove(SIGNATURE_OLD_KEY); if (signatureModified == null) { signatureModified = modifiedTimeString; } } metadata.setValue(FETCH_INTERVAL_KEY, Integer.toString(interval)); metadata.setValue(SIGNATURE_MODIFIED_KEY, signatureModified); now.add(Calendar.MINUTE, interval); return now.getTime(); }
#vulnerable code @Override public Date schedule(Status status, Metadata metadata) { LOG.debug("Scheduling status: {}, metadata: {}", status, metadata); String signature = metadata.getFirstValue(SIGNATURE_KEY); String oldSignature = metadata.getFirstValue(SIGNATURE_OLD_KEY); if (status != Status.FETCHED) { // reset all metadata metadata.remove(SIGNATURE_MODIFIED_KEY); metadata.remove(FETCH_INTERVAL_KEY); metadata.remove(SIGNATURE_KEY); metadata.remove(SIGNATURE_OLD_KEY); // fall-back to DefaultScheduler return super.schedule(status, metadata); } Calendar now = Calendar.getInstance(Locale.ROOT); String signatureModified = metadata .getFirstValue(SIGNATURE_MODIFIED_KEY); boolean changed = false; final String modifiedTimeString = httpDateFormat.format(now.getTime()); if (signature == null || oldSignature == null) { // no decision possible by signature comparison if // - document not parsed (intentionally or not) or // - signature not generated or // - old signature not copied if (metadata.getFirstValue("fetch.statusCode").equals("304")) { // HTTP 304 Not Modified } else { // fall-back to DefaultScheduler LOG.debug("No signature for FETCHED page: {}", metadata); return super.schedule(status, metadata); } } else if (signature.equals(oldSignature)) { // unchanged, remove old signature (do not keep same signature // twice) metadata.remove(SIGNATURE_OLD_KEY); if (signatureModified == null) signatureModified = modifiedTimeString; } else { // change detected by signature comparison changed = true; signatureModified = modifiedTimeString; if (setLastModified) metadata.setValue(HttpHeaders.LAST_MODIFIED, modifiedTimeString); } String fetchInterval = metadata.getFirstValue(FETCH_INTERVAL_KEY); int interval = defaultfetchInterval; if (fetchInterval != null) { interval = Integer.parseInt(fetchInterval); } else { // initialize from DefaultScheduler Optional<Integer> customInterval = super.checkCustomInterval( metadata, status); if (customInterval.isPresent()) interval = customInterval.get(); else interval = defaultfetchInterval; fetchInterval = Integer.toString(interval); } if (changed) { // shrink fetch interval (slow down decrementing if already close to // the minimum interval) interval = (int) ((1.0f - fetchIntervalDecRate) * interval + fetchIntervalDecRate * minFetchInterval); LOG.debug( "Signature has changed, fetchInterval decreased from {} to {}", fetchInterval, interval); } else { // no change or not modified, increase fetch interval interval = (int) (interval * (1.0f + fetchIntervalIncRate)); if (interval > maxFetchInterval) interval = maxFetchInterval; LOG.debug("Unchanged, fetchInterval increased from {} to {}", fetchInterval, interval); } metadata.setValue(FETCH_INTERVAL_KEY, Integer.toString(interval)); metadata.setValue(SIGNATURE_MODIFIED_KEY, signatureModified); now.add(Calendar.MINUTE, interval); return now.getTime(); } #location 35 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code private static String getCharsetFromBOM(final byte[] byteData) { try (BOMInputStream bomIn = new BOMInputStream( new ByteArrayInputStream(byteData))) { ByteOrderMark bom = bomIn.getBOM(); if (bom != null) { return bom.getCharsetName(); } } catch (IOException e) { return null; } return null; }
#vulnerable code private static String getCharsetFromBOM(final byte[] byteData) { BOMInputStream bomIn = new BOMInputStream(new ByteArrayInputStream( byteData)); try { ByteOrderMark bom = bomIn.getBOM(); if (bom != null) { return bom.getCharsetName(); } } catch (IOException e) { return null; } return null; } #location 5 #vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void nextTuple() { if (!active) return; // synchronize access to buffer needed in case of asynchronous // queries to the backend synchronized (buffer) { // force the refresh of the buffer even if the buffer is not empty if (!isInQuery.get() && triggerQueries()) { populateBuffer(); } if (!buffer.isEmpty()) { // track how long the buffer had been empty for if (timestampEmptyBuffer != -1) { eventCounter.scope("empty.buffer").incrBy( System.currentTimeMillis() - timestampEmptyBuffer); timestampEmptyBuffer = -1; } List<Object> fields = buffer.remove(); String url = fields.get(0).toString(); this._collector.emit(fields, url); beingProcessed.put(url, null); in_buffer.remove(url); eventCounter.scope("emitted").incrBy(1); return; } else if (timestampEmptyBuffer == -1) { timestampEmptyBuffer = System.currentTimeMillis(); } } if (isInQuery.get() || throttleQueries() > 0) { // sleep for a bit but not too much in order to give ack/fail a // chance Utils.sleep(10); return; } // re-populate the buffer populateBuffer(); timeLastQuerySent = System.currentTimeMillis(); }
#vulnerable code @Override public void nextTuple() { if (!active) return; // synchronize access to buffer needed in case of asynchronous // queries to the backend synchronized (buffer) { if (!buffer.isEmpty()) { // track how long the buffer had been empty for if (timestampEmptyBuffer != -1) { eventCounter.scope("empty.buffer").incrBy( System.currentTimeMillis() - timestampEmptyBuffer); timestampEmptyBuffer = -1; } List<Object> fields = buffer.remove(); String url = fields.get(0).toString(); this._collector.emit(fields, url); beingProcessed.put(url, null); eventCounter.scope("emitted").incrBy(1); return; } else if (timestampEmptyBuffer == -1) { timestampEmptyBuffer = System.currentTimeMillis(); } } if (isInQuery.get() || throttleQueries() > 0) { // sleep for a bit but not too much in order to give ack/fail a // chance Utils.sleep(10); return; } // re-populate the buffer populateBuffer(); timeLastQuery = System.currentTimeMillis(); } #location 27 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxInflightParamName, 1); maxBufferSize = ConfUtils.getInt(stormConf, ESStatusBufferSizeParamName, 100); randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName, true); maxSecSinceQueriedDate = ConfUtils.getInt(stormConf, ESMaxSecsSinceQueriedDateParamName, -1); sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName, "nextFetchDate"); super.open(stormConf, context, collector); partitioner = new URLPartitioner(); partitioner.configure(stormConf); context.registerMetric("beingProcessed", new IMetric() { @Override public Object getValueAndReset() { return beingProcessed.size(); } }, 10); }
#vulnerable code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName, "status"); docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName, "status"); maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxInflightParamName, 1); maxBufferSize = ConfUtils.getInt(stormConf, ESStatusBufferSizeParamName, 100); randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName, true); maxSecSinceQueriedDate = ConfUtils.getInt(stormConf, ESMaxSecsSinceQueriedDateParamName, -1); sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName, "nextFetchDate"); // one ES client per JVM synchronized (ElasticSearchSpout.class) { try { if (client == null) { client = ElasticSearchConnection.getClient(stormConf, ESBoltType); } } catch (Exception e1) { LOG.error("Can't connect to ElasticSearch", e1); throw new RuntimeException(e1); } } // if more than one instance is used we expect their number to be the // same as the number of shards int totalTasks = context .getComponentTasks(context.getThisComponentId()).size(); if (totalTasks > 1) { // determine the number of shards so that we can restrict the // search ClusterSearchShardsRequest request = new ClusterSearchShardsRequest( indexName); ClusterSearchShardsResponse shardresponse = client.admin() .cluster().searchShards(request).actionGet(); ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups(); if (totalTasks != shardgroups.length) { throw new RuntimeException( "Number of ES spout instances should be the same as number of shards (" + shardgroups.length + ") but is " + totalTasks); } shardID = shardgroups[context.getThisTaskIndex()].getShardId(); LOG.info("Assigned shard ID {}", shardID); } partitioner = new URLPartitioner(); partitioner.configure(stormConf); _collector = collector; this.eventCounter = context.registerMetric("counters", new MultiCountMetric(), 10); context.registerMetric("beingProcessed", new IMetric() { @Override public Object getValueAndReset() { return beingProcessed.size(); } }, 10); context.registerMetric("buffer_size", new IMetric() { @Override public Object getValueAndReset() { return buffer.size(); } }, 10); } #location 18 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void onResponse(SearchResponse response) { long timeTaken = System.currentTimeMillis() - timeLastQuery; SearchHit[] hits = response.getHits().getHits(); int numBuckets = hits.length; int alreadyprocessed = 0; int numDocs = 0; synchronized (buffer) { for (SearchHit hit : hits) { Map<String, SearchHits> innerHits = hit.getInnerHits(); // wanted just one per bucket : no inner hits if (innerHits == null) { numDocs++; if (!addHitToBuffer(hit)) { alreadyprocessed++; } continue; } // more than one per bucket SearchHits inMyBucket = innerHits.get("urls_per_bucket"); for (SearchHit subHit : inMyBucket.getHits()) { numDocs++; if (!addHitToBuffer(subHit)) { alreadyprocessed++; } } } // Shuffle the URLs so that we don't get blocks of URLs from the // same host or domain if (numBuckets != numDocs) { Collections.shuffle((List) buffer); } } queryTimes.addMeasurement(timeTaken); // could be derived from the count of query times above eventCounter.scope("ES_queries").incrBy(1); eventCounter.scope("ES_docs").incrBy(numDocs); eventCounter.scope("already_being_processed").incrBy(alreadyprocessed); LOG.info( "{} ES query returned {} hits from {} buckets in {} msec with {} already being processed", logIdprefix, numDocs, numBuckets, timeTaken, alreadyprocessed); // reset the value for next fetch date if the previous one is too old if (resetFetchDateAfterNSecs != -1) { Instant changeNeededOn = Instant.ofEpochMilli(lastTimeResetToNOW .toEpochMilli() + (resetFetchDateAfterNSecs * 1000)); if (Instant.now().isAfter(changeNeededOn)) { LOG.info("lastDate reset based on resetFetchDateAfterNSecs {}", resetFetchDateAfterNSecs); queryDate = null; lastStartOffset = 0; } } // no more results? if (numBuckets == 0) { queryDate = null; lastStartOffset = 0; } // still got some results but paging won't help else if (numBuckets < maxBucketNum) { lastStartOffset = 0; } else { lastStartOffset += numBuckets; } // remove lock isInQuery.set(false); }
#vulnerable code @Override public void onResponse(SearchResponse response) { long timeTaken = System.currentTimeMillis() - timeLastQuery; SearchHit[] hits = response.getHits().getHits(); int numBuckets = hits.length; // reset the value for next fetch date if the previous one is too old if (resetFetchDateAfterNSecs != -1) { Calendar diffCal = Calendar.getInstance(); diffCal.setTime(lastDate); diffCal.add(Calendar.SECOND, resetFetchDateAfterNSecs); // compare to now if (diffCal.before(Calendar.getInstance())) { LOG.info( "{} lastDate set to null based on resetFetchDateAfterNSecs {}", logIdprefix, resetFetchDateAfterNSecs); lastDate = null; lastStartOffset = 0; } } int alreadyprocessed = 0; int numDocs = 0; synchronized (buffer) { for (SearchHit hit : hits) { Map<String, SearchHits> innerHits = hit.getInnerHits(); // wanted just one per bucket : no inner hits if (innerHits == null) { numDocs++; if (!addHitToBuffer(hit)) { alreadyprocessed++; } continue; } // more than one per bucket SearchHits inMyBucket = innerHits.get("urls_per_bucket"); for (SearchHit subHit : inMyBucket.getHits()) { numDocs++; if (!addHitToBuffer(subHit)) { alreadyprocessed++; } } } // Shuffle the URLs so that we don't get blocks of URLs from the // same host or domain if (numBuckets != numDocs) { Collections.shuffle((List) buffer); } } queryTimes.addMeasurement(timeTaken); // could be derived from the count of query times above eventCounter.scope("ES_queries").incrBy(1); eventCounter.scope("ES_docs").incrBy(numDocs); eventCounter.scope("already_being_processed").incrBy(alreadyprocessed); LOG.info( "{} ES query returned {} hits from {} buckets in {} msec with {} already being processed", logIdprefix, numDocs, numBuckets, timeTaken, alreadyprocessed); // no more results? if (numBuckets == 0) { lastDate = null; lastStartOffset = 0; } // still got some results but paging won't help else if (numBuckets < maxBucketNum) { lastStartOffset = 0; } else { lastStartOffset += numBuckets; } // remove lock isInQuery.set(false); } #location 11 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { partitionField = ConfUtils.getString(stormConf, ESStatusRoutingFieldParamName); bucketSortField = ConfUtils.getString(stormConf, ESStatusBucketSortFieldParamName, bucketSortField); totalSortField = ConfUtils.getString(stormConf, ESStatusGlobalSortFieldParamName); maxURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxURLsParamName, 1); maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName, 10); minDelayBetweenQueries = ConfUtils.getLong(stormConf, ESStatusMinDelayParamName, 2000); super.open(stormConf, context, collector); }
#vulnerable code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName, "status"); docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName, "status"); partitionField = ConfUtils.getString(stormConf, ESStatusRoutingFieldParamName); bucketSortField = ConfUtils.getString(stormConf, ESStatusBucketSortFieldParamName, bucketSortField); totalSortField = ConfUtils.getString(stormConf, ESStatusGlobalSortFieldParamName); maxURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxURLsParamName, 1); maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName, 10); minDelayBetweenQueries = ConfUtils.getLong(stormConf, ESStatusMinDelayParamName, 2000); // one ES client per JVM synchronized (AggregationSpout.class) { try { if (client == null) { client = ElasticSearchConnection.getClient(stormConf, ESBoltType); } } catch (Exception e1) { LOG.error("Can't connect to ElasticSearch", e1); throw new RuntimeException(e1); } } // if more than one instance is used we expect their number to be the // same as the number of shards int totalTasks = context .getComponentTasks(context.getThisComponentId()).size(); if (totalTasks > 1) { logIdprefix = "[" + context.getThisComponentId() + " #" + context.getThisTaskIndex() + "] "; // determine the number of shards so that we can restrict the // search ClusterSearchShardsRequest request = new ClusterSearchShardsRequest( indexName); ClusterSearchShardsResponse shardresponse = client.admin() .cluster().searchShards(request).actionGet(); ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups(); if (totalTasks != shardgroups.length) { throw new RuntimeException( "Number of ES spout instances should be the same as number of shards (" + shardgroups.length + ") but is " + totalTasks); } shardID = shardgroups[context.getThisTaskIndex()].getShardId(); LOG.info("{} assigned shard ID {}", logIdprefix, shardID); } _collector = collector; this.eventCounter = context.registerMetric("counters", new MultiCountMetric(), 10); context.registerMetric("buffer_size", new IMetric() { @Override public Object getValueAndReset() { return buffer.size(); } }, 10); } #location 50 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { partitionField = ConfUtils.getString(stormConf, ESStatusRoutingFieldParamName); bucketSortField = ConfUtils.getString(stormConf, ESStatusBucketSortFieldParamName, bucketSortField); totalSortField = ConfUtils.getString(stormConf, ESStatusGlobalSortFieldParamName); maxURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxURLsParamName, 1); maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName, 10); minDelayBetweenQueries = ConfUtils.getLong(stormConf, ESStatusMinDelayParamName, 2000); super.open(stormConf, context, collector); }
#vulnerable code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName, "status"); docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName, "status"); partitionField = ConfUtils.getString(stormConf, ESStatusRoutingFieldParamName); bucketSortField = ConfUtils.getString(stormConf, ESStatusBucketSortFieldParamName, bucketSortField); totalSortField = ConfUtils.getString(stormConf, ESStatusGlobalSortFieldParamName); maxURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxURLsParamName, 1); maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName, 10); minDelayBetweenQueries = ConfUtils.getLong(stormConf, ESStatusMinDelayParamName, 2000); // one ES client per JVM synchronized (AggregationSpout.class) { try { if (client == null) { client = ElasticSearchConnection.getClient(stormConf, ESBoltType); } } catch (Exception e1) { LOG.error("Can't connect to ElasticSearch", e1); throw new RuntimeException(e1); } } // if more than one instance is used we expect their number to be the // same as the number of shards int totalTasks = context .getComponentTasks(context.getThisComponentId()).size(); if (totalTasks > 1) { logIdprefix = "[" + context.getThisComponentId() + " #" + context.getThisTaskIndex() + "] "; // determine the number of shards so that we can restrict the // search ClusterSearchShardsRequest request = new ClusterSearchShardsRequest( indexName); ClusterSearchShardsResponse shardresponse = client.admin() .cluster().searchShards(request).actionGet(); ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups(); if (totalTasks != shardgroups.length) { throw new RuntimeException( "Number of ES spout instances should be the same as number of shards (" + shardgroups.length + ") but is " + totalTasks); } shardID = shardgroups[context.getThisTaskIndex()].getShardId(); LOG.info("{} assigned shard ID {}", logIdprefix, shardID); } _collector = collector; this.eventCounter = context.registerMetric("counters", new MultiCountMetric(), 10); context.registerMetric("buffer_size", new IMetric() { @Override public Object getValueAndReset() { return buffer.size(); } }, 10); } #location 66 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void execute(Tuple input) { // triggered by the arrival of a tuple // be it a tick or normal one flushQueues(); if (isTickTuple(input)) { _collector.ack(input); return; } CountMetric metric = metricGauge.scope("activethreads"); metric.getValueAndReset(); metric.incrBy(this.activeThreads.get()); metric = metricGauge.scope("in queues"); metric.getValueAndReset(); metric.incrBy(this.fetchQueues.inQueues.get()); metric = metricGauge.scope("queues"); metric.getValueAndReset(); metric.incrBy(this.fetchQueues.queues.size()); LOG.info("[Fetcher #" + taskIndex + "] Threads : " + this.activeThreads.get() + "\tqueues : " + this.fetchQueues.queues.size() + "\tin_queues : " + this.fetchQueues.inQueues.get()); String url = input.getStringByField("url"); // check whether this tuple has a url field if (url == null) { LOG.info("[Fetcher #" + taskIndex + "] Missing url field for tuple " + input); // ignore silently _collector.ack(input); return; } fetchQueues.addFetchItem(input); }
#vulnerable code @Override public void execute(Tuple input) { // main thread in charge of acking and failing // see // https://github.com/nathanmarz/storm/wiki/Troubleshooting#nullpointerexception-from-deep-inside-storm int acked = 0; int failed = 0; int emitted = 0; // emit with or without anchors // before acking synchronized (emitQueue) { for (Object[] toemit : this.emitQueue) { String streamID = (String) toemit[0]; Tuple anchor = (Tuple) toemit[1]; Values vals = (Values) toemit[2]; if (anchor == null) _collector.emit(streamID, vals); else _collector.emit(streamID, Arrays.asList(anchor), vals); } emitted = emitQueue.size(); emitQueue.clear(); } // have a tick tuple to make sure we don't get starved synchronized (ackQueue) { for (Tuple toack : this.ackQueue) { _collector.ack(toack); } acked = ackQueue.size(); ackQueue.clear(); } synchronized (failQueue) { for (Tuple toack : this.failQueue) { _collector.fail(toack); } failed = failQueue.size(); failQueue.clear(); } if (acked + failed + emitted > 0) LOG.info("[Fetcher #" + taskIndex + "] Acked : " + acked + "\tFailed : " + failed + "\tEmitted : " + emitted); if (isTickTuple(input)) { _collector.ack(input); return; } CountMetric metric = metricGauge.scope("activethreads"); metric.getValueAndReset(); metric.incrBy(this.activeThreads.get()); metric = metricGauge.scope("in queues"); metric.getValueAndReset(); metric.incrBy(this.fetchQueues.inQueues.get()); metric = metricGauge.scope("queues"); metric.getValueAndReset(); metric.incrBy(this.fetchQueues.queues.size()); LOG.info("[Fetcher #" + taskIndex + "] Threads : " + this.activeThreads.get() + "\tqueues : " + this.fetchQueues.queues.size() + "\tin_queues : " + this.fetchQueues.inQueues.get()); String url = input.getStringByField("url"); // check whether this tuple has a url field if (url == null) { LOG.info("[Fetcher #" + taskIndex + "] Missing url field for tuple " + input); // ignore silently _collector.ack(input); return; } fetchQueues.addFetchItem(input); } #location 74 #vulnerability type THREAD_SAFETY_VIOLATION
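In the patched execute() all of the ack/fail/emit bookkeeping sits behind a single flushQueues() call, so only the executor thread ever touches the output collector while fetcher threads merely enqueue work. A sketch of that drain-on-the-executor-thread idiom built on ConcurrentLinkedQueue; the class and method names are illustrative, not the actual FetcherBolt internals, and emit handling is omitted for brevity:

import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.function.Consumer;

public class AckDrain<T> {

    // Fetcher threads only enqueue; they never touch the collector directly.
    private final ConcurrentLinkedQueue<T> ackQueue = new ConcurrentLinkedQueue<>();
    private final ConcurrentLinkedQueue<T> failQueue = new ConcurrentLinkedQueue<>();
    private final Consumer<T> acker;
    private final Consumer<T> failer;

    public AckDrain(Consumer<T> acker, Consumer<T> failer) {
        this.acker = acker;
        this.failer = failer;
    }

    /** Safe to call from any fetcher thread. */
    public void ackLater(T tuple) {
        ackQueue.add(tuple);
    }

    /** Safe to call from any fetcher thread. */
    public void failLater(T tuple) {
        failQueue.add(tuple);
    }

    /** Called only from the executor thread, e.g. at the top of execute(). */
    public void flush() {
        T t;
        while ((t = ackQueue.poll()) != null) {
            acker.accept(t);
        }
        while ((t = failQueue.poll()) != null) {
            failer.accept(t);
        }
    }
}

The bolt would construct it as new AckDrain<>(collector::ack, collector::fail) in prepare() and call flush() at the start of execute(), mirroring what the fixed method above does before it handles the incoming tuple.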
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxInflightParamName, 1); maxBufferSize = ConfUtils.getInt(stormConf, ESStatusBufferSizeParamName, 100); randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName, true); maxSecSinceQueriedDate = ConfUtils.getInt(stormConf, ESMaxSecsSinceQueriedDateParamName, -1); sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName, "nextFetchDate"); super.open(stormConf, context, collector); partitioner = new URLPartitioner(); partitioner.configure(stormConf); context.registerMetric("beingProcessed", new IMetric() { @Override public Object getValueAndReset() { return beingProcessed.size(); } }, 10); }
#vulnerable code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName, "status"); docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName, "status"); maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxInflightParamName, 1); maxBufferSize = ConfUtils.getInt(stormConf, ESStatusBufferSizeParamName, 100); randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName, true); maxSecSinceQueriedDate = ConfUtils.getInt(stormConf, ESMaxSecsSinceQueriedDateParamName, -1); sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName, "nextFetchDate"); // one ES client per JVM synchronized (ElasticSearchSpout.class) { try { if (client == null) { client = ElasticSearchConnection.getClient(stormConf, ESBoltType); } } catch (Exception e1) { LOG.error("Can't connect to ElasticSearch", e1); throw new RuntimeException(e1); } } // if more than one instance is used we expect their number to be the // same as the number of shards int totalTasks = context .getComponentTasks(context.getThisComponentId()).size(); if (totalTasks > 1) { // determine the number of shards so that we can restrict the // search ClusterSearchShardsRequest request = new ClusterSearchShardsRequest( indexName); ClusterSearchShardsResponse shardresponse = client.admin() .cluster().searchShards(request).actionGet(); ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups(); if (totalTasks != shardgroups.length) { throw new RuntimeException( "Number of ES spout instances should be the same as number of shards (" + shardgroups.length + ") but is " + totalTasks); } shardID = shardgroups[context.getThisTaskIndex()].getShardId(); LOG.info("Assigned shard ID {}", shardID); } partitioner = new URLPartitioner(); partitioner.configure(stormConf); _collector = collector; this.eventCounter = context.registerMetric("counters", new MultiCountMetric(), 10); context.registerMetric("beingProcessed", new IMetric() { @Override public Object getValueAndReset() { return beingProcessed.size(); } }, 10); context.registerMetric("buffer_size", new IMetric() { @Override public Object getValueAndReset() { return buffer.size(); } }, 10); } #location 5 #vulnerability type THREAD_SAFETY_VIOLATION
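Besides client setup, the removed block also pinned each spout task to exactly one index shard and refused to start when the parallelism did not match the shard count; the base class presumably still performs an equivalent check. The check is small enough to show in isolation; a hedged sketch, not the library's actual implementation:

public final class ShardAssignment {

    private ShardAssignment() {
    }

    /**
     * Returns the shard ID the given task should query, or throws if the
     * number of spout tasks does not match the number of shards.
     */
    public static int shardForTask(int taskIndex, int totalTasks, int[] shardIds) {
        if (totalTasks != shardIds.length) {
            throw new IllegalStateException(
                    "Number of spout instances should equal the number of shards ("
                            + shardIds.length + ") but is " + totalTasks);
        }
        return shardIds[taskIndex];
    }
}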
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void parseXml() { Result<Geo> result = parseXCalProperty("<latitude>12.34</latitude><longitude>56.78</longitude>", marshaller); Geo prop = result.getValue(); assertEquals(12.34, prop.getLatitude(), 0.001); assertEquals(56.78, prop.getLongitude(), 0.001); assertWarnings(0, result.getWarnings()); }
#vulnerable code @Test public void parseXml() { ICalParameters params = new ICalParameters(); Element element = xcalPropertyElement(marshaller, "<latitude>12.34</latitude><longitude>56.78</longitude>"); Result<Geo> result = marshaller.parseXml(element, params); Geo prop = result.getValue(); assertEquals(12.34, prop.getLatitude(), 0.001); assertEquals(56.78, prop.getLongitude(), 0.001); assertWarnings(0, result.getWarnings()); } #location 6 #vulnerability type NULL_DEREFERENCE
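In the original test the element built by xcalPropertyElement(...) can in principle be null, and marshaller.parseXml(element, params) then dereferences it; the patched test goes through a parseXCalProperty(...) helper instead. A sketch of the fail-fast shape such a helper can take, using a stand-in interface rather than biweekly's real marshaller types:

import static org.junit.Assert.assertNotNull;

import org.w3c.dom.Element;

public class ParseHelper {

    /** Minimal stand-in for the marshaller methods the tests exercise. */
    public interface XmlParser<T> {
        T parseXml(Element element);
    }

    /**
     * Parses the element and fails the test with a readable message instead
     * of a NullPointerException when the fixture produced no element.
     */
    public static <T> T parseOrFail(Element element, XmlParser<T> parser) {
        assertNotNull("test fixture produced no XML element", element);
        return parser.parseXml(element);
    }
}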
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void writeXml() { DateTimePropertyImpl prop = new DateTimePropertyImpl(datetime); assertWriteXml("<date-time>2013-06-11T13:43:02Z</date-time>", prop, marshaller); }
#vulnerable code @Test public void writeXml() { DateTimePropertyImpl prop = new DateTimePropertyImpl(datetime); Document actual = xcalProperty(marshaller); marshaller.writeXml(prop, XmlUtils.getRootElement(actual)); Document expected = xcalProperty(marshaller, "<date-time>2013-06-11T13:43:02Z</date-time>"); assertXMLEqual(expected, actual); } #location 6 #vulnerability type NULL_DEREFERENCE
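The writeXml tests in the rest of this block follow the same pattern: the original builds the actual Document by hand and can dereference a missing root element, while the patch collapses the boilerplate into assertWriteXml(expectedXml, prop, marshaller). A rough sketch of the guard such a helper needs, again with stand-in types; comparing the result against the expected snippet (assertXMLEqual in the originals) is left to the caller:

import static org.junit.Assert.assertNotNull;

import org.w3c.dom.Document;
import org.w3c.dom.Element;

public class WriteXmlHelper {

    /** Minimal stand-in for the marshaller methods the tests exercise. */
    public interface XmlWriter<T> {
        void writeXml(T property, Element parent);
    }

    /**
     * Writes the property under the document's root element, failing the test
     * with a readable message instead of an NPE when the fixture document or
     * its root element is missing.
     */
    public static <T> Document writeOrFail(Document target, T property, XmlWriter<T> writer) {
        assertNotNull("test fixture produced no document", target);
        Element root = target.getDocumentElement();
        assertNotNull("fixture document has no root element", root);
        writer.writeXml(property, root);
        return target;
    }
}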
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void writeXml_missing_both() { Geo prop = new Geo(null, null); assertWriteXml("", prop, marshaller); }
#vulnerable code @Test public void writeXml_missing_both() { Geo prop = new Geo(null, null); Document actual = xcalProperty(marshaller); marshaller.writeXml(prop, XmlUtils.getRootElement(actual)); Document expected = xcalProperty(marshaller, ""); assertXMLEqual(expected, actual); } #location 6 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void escape_newlines() throws Exception { ICalendar ical = new ICalendar(); VEvent event = new VEvent(); event.setSummary("summary\nof event"); ical.addEvent(event); StringWriter sw = new StringWriter(); ICalWriter writer = new ICalWriter(sw); writer.write(ical); writer.close(); //@formatter:off String expected = "BEGIN:VCALENDAR\r\n" + "VERSION:2\\.0\r\n" + "PRODID:.*?\r\n" + "BEGIN:VEVENT\r\n" + "UID:.*?\r\n" + "DTSTAMP:.*?\r\n" + "SUMMARY:summary\\\\nof event\r\n" + "END:VEVENT\r\n" + "END:VCALENDAR\r\n"; //@formatter:on String actual = sw.toString(); assertRegex(expected, actual); }
#vulnerable code @Test public void escape_newlines() throws Exception { ICalendar ical = new ICalendar(); VEvent event = new VEvent(); event.setSummary("summary\nof event"); ical.addEvent(event); StringWriter sw = new StringWriter(); ICalWriter writer = new ICalWriter(sw); writer.write(ical); writer.close(); //@formatter:off String expected = "BEGIN:VCALENDAR\r\n" + "VERSION:2\\.0\r\n" + "PRODID:.*?\r\n" + "BEGIN:VEVENT\r\n" + "UID:.*?\r\n" + "DTSTAMP:.*?\r\n" + "SUMMARY:summary\\\\nof event\r\n" + "END:VEVENT\r\n" + "END:VCALENDAR\r\n"; //@formatter:on String actual = sw.toString(); assertRegex(expected, actual); assertWarnings(0, writer.getWarnings()); } #location 30 #vulnerability type RESOURCE_LEAK
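Infer reports these writer tests as resource leaks because the ICalWriter is only closed on the happy path; if write(ical) throws, close() is never reached. The patch shown simply trims the test, but the more general cure is try-with-resources. A sketch assuming ICalWriter exposes close() through Closeable, as the explicit close() call in the test suggests; the package names in the imports are assumptions about the biweekly version under test:

import java.io.StringWriter;

import biweekly.ICalendar;
import biweekly.io.text.ICalWriter;

public class WriterLeakFix {

    /**
     * Serialises the calendar, guaranteeing that the writer is closed even
     * when write() throws.
     */
    public String writeCalendar(ICalendar ical) throws Exception {
        StringWriter sw = new StringWriter();
        try (ICalWriter writer = new ICalWriter(sw)) {
            writer.write(ical);
        }
        return sw.toString();
    }
}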
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void writeXml_missing_longitude() { Geo prop = new Geo(12.34, null); assertWriteXml("<latitude>12.34</latitude>", prop, marshaller); }
#vulnerable code @Test public void writeXml_missing_longitude() { Geo prop = new Geo(12.34, null); Document actual = xcalProperty(marshaller); marshaller.writeXml(prop, XmlUtils.getRootElement(actual)); Document expected = xcalProperty(marshaller, "<latitude>12.34</latitude>"); assertXMLEqual(expected, actual); } #location 6 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void writeXml() { IntegerProperty prop = new IntegerProperty(5); assertWriteXml("<integer>5</integer>", prop, marshaller); }
#vulnerable code @Test public void writeXml() { IntegerProperty prop = new IntegerProperty(5); Document actual = xcalProperty(marshaller); marshaller.writeXml(prop, XmlUtils.getRootElement(actual)); Document expected = xcalProperty(marshaller, "<integer>5</integer>"); assertXMLEqual(expected, actual); } #location 6 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void experimental_property_marshaller() throws Exception { ICalendar ical = new ICalendar(); ical.addProperty(new TestProperty("one")); ical.addProperty(new TestProperty("two")); StringWriter sw = new StringWriter(); ICalWriter writer = new ICalWriter(sw); writer.registerMarshaller(new TestPropertyMarshaller()); writer.write(ical); writer.close(); //@formatter:off String expected = "BEGIN:VCALENDAR\r\n" + "VERSION:2\\.0\r\n" + "PRODID:.*?\r\n" + "X-TEST:one\r\n" + "X-TEST:two\r\n" + "END:VCALENDAR\r\n"; //@formatter:on String actual = sw.toString(); assertRegex(expected, actual); }
#vulnerable code @Test public void experimental_property_marshaller() throws Exception { ICalendar ical = new ICalendar(); ical.addProperty(new TestProperty("one")); ical.addProperty(new TestProperty("two")); StringWriter sw = new StringWriter(); ICalWriter writer = new ICalWriter(sw); writer.registerMarshaller(new TestPropertyMarshaller()); writer.write(ical); writer.close(); //@formatter:off String expected = "BEGIN:VCALENDAR\r\n" + "VERSION:2\\.0\r\n" + "PRODID:.*?\r\n" + "X-TEST:one\r\n" + "X-TEST:two\r\n" + "END:VCALENDAR\r\n"; //@formatter:on String actual = sw.toString(); assertRegex(expected, actual); assertWarnings(0, writer.getWarnings()); } #location 26 #vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void writeXml() { RequestStatus prop = new RequestStatus("1.2.3"); prop.setDescription("description"); prop.setExceptionText("data"); assertWriteXml("<code>1.2.3</code><description>description</description><data>data</data>", prop, marshaller); }
#vulnerable code @Test public void writeXml() { RequestStatus prop = new RequestStatus("1.2.3"); prop.setDescription("description"); prop.setExceptionText("data"); Document actual = xcalProperty(marshaller); marshaller.writeXml(prop, XmlUtils.getRootElement(actual)); Document expected = xcalProperty(marshaller, "<code>1.2.3</code><description>description</description><data>data</data>"); assertXMLEqual(expected, actual); } #location 8 #vulnerability type NULL_DEREFERENCE