Dataset columns (string lengths, min to max):

  target            20 to 113k characters
  src_fm            11 to 86.3k characters
  src_fm_fc         21 to 86.4k characters
  src_fm_fc_co      30 to 86.4k characters
  src_fm_fc_ms      42 to 86.8k characters
  src_fm_fc_ms_ff   43 to 86.8k characters
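The column names appear to encode increasing amounts of context around a focal method (src_fm: the focal method; _fc: wrapped in its class; _co: plus constructor signatures; _ms: plus other method signatures; _ff: plus field declarations), with target holding the corresponding test method. A minimal sketch of one row as a Java value type, assuming every column is a plain string; the field comments are inferences from the column names and the rows below, not statements from the source:

// Hypothetical row holder; the meanings in the comments are assumptions.
record DatasetRow(
    String target,         // test method to generate (20 to 113k chars)
    String srcFm,          // focal method body
    String srcFmFc,        // focal method inside its class
    String srcFmFcCo,      // plus constructor signatures
    String srcFmFcMs,      // plus other method signatures
    String srcFmFcMsFf) {  // plus field declarations
}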
@Test
public void testDefaultJsonFileFormatOptions() throws Exception {
  JsonFileConfig fileFormat = new JsonFileConfig();
  String tableOptions = fileFormat.toTableOptions();
  assertContains("type => 'json'", tableOptions);
}
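assertContains is not a standard JUnit assertion, so it is presumably a helper defined on the surrounding test class. A minimal sketch of the assumed behaviour, a substring check with a readable failure message:

// Assumed helper, not taken from the source.
private static void assertContains(String expectedContains, String actual) {
  org.junit.Assert.assertTrue(
      "expected to find \"" + expectedContains + "\" in \"" + actual + "\"",
      actual.contains(expectedContains));
}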
public String toTableOptions() throws IllegalArgumentException {
  final StringBuilder stringBuilder = new StringBuilder();
  switch (getFileType()) {
    case TEXT:
    case CSV:
    case TSV:
    case PSV:
      final TextFileConfig textFileConfig = (TextFileConfig) this;
      stringBuilder.append("type => 'text', ");
      stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter())));
      stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment()))));
      stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape()))));
      stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote()))));
      stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter())));
      stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString()));
      stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString()));
      stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString()));
      stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString()));
      return stringBuilder.toString();
    case JSON:
      return "type => 'json'";
    case PARQUET:
      return "type => 'parquet'";
    case ICEBERG:
      return "type => 'iceberg'";
    case AVRO:
      return "type => 'avro'";
    case HTTP_LOG:
    case UNKNOWN:
      throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported");
    case EXCEL: {
      final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this;
      stringBuilder.append("type => 'excel', ");
      if (excelFileConfig.getSheetName() != null) {
        stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName())));
      }
      stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString()));
      stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString()));
      stringBuilder.append(format("xls => false "));
      return stringBuilder.toString();
    }
    case XLS: {
      final XlsFileConfig xlsFileConfig = (XlsFileConfig) this;
      stringBuilder.append("type => 'excel', ");
      if (xlsFileConfig.getSheetName() != null) {
        stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName())));
      }
      stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString()));
      stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString()));
      stringBuilder.append(format("xls => true "));
      return stringBuilder.toString();
    }
    default:
      throw new IllegalArgumentException("Invalid file format type " + getFileType());
  }
}
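toTableOptions renders the format configuration as named table-function arguments (name => value). A minimal usage sketch, assuming a Dremio-style query over a promoted file; the dfs.tmp path and the query shape are illustrative assumptions, not taken from the source:

// Illustrative only: embed the generated options in a table-function call.
FileFormat fileFormat = new JsonFileConfig();
String query = String.format(
    "SELECT * FROM TABLE(dfs.tmp.\"sample.json\"(%s))",
    fileFormat.toTableOptions());
// query: SELECT * FROM TABLE(dfs.tmp."sample.json"(type => 'json'))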
FileFormat {
  public String toTableOptions() throws IllegalArgumentException {
    final StringBuilder stringBuilder = new StringBuilder();
    switch (getFileType()) {
      case TEXT:
      case CSV:
      case TSV:
      case PSV:
        final TextFileConfig textFileConfig = (TextFileConfig) this;
        stringBuilder.append("type => 'text', ");
        stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter())));
        stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment()))));
        stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape()))));
        stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote()))));
        stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter())));
        stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString()));
        stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString()));
        stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString()));
        stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString()));
        return stringBuilder.toString();
      case JSON:
        return "type => 'json'";
      case PARQUET:
        return "type => 'parquet'";
      case ICEBERG:
        return "type => 'iceberg'";
      case AVRO:
        return "type => 'avro'";
      case HTTP_LOG:
      case UNKNOWN:
        throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported");
      case EXCEL: {
        final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this;
        stringBuilder.append("type => 'excel', ");
        if (excelFileConfig.getSheetName() != null) {
          stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName())));
        }
        stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString()));
        stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString()));
        stringBuilder.append(format("xls => false "));
        return stringBuilder.toString();
      }
      case XLS: {
        final XlsFileConfig xlsFileConfig = (XlsFileConfig) this;
        stringBuilder.append("type => 'excel', ");
        if (xlsFileConfig.getSheetName() != null) {
          stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName())));
        }
        stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString()));
        stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString()));
        stringBuilder.append(format("xls => true "));
        return stringBuilder.toString();
      }
      default:
        throw new IllegalArgumentException("Invalid file format type " + getFileType());
    }
  }

  @Override String toString();
  String toTableOptions();
  String getName();
  String getOwner();
  boolean getIsFolder();
  void setIsFolder(boolean isFolder);
  String getLocation();
  List<String> getFullPath();
  long getCtime();
  String getVersion();
  void setName(String name);
  void setOwner(String owner);
  void setFullPath(List<String> fullPath);
  void setCtime(long ctime);
  void setVersion(String version);
  void setLocation(String location);
  @JsonIgnore @SuppressWarnings({ "rawtypes", "unchecked" }) FileConfig asFileConfig();
  @JsonIgnore FileType getFileType();
  static FileFormat getForFile(FileConfig fileConfig);
  static FileFormat getForFolder(FileConfig fileConfig);
  static FileFormat getEmptyConfig(FileType type);
  static String getExtension(FileType type);
  static FileType getFileFormatType(List<String> extensions);
}
@Test
public void testDefaultParquetFileFormatOptions() throws Exception {
  ParquetFileConfig fileFormat = new ParquetFileConfig();
  String tableOptions = fileFormat.toTableOptions();
  assertContains("type => 'parquet'", tableOptions);
}
@Test
public void testDefaultAvroFileFormatOptions() throws Exception {
  AvroFileConfig fileFormat = new AvroFileConfig();
  String tableOptions = fileFormat.toTableOptions();
  assertContains("type => 'avro'", tableOptions);
}
@Test
public void testMutatorApplyNoDropNonPreview() {
  boolean preview = false;
  TransformResult result = mutator(preview).apply("foo", "foo2", newValue, false);
  assertEquals(newHashSet("foo2"), result.getAddedColumns());
  assertEquals(newHashSet(), result.getModifiedColumns());
  assertEquals(newHashSet(), result.getRemovedColumns());
  assertColIs(newValue, result, "foo2");
  assertColIs(value, result, "foo");
}
public TransformResult apply(String oldCol, String newCol, ExpressionBase newExp, boolean dropSourceColumn) {
  // Wrap the ExpressionBase and delegate to the Expression-based overload.
  return apply(oldCol, newCol, newExp.wrap(), dropSourceColumn);
}
DatasetStateMutator {
  public TransformResult apply(String oldCol, String newCol, ExpressionBase newExp, boolean dropSourceColumn) {
    return apply(oldCol, newCol, newExp.wrap(), dropSourceColumn);
  }

  DatasetStateMutator(String username, VirtualDatasetState virtualDatasetState, boolean preview);
  void setSql(QueryMetadata metadata);
  void addColumn(int index, Column column);
  void addColumn(Column column);
  void moveColumn(int index, int dest);
  int columnCount();
  void addJoin(Join join);
  void updateColumnTables();
  String uniqueColumnName(String column);
  String getDatasetAlias();
  void groupedBy(List<Column> newColumns, List<Column> groupBys);
  void addFilter(Filter filter);
  void setOrdersList(List<Order> columnsList);
  TransformResult result();
  int indexOfCol(String colName);
  void nest();
  TransformResult rename(String oldCol, String newCol);
  TransformResult apply(String oldCol, String newCol, ExpressionBase newExp, boolean dropSourceColumn);
  TransformResult apply(String oldCol, String newCol, Expression newExpWrapped, boolean dropSourceColumn);
  Expression findColValueForModification(String colName);
  Expression findColValue(String colName);
  boolean isGrouped();
  void dropColumn(String droppedColumnName);
}
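A minimal sketch of how the ExpressionBase overload above might be driven, mirroring the mutator test earlier in this group; the virtualDatasetState and newValue values are placeholders, not taken from the source:

// Hypothetical driver for the ExpressionBase overload.
DatasetStateMutator mutator =
    new DatasetStateMutator("testUser", virtualDatasetState, /* preview = */ false);
TransformResult result = mutator.apply("foo", "foo2", newValue, /* dropSourceColumn = */ false);
// "foo2" is reported as an added column; "foo" survives because the source column is not dropped.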
@Test
public void testDefaultExcelFileFormatOptions() throws Exception {
  ExcelFileConfig fileFormat = new ExcelFileConfig();
  String tableOptions = fileFormat.toTableOptions();
  assertContains("type => 'excel'", tableOptions);
  assertContains("extractHeader => false", tableOptions);
  assertContains("hasMergedCells => false", tableOptions);
  assertContains("xls => false", tableOptions);
}
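The EXCEL branch of toTableOptions only emits a sheet entry when a sheet name is set. A minimal sketch of a non-default configuration; the setter names are assumptions based on the getSheetName()/getExtractHeader() accessors used in the focal method:

// Illustrative only: setter names are assumed to mirror the getters above.
ExcelFileConfig excel = new ExcelFileConfig();
excel.setSheetName("Sheet1");
excel.setExtractHeader(true);
String options = excel.toTableOptions();
// options now also contains: sheet => 'Sheet1', extractHeader => true, xls => false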
public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } }
FileFormat { public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } } }
FileFormat { public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } } }
FileFormat { public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } } @Override String toString(); String toTableOptions(); String getName(); String getOwner(); boolean getIsFolder(); void setIsFolder(boolean isFolder); String getLocation(); List<String> getFullPath(); long getCtime(); String getVersion(); void setName(String name); void setOwner(String owner); void setFullPath(List<String> fullPath); void setCtime(long ctime); void setVersion(String version); void setLocation(String location); @JsonIgnore @SuppressWarnings({ "rawtypes", "unchecked" }) FileConfig asFileConfig(); @JsonIgnore FileType getFileType(); static FileFormat getForFile(FileConfig fileConfig); static FileFormat getForFolder(FileConfig fileConfig); static FileFormat getEmptyConfig(FileType type); static String getExtension(FileType type); static FileType getFileFormatType(List<String> extensions); }
FileFormat { public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } } @Override String toString(); String toTableOptions(); String getName(); String getOwner(); boolean getIsFolder(); void setIsFolder(boolean isFolder); String getLocation(); List<String> getFullPath(); long getCtime(); String getVersion(); void setName(String name); void setOwner(String owner); void setFullPath(List<String> fullPath); void setCtime(long ctime); void setVersion(String version); void setLocation(String location); @JsonIgnore @SuppressWarnings({ "rawtypes", "unchecked" }) FileConfig asFileConfig(); @JsonIgnore FileType getFileType(); static FileFormat getForFile(FileConfig fileConfig); static FileFormat getForFolder(FileConfig fileConfig); static FileFormat getEmptyConfig(FileType type); static String getExtension(FileType type); static FileType getFileFormatType(List<String> extensions); }
@Test public void testDefaultXlsFileFormatOptions() throws Exception { XlsFileConfig fileFormat = new XlsFileConfig(); String tableOptions = fileFormat.toTableOptions(); assertContains("type => 'excel'", tableOptions); assertContains("extractHeader => false", tableOptions); assertContains("hasMergedCells => false", tableOptions); assertContains("xls => true", tableOptions); }
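The test rows in this section call an assertContains(expectedPart, actual) helper that is not part of the FileFormat sources shown here. A minimal sketch of such a helper, assuming plain JUnit 4 assertions (the project's actual helper may differ), could look like:
// Hypothetical substring assertion used by the tests above and below;
// not the project's actual implementation.
static void assertContains(String expectedPart, String actual) {
    org.junit.Assert.assertTrue(
        "expected to find [" + expectedPart + "] in [" + actual + "]",
        actual != null && actual.contains(expectedPart));
}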
public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } }
FileFormat { public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } } }
FileFormat { public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } } }
FileFormat { public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } } @Override String toString(); String toTableOptions(); String getName(); String getOwner(); boolean getIsFolder(); void setIsFolder(boolean isFolder); String getLocation(); List<String> getFullPath(); long getCtime(); String getVersion(); void setName(String name); void setOwner(String owner); void setFullPath(List<String> fullPath); void setCtime(long ctime); void setVersion(String version); void setLocation(String location); @JsonIgnore @SuppressWarnings({ "rawtypes", "unchecked" }) FileConfig asFileConfig(); @JsonIgnore FileType getFileType(); static FileFormat getForFile(FileConfig fileConfig); static FileFormat getForFolder(FileConfig fileConfig); static FileFormat getEmptyConfig(FileType type); static String getExtension(FileType type); static FileType getFileFormatType(List<String> extensions); }
FileFormat { public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } } @Override String toString(); String toTableOptions(); String getName(); String getOwner(); boolean getIsFolder(); void setIsFolder(boolean isFolder); String getLocation(); List<String> getFullPath(); long getCtime(); String getVersion(); void setName(String name); void setOwner(String owner); void setFullPath(List<String> fullPath); void setCtime(long ctime); void setVersion(String version); void setLocation(String location); @JsonIgnore @SuppressWarnings({ "rawtypes", "unchecked" }) FileConfig asFileConfig(); @JsonIgnore FileType getFileType(); static FileFormat getForFile(FileConfig fileConfig); static FileFormat getForFolder(FileConfig fileConfig); static FileFormat getEmptyConfig(FileType type); static String getExtension(FileType type); static FileType getFileFormatType(List<String> extensions); }
@Test public void testSheetExcelFile() throws Exception { ExcelFileConfig fileFormat = new ExcelFileConfig(); fileFormat.setSheetName("foo"); String tableOptions = fileFormat.toTableOptions(); assertContains("type => 'excel'", tableOptions); assertContains("xls => false", tableOptions); assertContains("sheet => 'foo'", tableOptions); }
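For orientation, the EXCEL branch of toTableOptions() would render the configuration in the test above roughly as the options string below, assuming ExcelFileConfig defaults extractHeader and hasMergedCells to false in the same way the XLS defaults test shows:
// Hypothetical rendered options for an ExcelFileConfig with sheet name "foo",
// assuming extractHeader/hasMergedCells default to false:
// type => 'excel', sheet => 'foo', extractHeader => false, hasMergedCells => false, xls => false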
public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } }
FileFormat { public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } } }
FileFormat { public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } } }
FileFormat { public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } } @Override String toString(); String toTableOptions(); String getName(); String getOwner(); boolean getIsFolder(); void setIsFolder(boolean isFolder); String getLocation(); List<String> getFullPath(); long getCtime(); String getVersion(); void setName(String name); void setOwner(String owner); void setFullPath(List<String> fullPath); void setCtime(long ctime); void setVersion(String version); void setLocation(String location); @JsonIgnore @SuppressWarnings({ "rawtypes", "unchecked" }) FileConfig asFileConfig(); @JsonIgnore FileType getFileType(); static FileFormat getForFile(FileConfig fileConfig); static FileFormat getForFolder(FileConfig fileConfig); static FileFormat getEmptyConfig(FileType type); static String getExtension(FileType type); static FileType getFileFormatType(List<String> extensions); }
FileFormat { public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } } @Override String toString(); String toTableOptions(); String getName(); String getOwner(); boolean getIsFolder(); void setIsFolder(boolean isFolder); String getLocation(); List<String> getFullPath(); long getCtime(); String getVersion(); void setName(String name); void setOwner(String owner); void setFullPath(List<String> fullPath); void setCtime(long ctime); void setVersion(String version); void setLocation(String location); @JsonIgnore @SuppressWarnings({ "rawtypes", "unchecked" }) FileConfig asFileConfig(); @JsonIgnore FileType getFileType(); static FileFormat getForFile(FileConfig fileConfig); static FileFormat getForFolder(FileConfig fileConfig); static FileFormat getEmptyConfig(FileType type); static String getExtension(FileType type); static FileType getFileFormatType(List<String> extensions); }
@Test public void testSheetExcelFileWithSingleQuote() throws Exception { ExcelFileConfig fileFormat = new ExcelFileConfig(); fileFormat.setSheetName("fo'o"); String tableOptions = fileFormat.toTableOptions(); assertContains("type => 'excel'", tableOptions); assertContains("xls => false", tableOptions); assertContains("sheet => 'fo''o'", tableOptions); }
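This test expects the embedded single quote in the sheet name to come out doubled (sheet => 'fo''o'), which is standard SQL string-literal escaping. A minimal sketch of an escaper with that behavior, assuming quote doubling (SqlUtils.stringLiteral itself is not shown in these rows), is:
// Hypothetical single-quote escaper illustrating the behavior the test expects;
// the real SqlUtils.stringLiteral is not reproduced here.
static String stringLiteral(String value) {
    // "fo'o" becomes 'fo''o'
    return "'" + value.replace("'", "''") + "'";
}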
public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } }
FileFormat { public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } } }
FileFormat { public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } } }
FileFormat { public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } } @Override String toString(); String toTableOptions(); String getName(); String getOwner(); boolean getIsFolder(); void setIsFolder(boolean isFolder); String getLocation(); List<String> getFullPath(); long getCtime(); String getVersion(); void setName(String name); void setOwner(String owner); void setFullPath(List<String> fullPath); void setCtime(long ctime); void setVersion(String version); void setLocation(String location); @JsonIgnore @SuppressWarnings({ "rawtypes", "unchecked" }) FileConfig asFileConfig(); @JsonIgnore FileType getFileType(); static FileFormat getForFile(FileConfig fileConfig); static FileFormat getForFolder(FileConfig fileConfig); static FileFormat getEmptyConfig(FileType type); static String getExtension(FileType type); static FileType getFileFormatType(List<String> extensions); }
FileFormat { public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } } @Override String toString(); String toTableOptions(); String getName(); String getOwner(); boolean getIsFolder(); void setIsFolder(boolean isFolder); String getLocation(); List<String> getFullPath(); long getCtime(); String getVersion(); void setName(String name); void setOwner(String owner); void setFullPath(List<String> fullPath); void setCtime(long ctime); void setVersion(String version); void setLocation(String location); @JsonIgnore @SuppressWarnings({ "rawtypes", "unchecked" }) FileConfig asFileConfig(); @JsonIgnore FileType getFileType(); static FileFormat getForFile(FileConfig fileConfig); static FileFormat getForFolder(FileConfig fileConfig); static FileFormat getEmptyConfig(FileType type); static String getExtension(FileType type); static FileType getFileFormatType(List<String> extensions); }
@Test
public void testSheetXlsFile() throws Exception {
  XlsFileConfig fileFormat = new XlsFileConfig();
  fileFormat.setSheetName("foo");
  String tableOptions = fileFormat.toTableOptions();
  assertContains("type => 'excel'", tableOptions);
  assertContains("xls => true", tableOptions);
  assertContains("sheet => 'foo'", tableOptions);
}
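The tests in this section call an assertContains helper that is not defined anywhere in this document. A minimal sketch of what such a helper could look like, assuming plain JUnit 4 assertions (the class and method names below are hypothetical):

// Hypothetical helper class, not taken from the code under test.
class FileFormatAssertions {
  // Passes only if expectedPiece occurs somewhere inside the generated tableOptions string.
  static void assertContains(String expectedPiece, String tableOptions) {
    org.junit.Assert.assertTrue(
        "expected to find [" + expectedPiece + "] in [" + tableOptions + "]",
        tableOptions.contains(expectedPiece));
  }
}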
public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } }
FileFormat { public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } } }
FileFormat { public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } } }
FileFormat { public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } } @Override String toString(); String toTableOptions(); String getName(); String getOwner(); boolean getIsFolder(); void setIsFolder(boolean isFolder); String getLocation(); List<String> getFullPath(); long getCtime(); String getVersion(); void setName(String name); void setOwner(String owner); void setFullPath(List<String> fullPath); void setCtime(long ctime); void setVersion(String version); void setLocation(String location); @JsonIgnore @SuppressWarnings({ "rawtypes", "unchecked" }) FileConfig asFileConfig(); @JsonIgnore FileType getFileType(); static FileFormat getForFile(FileConfig fileConfig); static FileFormat getForFolder(FileConfig fileConfig); static FileFormat getEmptyConfig(FileType type); static String 
getExtension(FileType type); static FileType getFileFormatType(List<String> extensions); }
FileFormat { public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } } @Override String toString(); String toTableOptions(); String getName(); String getOwner(); boolean getIsFolder(); void setIsFolder(boolean isFolder); String getLocation(); List<String> getFullPath(); long getCtime(); String getVersion(); void setName(String name); void setOwner(String owner); void setFullPath(List<String> fullPath); void setCtime(long ctime); void setVersion(String version); void setLocation(String location); @JsonIgnore @SuppressWarnings({ "rawtypes", "unchecked" }) FileConfig asFileConfig(); @JsonIgnore FileType getFileType(); static FileFormat getForFile(FileConfig fileConfig); static FileFormat getForFolder(FileConfig fileConfig); static FileFormat getEmptyConfig(FileType type); static String 
getExtension(FileType type); static FileType getFileFormatType(List<String> extensions); }
@Test
public void testExtractHeaderExcelFile() throws Exception {
  ExcelFileConfig fileFormat = new ExcelFileConfig();
  fileFormat.setExtractHeader(true);
  String tableOptions = fileFormat.toTableOptions();
  assertContains("type => 'excel'", tableOptions);
  assertContains("xls => false", tableOptions);
  assertContains("extractHeader => true", tableOptions);
}
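For context, the string returned by toTableOptions() is a fragment intended to be spliced into a table-function call. The exact query shape and dataset path below are assumptions for illustration only, not something stated in this document:

// Hypothetical usage: embed the generated options in a table-function query.
// The dataset path "storage"."report.xlsx" is made up for the example.
ExcelFileConfig config = new ExcelFileConfig();
config.setExtractHeader(true);
String sql = "SELECT * FROM TABLE(\"storage\".\"report.xlsx\"(" + config.toTableOptions() + "))";
// sql now embeds: type => 'excel', extractHeader => true, ..., xls => false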
public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } }
FileFormat { public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } } }
FileFormat { public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } } }
FileFormat { public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } } @Override String toString(); String toTableOptions(); String getName(); String getOwner(); boolean getIsFolder(); void setIsFolder(boolean isFolder); String getLocation(); List<String> getFullPath(); long getCtime(); String getVersion(); void setName(String name); void setOwner(String owner); void setFullPath(List<String> fullPath); void setCtime(long ctime); void setVersion(String version); void setLocation(String location); @JsonIgnore @SuppressWarnings({ "rawtypes", "unchecked" }) FileConfig asFileConfig(); @JsonIgnore FileType getFileType(); static FileFormat getForFile(FileConfig fileConfig); static FileFormat getForFolder(FileConfig fileConfig); static FileFormat getEmptyConfig(FileType type); static String 
getExtension(FileType type); static FileType getFileFormatType(List<String> extensions); }
FileFormat { public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } } @Override String toString(); String toTableOptions(); String getName(); String getOwner(); boolean getIsFolder(); void setIsFolder(boolean isFolder); String getLocation(); List<String> getFullPath(); long getCtime(); String getVersion(); void setName(String name); void setOwner(String owner); void setFullPath(List<String> fullPath); void setCtime(long ctime); void setVersion(String version); void setLocation(String location); @JsonIgnore @SuppressWarnings({ "rawtypes", "unchecked" }) FileConfig asFileConfig(); @JsonIgnore FileType getFileType(); static FileFormat getForFile(FileConfig fileConfig); static FileFormat getForFolder(FileConfig fileConfig); static FileFormat getEmptyConfig(FileType type); static String 
getExtension(FileType type); static FileType getFileFormatType(List<String> extensions); }
@Test
public void testHasMergedCellsExcelFile() throws Exception {
  ExcelFileConfig fileFormat = new ExcelFileConfig();
  fileFormat.setHasMergedCells(true);
  String tableOptions = fileFormat.toTableOptions();
  assertContains("type => 'excel'", tableOptions);
  assertContains("xls => false", tableOptions);
  assertContains("hasMergedCells => true", tableOptions);
}
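Both spreadsheet branches of toTableOptions() emit type => 'excel' and differ only in the trailing xls flag. A small comparison sketch, assuming the boolean Excel options have non-null defaults (this document does not show the config defaults):

// Hypothetical comparison of the two spreadsheet configs.
ExcelFileConfig xlsx = new ExcelFileConfig(); // EXCEL branch: appends "xls => false"
XlsFileConfig xls = new XlsFileConfig();      // XLS branch:   appends "xls => true"
System.out.println(xlsx.toTableOptions());
System.out.println(xls.toTableOptions());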
public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } }
FileFormat { public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } } }
FileFormat { public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } } }
FileFormat { public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } } @Override String toString(); String toTableOptions(); String getName(); String getOwner(); boolean getIsFolder(); void setIsFolder(boolean isFolder); String getLocation(); List<String> getFullPath(); long getCtime(); String getVersion(); void setName(String name); void setOwner(String owner); void setFullPath(List<String> fullPath); void setCtime(long ctime); void setVersion(String version); void setLocation(String location); @JsonIgnore @SuppressWarnings({ "rawtypes", "unchecked" }) FileConfig asFileConfig(); @JsonIgnore FileType getFileType(); static FileFormat getForFile(FileConfig fileConfig); static FileFormat getForFolder(FileConfig fileConfig); static FileFormat getEmptyConfig(FileType type); static String 
getExtension(FileType type); static FileType getFileFormatType(List<String> extensions); }
FileFormat { public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } } @Override String toString(); String toTableOptions(); String getName(); String getOwner(); boolean getIsFolder(); void setIsFolder(boolean isFolder); String getLocation(); List<String> getFullPath(); long getCtime(); String getVersion(); void setName(String name); void setOwner(String owner); void setFullPath(List<String> fullPath); void setCtime(long ctime); void setVersion(String version); void setLocation(String location); @JsonIgnore @SuppressWarnings({ "rawtypes", "unchecked" }) FileConfig asFileConfig(); @JsonIgnore FileType getFileType(); static FileFormat getForFile(FileConfig fileConfig); static FileFormat getForFolder(FileConfig fileConfig); static FileFormat getEmptyConfig(FileType type); static String 
getExtension(FileType type); static FileType getFileFormatType(List<String> extensions); }
@Test
public void testExtractHeaderXlsFile() throws Exception {
  XlsFileConfig fileFormat = new XlsFileConfig();
  fileFormat.setExtractHeader(true);
  String tableOptions = fileFormat.toTableOptions();
  assertContains("type => 'excel'", tableOptions);
  assertContains("xls => true", tableOptions);
  assertContains("extractHeader => true", tableOptions);
}
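The delimited-text branch (TEXT/CSV/TSV/PSV) is the only one that serializes per-field options such as fieldDelimiter and lineDelimiter. A minimal sketch of exercising it, assuming TextFileConfig exposes setters mirroring the getters used in toTableOptions() and that the remaining text options have non-null defaults (neither is shown in this document):

// Hypothetical: build options for a comma-separated file.
// The setters below are assumed to exist; only the getters appear in this document.
TextFileConfig csv = new TextFileConfig();
csv.setFieldDelimiter(",");
csv.setLineDelimiter("\n");
csv.setExtractHeader(true);
String tableOptions = csv.toTableOptions();
// Expected to start with: type => 'text', fieldDelimiter => ',', ...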
public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } }
FileFormat { public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } } }
FileFormat { public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } } }
FileFormat { public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } } @Override String toString(); String toTableOptions(); String getName(); String getOwner(); boolean getIsFolder(); void setIsFolder(boolean isFolder); String getLocation(); List<String> getFullPath(); long getCtime(); String getVersion(); void setName(String name); void setOwner(String owner); void setFullPath(List<String> fullPath); void setCtime(long ctime); void setVersion(String version); void setLocation(String location); @JsonIgnore @SuppressWarnings({ "rawtypes", "unchecked" }) FileConfig asFileConfig(); @JsonIgnore FileType getFileType(); static FileFormat getForFile(FileConfig fileConfig); static FileFormat getForFolder(FileConfig fileConfig); static FileFormat getEmptyConfig(FileType type); static String 
getExtension(FileType type); static FileType getFileFormatType(List<String> extensions); }
FileFormat { public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } } @Override String toString(); String toTableOptions(); String getName(); String getOwner(); boolean getIsFolder(); void setIsFolder(boolean isFolder); String getLocation(); List<String> getFullPath(); long getCtime(); String getVersion(); void setName(String name); void setOwner(String owner); void setFullPath(List<String> fullPath); void setCtime(long ctime); void setVersion(String version); void setLocation(String location); @JsonIgnore @SuppressWarnings({ "rawtypes", "unchecked" }) FileConfig asFileConfig(); @JsonIgnore FileType getFileType(); static FileFormat getForFile(FileConfig fileConfig); static FileFormat getForFolder(FileConfig fileConfig); static FileFormat getEmptyConfig(FileType type); static String 
getExtension(FileType type); static FileType getFileFormatType(List<String> extensions); }
@Test public void testHasMergedCellsXlsFile() throws Exception { XlsFileConfig fileFormat = new XlsFileConfig(); fileFormat.setHasMergedCells(true); String tableOptions = fileFormat.toTableOptions(); assertContains("type => 'excel'", tableOptions); assertContains("xls => true", tableOptions); assertContains("hasMergedCells => true", tableOptions); }
public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } }
FileFormat { public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } } }
FileFormat { public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } } }
FileFormat { public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } } @Override String toString(); String toTableOptions(); String getName(); String getOwner(); boolean getIsFolder(); void setIsFolder(boolean isFolder); String getLocation(); List<String> getFullPath(); long getCtime(); String getVersion(); void setName(String name); void setOwner(String owner); void setFullPath(List<String> fullPath); void setCtime(long ctime); void setVersion(String version); void setLocation(String location); @JsonIgnore @SuppressWarnings({ "rawtypes", "unchecked" }) FileConfig asFileConfig(); @JsonIgnore FileType getFileType(); static FileFormat getForFile(FileConfig fileConfig); static FileFormat getForFolder(FileConfig fileConfig); static FileFormat getEmptyConfig(FileType type); static String 
getExtension(FileType type); static FileType getFileFormatType(List<String> extensions); }
FileFormat { public String toTableOptions() throws IllegalArgumentException { final StringBuilder stringBuilder = new StringBuilder(); switch (getFileType()) { case TEXT: case CSV: case TSV: case PSV: final TextFileConfig textFileConfig = (TextFileConfig)this; stringBuilder.append("type => 'text', "); stringBuilder.append(format("fieldDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getFieldDelimiter()))); stringBuilder.append(format("comment => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getComment())))); stringBuilder.append(format("%1$sescape%1$s => %2$s, ", SqlUtils.QUOTE, SqlUtils.stringLiteral(singleChar(textFileConfig.getEscape())))); stringBuilder.append(format("quote => %s, ", SqlUtils.stringLiteral(singleChar(textFileConfig.getQuote())))); stringBuilder.append(format("lineDelimiter => %s, ", SqlUtils.stringLiteral(textFileConfig.getLineDelimiter()))); stringBuilder.append(format("extractHeader => %s, ", textFileConfig.getExtractHeader().toString())); stringBuilder.append(format("skipFirstLine => %s, ", textFileConfig.getSkipFirstLine().toString())); stringBuilder.append(format("autoGenerateColumnNames => %s, ", textFileConfig.getAutoGenerateColumnNames().toString())); stringBuilder.append(format("trimHeader => %s", textFileConfig.getTrimHeader().toString())); return stringBuilder.toString(); case JSON: return "type => 'json'"; case PARQUET: return "type => 'parquet'"; case ICEBERG: return "type => 'iceberg'"; case AVRO: return "type => 'avro'"; case HTTP_LOG: case UNKNOWN: throw new UnsupportedOperationException("HTTP LOG and UNKNOWN file formats are not supported"); case EXCEL: { final ExcelFileConfig excelFileConfig = (ExcelFileConfig) this; stringBuilder.append("type => 'excel', "); if (excelFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(excelFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", excelFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", excelFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => false ")); return stringBuilder.toString(); } case XLS: { final XlsFileConfig xlsFileConfig = (XlsFileConfig) this; stringBuilder.append("type => 'excel', "); if (xlsFileConfig.getSheetName() != null) { stringBuilder.append(format("sheet => %s, ", SqlUtils.stringLiteral(xlsFileConfig.getSheetName()))); } stringBuilder.append(format("extractHeader => %s, ", xlsFileConfig.getExtractHeader().toString())); stringBuilder.append(format("hasMergedCells => %s, ", xlsFileConfig.getHasMergedCells().toString())); stringBuilder.append(format("xls => true ")); return stringBuilder.toString(); } default: throw new IllegalArgumentException("Invalid file format type " + getFileType()); } } @Override String toString(); String toTableOptions(); String getName(); String getOwner(); boolean getIsFolder(); void setIsFolder(boolean isFolder); String getLocation(); List<String> getFullPath(); long getCtime(); String getVersion(); void setName(String name); void setOwner(String owner); void setFullPath(List<String> fullPath); void setCtime(long ctime); void setVersion(String version); void setLocation(String location); @JsonIgnore @SuppressWarnings({ "rawtypes", "unchecked" }) FileConfig asFileConfig(); @JsonIgnore FileType getFileType(); static FileFormat getForFile(FileConfig fileConfig); static FileFormat getForFolder(FileConfig fileConfig); static FileFormat getEmptyConfig(FileType type); static String 
getExtension(FileType type); static FileType getFileFormatType(List<String> extensions); }
@Test(expected = NullPointerException.class) public void testNullSegmentThrowsNPE() { PathUtils.join("", null, ""); }
public static final String join(final String... parts) { final StringBuilder sb = new StringBuilder(); for (final String part:parts) { Preconditions.checkNotNull(part, "parts cannot contain null"); if (!Strings.isNullOrEmpty(part)) { sb.append(part).append("/"); } } if (sb.length() > 0) { sb.deleteCharAt(sb.length() - 1); } final String path = sb.toString(); return normalize(path); }
PathUtils { public static final String join(final String... parts) { final StringBuilder sb = new StringBuilder(); for (final String part:parts) { Preconditions.checkNotNull(part, "parts cannot contain null"); if (!Strings.isNullOrEmpty(part)) { sb.append(part).append("/"); } } if (sb.length() > 0) { sb.deleteCharAt(sb.length() - 1); } final String path = sb.toString(); return normalize(path); } }
PathUtils { public static final String join(final String... parts) { final StringBuilder sb = new StringBuilder(); for (final String part:parts) { Preconditions.checkNotNull(part, "parts cannot contain null"); if (!Strings.isNullOrEmpty(part)) { sb.append(part).append("/"); } } if (sb.length() > 0) { sb.deleteCharAt(sb.length() - 1); } final String path = sb.toString(); return normalize(path); } }
PathUtils { public static final String join(final String... parts) { final StringBuilder sb = new StringBuilder(); for (final String part:parts) { Preconditions.checkNotNull(part, "parts cannot contain null"); if (!Strings.isNullOrEmpty(part)) { sb.append(part).append("/"); } } if (sb.length() > 0) { sb.deleteCharAt(sb.length() - 1); } final String path = sb.toString(); return normalize(path); } static final String join(final String... parts); static final String normalize(final String path); }
PathUtils { public static final String join(final String... parts) { final StringBuilder sb = new StringBuilder(); for (final String part:parts) { Preconditions.checkNotNull(part, "parts cannot contain null"); if (!Strings.isNullOrEmpty(part)) { sb.append(part).append("/"); } } if (sb.length() > 0) { sb.deleteCharAt(sb.length() - 1); } final String path = sb.toString(); return normalize(path); } static final String join(final String... parts); static final String normalize(final String path); }
@Test public void testMutatorApplyReplaceNonPreview() { boolean preview = false; TransformResult result1 = mutator(preview).apply("foo", "foo", newValue, true); assertEquals(newHashSet(), result1.getAddedColumns()); assertEquals(newHashSet("foo"), result1.getModifiedColumns()); assertEquals(newHashSet(), result1.getRemovedColumns()); assertColIs(null, result1, "foo2"); assertColIs(newValue, result1, "foo"); }
public TransformResult apply(String oldCol, String newCol, ExpressionBase newExp, boolean dropSourceColumn) { return apply(oldCol, newCol, newExp.wrap(), dropSourceColumn); }
DatasetStateMutator { public TransformResult apply(String oldCol, String newCol, ExpressionBase newExp, boolean dropSourceColumn) { return apply(oldCol, newCol, newExp.wrap(), dropSourceColumn); } }
DatasetStateMutator { public TransformResult apply(String oldCol, String newCol, ExpressionBase newExp, boolean dropSourceColumn) { return apply(oldCol, newCol, newExp.wrap(), dropSourceColumn); } DatasetStateMutator(String username, VirtualDatasetState virtualDatasetState, boolean preview); }
DatasetStateMutator { public TransformResult apply(String oldCol, String newCol, ExpressionBase newExp, boolean dropSourceColumn) { return apply(oldCol, newCol, newExp.wrap(), dropSourceColumn); } DatasetStateMutator(String username, VirtualDatasetState virtualDatasetState, boolean preview); void setSql(QueryMetadata metadata); void addColumn(int index, Column column); void addColumn(Column column); void moveColumn(int index, int dest); int columnCount(); void addJoin(Join join); void updateColumnTables(); String uniqueColumnName(String column); String getDatasetAlias(); void groupedBy(List<Column> newColumns, List<Column> groupBys); void addFilter(Filter filter); void setOrdersList(List<Order> columnsList); TransformResult result(); int indexOfCol(String colName); void nest(); TransformResult rename(String oldCol, String newCol); TransformResult apply(String oldCol, String newCol, ExpressionBase newExp, boolean dropSourceColumn); TransformResult apply(String oldCol, String newCol, Expression newExpWrapped, boolean dropSourceColumn); Expression findColValueForModification(String colName); Expression findColValue(String colName); boolean isGrouped(); void dropColumn(String droppedColumnName); }
DatasetStateMutator { public TransformResult apply(String oldCol, String newCol, ExpressionBase newExp, boolean dropSourceColumn) { return apply(oldCol, newCol, newExp.wrap(), dropSourceColumn); } DatasetStateMutator(String username, VirtualDatasetState virtualDatasetState, boolean preview); void setSql(QueryMetadata metadata); void addColumn(int index, Column column); void addColumn(Column column); void moveColumn(int index, int dest); int columnCount(); void addJoin(Join join); void updateColumnTables(); String uniqueColumnName(String column); String getDatasetAlias(); void groupedBy(List<Column> newColumns, List<Column> groupBys); void addFilter(Filter filter); void setOrdersList(List<Order> columnsList); TransformResult result(); int indexOfCol(String colName); void nest(); TransformResult rename(String oldCol, String newCol); TransformResult apply(String oldCol, String newCol, ExpressionBase newExp, boolean dropSourceColumn); TransformResult apply(String oldCol, String newCol, Expression newExpWrapped, boolean dropSourceColumn); Expression findColValueForModification(String colName); Expression findColValue(String colName); boolean isGrouped(); void dropColumn(String droppedColumnName); }
@Test public void testJoinPreservesAbsoluteOrRelativePaths() { final String actual = PathUtils.join("/a", "/b", "/c"); final String expected = "/a/b/c"; Assert.assertEquals("invalid path", expected, actual); final String actual2 = PathUtils.join("/a", "b", "c"); final String expected2 = "/a/b/c"; Assert.assertEquals("invalid path", expected2, actual2); final String actual3 = PathUtils.join("a", "b", "c"); final String expected3 = "a/b/c"; Assert.assertEquals("invalid path", expected3, actual3); final String actual4 = PathUtils.join("a", "", "c"); final String expected4 = "a/c"; Assert.assertEquals("invalid path", expected4, actual4); final String actual5 = PathUtils.join("", "", "c"); final String expected5 = "c"; Assert.assertEquals("invalid path", expected5, actual5); final String actual6 = PathUtils.join("", "", ""); final String expected6 = ""; Assert.assertEquals("invalid path", expected6, actual6); final String actual7 = PathUtils.join("", "", "/"); final String expected7 = "/"; Assert.assertEquals("invalid path", expected7, actual7); final String actual8 = PathUtils.join("", "", "c/"); final String expected8 = "c/"; Assert.assertEquals("invalid path", expected8, actual8); }
public static final String join(final String... parts) { final StringBuilder sb = new StringBuilder(); for (final String part:parts) { Preconditions.checkNotNull(part, "parts cannot contain null"); if (!Strings.isNullOrEmpty(part)) { sb.append(part).append("/"); } } if (sb.length() > 0) { sb.deleteCharAt(sb.length() - 1); } final String path = sb.toString(); return normalize(path); }
PathUtils { public static final String join(final String... parts) { final StringBuilder sb = new StringBuilder(); for (final String part:parts) { Preconditions.checkNotNull(part, "parts cannot contain null"); if (!Strings.isNullOrEmpty(part)) { sb.append(part).append("/"); } } if (sb.length() > 0) { sb.deleteCharAt(sb.length() - 1); } final String path = sb.toString(); return normalize(path); } }
PathUtils { public static final String join(final String... parts) { final StringBuilder sb = new StringBuilder(); for (final String part:parts) { Preconditions.checkNotNull(part, "parts cannot contain null"); if (!Strings.isNullOrEmpty(part)) { sb.append(part).append("/"); } } if (sb.length() > 0) { sb.deleteCharAt(sb.length() - 1); } final String path = sb.toString(); return normalize(path); } }
PathUtils { public static final String join(final String... parts) { final StringBuilder sb = new StringBuilder(); for (final String part:parts) { Preconditions.checkNotNull(part, "parts cannot contain null"); if (!Strings.isNullOrEmpty(part)) { sb.append(part).append("/"); } } if (sb.length() > 0) { sb.deleteCharAt(sb.length() - 1); } final String path = sb.toString(); return normalize(path); } static final String join(final String... parts); static final String normalize(final String path); }
PathUtils { public static final String join(final String... parts) { final StringBuilder sb = new StringBuilder(); for (final String part:parts) { Preconditions.checkNotNull(part, "parts cannot contain null"); if (!Strings.isNullOrEmpty(part)) { sb.append(part).append("/"); } } if (sb.length() > 0) { sb.deleteCharAt(sb.length() - 1); } final String path = sb.toString(); return normalize(path); } static final String join(final String... parts); static final String normalize(final String path); }
@Test public void testNormalizeRemovesRedundantForwardSlashes() { final String actual = PathUtils.normalize("/a/b/c"); final String expected = "/a/b/c"; Assert.assertEquals("invalid path", expected, actual); /* the redundant-slash inputs below are representative reconstructions of literals lost in extraction */ final String actual2 = PathUtils.normalize("//a//b//c"); final String expected2 = "/a/b/c"; Assert.assertEquals("invalid path", expected2, actual2); final String actual3 = PathUtils.normalize("///"); final String expected3 = "/"; Assert.assertEquals("invalid path", expected3, actual3); final String actual4 = PathUtils.normalize("/a"); final String expected4 = "/a"; Assert.assertEquals("invalid path", expected4, actual4); final String actual5 = PathUtils.normalize("//"); final String expected5 = "/"; Assert.assertEquals("invalid path", expected5, actual5); final String actual6 = PathUtils.normalize(""); final String expected6 = ""; Assert.assertEquals("invalid path", expected6, actual6); }
public static final String normalize(final String path) { if (Strings.isNullOrEmpty(Preconditions.checkNotNull(path))) { return path; } final StringBuilder builder = new StringBuilder(); char last = path.charAt(0); builder.append(last); for (int i=1; i<path.length(); i++) { char cur = path.charAt(i); if (last == '/' && cur == last) { continue; } builder.append(cur); last = cur; } return builder.toString(); }
PathUtils { public static final String normalize(final String path) { if (Strings.isNullOrEmpty(Preconditions.checkNotNull(path))) { return path; } final StringBuilder builder = new StringBuilder(); char last = path.charAt(0); builder.append(last); for (int i=1; i<path.length(); i++) { char cur = path.charAt(i); if (last == '/' && cur == last) { continue; } builder.append(cur); last = cur; } return builder.toString(); } }
PathUtils { public static final String normalize(final String path) { if (Strings.isNullOrEmpty(Preconditions.checkNotNull(path))) { return path; } final StringBuilder builder = new StringBuilder(); char last = path.charAt(0); builder.append(last); for (int i=1; i<path.length(); i++) { char cur = path.charAt(i); if (last == '/' && cur == last) { continue; } builder.append(cur); last = cur; } return builder.toString(); } }
PathUtils { public static final String normalize(final String path) { if (Strings.isNullOrEmpty(Preconditions.checkNotNull(path))) { return path; } final StringBuilder builder = new StringBuilder(); char last = path.charAt(0); builder.append(last); for (int i=1; i<path.length(); i++) { char cur = path.charAt(i); if (last == '/' && cur == last) { continue; } builder.append(cur); last = cur; } return builder.toString(); } static final String join(final String... parts); static final String normalize(final String path); }
PathUtils { public static final String normalize(final String path) { if (Strings.isNullOrEmpty(Preconditions.checkNotNull(path))) { return path; } final StringBuilder builder = new StringBuilder(); char last = path.charAt(0); builder.append(last); for (int i=1; i<path.length(); i++) { char cur = path.charAt(i); if (last == '/' && cur == last) { continue; } builder.append(cur); last = cur; } return builder.toString(); } static final String join(final String... parts); static final String normalize(final String path); }
@Test public void testQueueProcessor() throws Exception { Pointer<Long> counter = new Pointer<>(0L); final ReentrantReadWriteLock rwlock = new ReentrantReadWriteLock(); Lock readLock = rwlock.readLock(); Lock writeLock = rwlock.writeLock(); QueueProcessor<Long> qp = new QueueProcessor<>("queue-processor", () -> new AutoCloseableLock(writeLock).open(), (i) -> counter.value += i); qp.start(); final long totalCount = 100_000; final long numBatches = 10; final long batchCount = totalCount / numBatches; for (long i = 0; i < totalCount; i++) { qp.enqueue(new Long(i)); if (i % batchCount == (batchCount - 1)) { Thread.sleep(1); } } final long timeout = 5_000; long loopCount = 0; long expectedValue = totalCount * (totalCount - 1) / 2; while (getValue(counter, readLock) != expectedValue) { Thread.sleep(1); assertTrue(String.format("Timed out after %d ms", timeout), loopCount++ < timeout); } qp.close(); }
public QueueProcessor(String name, Supplier<AutoCloseableLock> lockSupplier, Consumer<T> consumer) { this.name = name; this.lockSupplier = lockSupplier; this.consumer = consumer; this.queue = new LinkedBlockingQueue<>(); this.workerThread = null; this.isClosed = false; this.completed = true; }
QueueProcessor implements AutoCloseable { public QueueProcessor(String name, Supplier<AutoCloseableLock> lockSupplier, Consumer<T> consumer) { this.name = name; this.lockSupplier = lockSupplier; this.consumer = consumer; this.queue = new LinkedBlockingQueue<>(); this.workerThread = null; this.isClosed = false; this.completed = true; } }
QueueProcessor implements AutoCloseable { public QueueProcessor(String name, Supplier<AutoCloseableLock> lockSupplier, Consumer<T> consumer) { this.name = name; this.lockSupplier = lockSupplier; this.consumer = consumer; this.queue = new LinkedBlockingQueue<>(); this.workerThread = null; this.isClosed = false; this.completed = true; } QueueProcessor(String name, Supplier<AutoCloseableLock> lockSupplier, Consumer<T> consumer); }
QueueProcessor implements AutoCloseable { public QueueProcessor(String name, Supplier<AutoCloseableLock> lockSupplier, Consumer<T> consumer) { this.name = name; this.lockSupplier = lockSupplier; this.consumer = consumer; this.queue = new LinkedBlockingQueue<>(); this.workerThread = null; this.isClosed = false; this.completed = true; } QueueProcessor(String name, Supplier<AutoCloseableLock> lockSupplier, Consumer<T> consumer); void enqueue(T event); void start(); @VisibleForTesting boolean completed(); void close(); }
QueueProcessor implements AutoCloseable { public QueueProcessor(String name, Supplier<AutoCloseableLock> lockSupplier, Consumer<T> consumer) { this.name = name; this.lockSupplier = lockSupplier; this.consumer = consumer; this.queue = new LinkedBlockingQueue<>(); this.workerThread = null; this.isClosed = false; this.completed = true; } QueueProcessor(String name, Supplier<AutoCloseableLock> lockSupplier, Consumer<T> consumer); void enqueue(T event); void start(); @VisibleForTesting boolean completed(); void close(); }
@Test public void testConfigureAndStart() throws IOException, InterruptedException, MalformedObjectNameException { final MetricRegistry metrics = new MetricRegistry(); metrics.counter(NAME, () -> { Counter counter = new Counter(); counter.inc(1234); return counter; }); final JmxConfigurator configurator = new JmxConfigurator(TimeUnit.SECONDS, TimeUnit.MILLISECONDS); configurator.configureAndStart("test", metrics, new MetricFilter() { @Override public boolean matches(String s, Metric metric) { return true; } }); final Set<ObjectInstance> beans = ManagementFactory.getPlatformMBeanServer().queryMBeans(new ObjectName(OBJECT_NAME), null); assertEquals(1, beans.size()); assertEquals(OBJECT_NAME, beans.iterator().next().getObjectName().getCanonicalName()); }
@Override public void configureAndStart(String name, MetricRegistry registry, MetricFilter filter) { reporter = JmxReporter.forRegistry(registry).convertRatesTo(rateUnit).convertDurationsTo(durationUnit).filter(filter).build(); reporter.start(); }
JmxConfigurator extends ReporterConfigurator { @Override public void configureAndStart(String name, MetricRegistry registry, MetricFilter filter) { reporter = JmxReporter.forRegistry(registry).convertRatesTo(rateUnit).convertDurationsTo(durationUnit).filter(filter).build(); reporter.start(); } }
JmxConfigurator extends ReporterConfigurator { @Override public void configureAndStart(String name, MetricRegistry registry, MetricFilter filter) { reporter = JmxReporter.forRegistry(registry).convertRatesTo(rateUnit).convertDurationsTo(durationUnit).filter(filter).build(); reporter.start(); } @JsonCreator JmxConfigurator( @JsonProperty("rate") TimeUnit rateUnit, @JsonProperty("duration") TimeUnit durationUnit); }
JmxConfigurator extends ReporterConfigurator { @Override public void configureAndStart(String name, MetricRegistry registry, MetricFilter filter) { reporter = JmxReporter.forRegistry(registry).convertRatesTo(rateUnit).convertDurationsTo(durationUnit).filter(filter).build(); reporter.start(); } @JsonCreator JmxConfigurator( @JsonProperty("rate") TimeUnit rateUnit, @JsonProperty("duration") TimeUnit durationUnit); @Override void configureAndStart(String name, MetricRegistry registry, MetricFilter filter); @Override int hashCode(); @Override boolean equals(Object other); @Override void close(); }
JmxConfigurator extends ReporterConfigurator { @Override public void configureAndStart(String name, MetricRegistry registry, MetricFilter filter) { reporter = JmxReporter.forRegistry(registry).convertRatesTo(rateUnit).convertDurationsTo(durationUnit).filter(filter).build(); reporter.start(); } @JsonCreator JmxConfigurator( @JsonProperty("rate") TimeUnit rateUnit, @JsonProperty("duration") TimeUnit durationUnit); @Override void configureAndStart(String name, MetricRegistry registry, MetricFilter filter); @Override int hashCode(); @Override boolean equals(Object other); @Override void close(); }
@Test public void close() throws Exception { final CloseableSchedulerThreadPool executorService = mock(CloseableSchedulerThreadPool.class); final LocalSchedulerService service = new LocalSchedulerService(executorService, null, null, false); service.close(); verify(executorService).close(); }
@Override public void close() throws Exception { LOGGER.info("Stopping SchedulerService"); AutoCloseables.close(AutoCloseables.iter(executorService), taskLeaderElectionServiceMap.values()); LOGGER.info("Stopped SchedulerService"); }
LocalSchedulerService implements SchedulerService { @Override public void close() throws Exception { LOGGER.info("Stopping SchedulerService"); AutoCloseables.close(AutoCloseables.iter(executorService), taskLeaderElectionServiceMap.values()); LOGGER.info("Stopped SchedulerService"); } }
LocalSchedulerService implements SchedulerService { @Override public void close() throws Exception { LOGGER.info("Stopping SchedulerService"); AutoCloseables.close(AutoCloseables.iter(executorService), taskLeaderElectionServiceMap.values()); LOGGER.info("Stopped SchedulerService"); } LocalSchedulerService(int corePoolSize); LocalSchedulerService(int corePoolSize, Provider<ClusterCoordinator> clusterCoordinatorProvider, Provider<CoordinationProtos.NodeEndpoint> currentNode, boolean assumeTaskLeadership); @VisibleForTesting LocalSchedulerService(CloseableSchedulerThreadPool executorService, Provider<ClusterCoordinator> clusterCoordinatorProvider, Provider<CoordinationProtos.NodeEndpoint> currentNode, boolean assumeTaskLeadership); }
LocalSchedulerService implements SchedulerService { @Override public void close() throws Exception { LOGGER.info("Stopping SchedulerService"); AutoCloseables.close(AutoCloseables.iter(executorService), taskLeaderElectionServiceMap.values()); LOGGER.info("Stopped SchedulerService"); } LocalSchedulerService(int corePoolSize); LocalSchedulerService(int corePoolSize, Provider<ClusterCoordinator> clusterCoordinatorProvider, Provider<CoordinationProtos.NodeEndpoint> currentNode, boolean assumeTaskLeadership); @VisibleForTesting LocalSchedulerService(CloseableSchedulerThreadPool executorService, Provider<ClusterCoordinator> clusterCoordinatorProvider, Provider<CoordinationProtos.NodeEndpoint> currentNode, boolean assumeTaskLeadership); @VisibleForTesting CloseableSchedulerThreadPool getExecutorService(); @Override void close(); @Override void start(); @Override Cancellable schedule(Schedule schedule, Runnable task); }
LocalSchedulerService implements SchedulerService { @Override public void close() throws Exception { LOGGER.info("Stopping SchedulerService"); AutoCloseables.close(AutoCloseables.iter(executorService), taskLeaderElectionServiceMap.values()); LOGGER.info("Stopped SchedulerService"); } LocalSchedulerService(int corePoolSize); LocalSchedulerService(int corePoolSize, Provider<ClusterCoordinator> clusterCoordinatorProvider, Provider<CoordinationProtos.NodeEndpoint> currentNode, boolean assumeTaskLeadership); @VisibleForTesting LocalSchedulerService(CloseableSchedulerThreadPool executorService, Provider<ClusterCoordinator> clusterCoordinatorProvider, Provider<CoordinationProtos.NodeEndpoint> currentNode, boolean assumeTaskLeadership); @VisibleForTesting CloseableSchedulerThreadPool getExecutorService(); @Override void close(); @Override void start(); @Override Cancellable schedule(Schedule schedule, Runnable task); }
@Test public void newThread() { LocalSchedulerService service = new LocalSchedulerService(1); final Runnable runnable = mock(Runnable.class); final Thread thread = service.getExecutorService().getThreadFactory().newThread(runnable); assertTrue("thread should be a daemon thread", thread.isDaemon()); assertTrue("thread name should start with scheduler-", thread.getName().startsWith("scheduler-")); }
@VisibleForTesting public CloseableSchedulerThreadPool getExecutorService() { return executorService; }
LocalSchedulerService implements SchedulerService { @VisibleForTesting public CloseableSchedulerThreadPool getExecutorService() { return executorService; } }
LocalSchedulerService implements SchedulerService { @VisibleForTesting public CloseableSchedulerThreadPool getExecutorService() { return executorService; } LocalSchedulerService(int corePoolSize); LocalSchedulerService(int corePoolSize, Provider<ClusterCoordinator> clusterCoordinatorProvider, Provider<CoordinationProtos.NodeEndpoint> currentNode, boolean assumeTaskLeadership); @VisibleForTesting LocalSchedulerService(CloseableSchedulerThreadPool executorService, Provider<ClusterCoordinator> clusterCoordinatorProvider, Provider<CoordinationProtos.NodeEndpoint> currentNode, boolean assumeTaskLeadership); }
LocalSchedulerService implements SchedulerService { @VisibleForTesting public CloseableSchedulerThreadPool getExecutorService() { return executorService; } LocalSchedulerService(int corePoolSize); LocalSchedulerService(int corePoolSize, Provider<ClusterCoordinator> clusterCoordinatorProvider, Provider<CoordinationProtos.NodeEndpoint> currentNode, boolean assumeTaskLeadership); @VisibleForTesting LocalSchedulerService(CloseableSchedulerThreadPool executorService, Provider<ClusterCoordinator> clusterCoordinatorProvider, Provider<CoordinationProtos.NodeEndpoint> currentNode, boolean assumeTaskLeadership); @VisibleForTesting CloseableSchedulerThreadPool getExecutorService(); @Override void close(); @Override void start(); @Override Cancellable schedule(Schedule schedule, Runnable task); }
LocalSchedulerService implements SchedulerService { @VisibleForTesting public CloseableSchedulerThreadPool getExecutorService() { return executorService; } LocalSchedulerService(int corePoolSize); LocalSchedulerService(int corePoolSize, Provider<ClusterCoordinator> clusterCoordinatorProvider, Provider<CoordinationProtos.NodeEndpoint> currentNode, boolean assumeTaskLeadership); @VisibleForTesting LocalSchedulerService(CloseableSchedulerThreadPool executorService, Provider<ClusterCoordinator> clusterCoordinatorProvider, Provider<CoordinationProtos.NodeEndpoint> currentNode, boolean assumeTaskLeadership); @VisibleForTesting CloseableSchedulerThreadPool getExecutorService(); @Override void close(); @Override void start(); @Override Cancellable schedule(Schedule schedule, Runnable task); }
@Test public void schedule() throws Exception { final List<MockScheduledFuture<?>> futures = Lists.newArrayList(); final CloseableSchedulerThreadPool executorService = mock(CloseableSchedulerThreadPool.class); doAnswer(new Answer<ScheduledFuture<Object>>() { @Override public ScheduledFuture<Object> answer(InvocationOnMock invocation) throws Throwable { final Object[] arguments = invocation.getArguments(); MockScheduledFuture<Object> result = new MockScheduledFuture<>(Clock.systemUTC(), Executors.callable((Runnable) arguments[0]), (long) arguments[1], (TimeUnit) arguments[2]); futures.add(result); return result; } }).when(executorService).schedule(any(Runnable.class), anyLong(), any(TimeUnit.class)); final AtomicInteger runCount = new AtomicInteger(0); final Runnable runnable = new Runnable() { @Override public void run() { runCount.incrementAndGet(); } }; @SuppressWarnings("resource") final SchedulerService service = new LocalSchedulerService(executorService, null, null, false); @SuppressWarnings("unused") final Cancellable cancellable = service.schedule(Schedule.Builder.everyHours(1).build(), runnable); ImmutableList<MockScheduledFuture<?>> copyOfFutures = ImmutableList.copyOf(futures); for(MockScheduledFuture<?> future: copyOfFutures) { future.call(); } assertEquals(2, futures.size()); assertTrue("1st future should be completed", futures.get(0).isDone()); assertFalse("2nd future should still be pending", futures.get(1).isDone()); assertTrue("1st future delay should be shorter than 2nd future delay", futures.get(0).compareTo(futures.get(1)) < 0); assertEquals(1, runCount.get()); }
@Override public Cancellable schedule(Schedule schedule, Runnable task) { if (!assumeTaskLeadership) { return plainSchedule(schedule, task); } if (!schedule.isDistributedSingleton()) { return plainSchedule(schedule, task); } final TaskLeaderElection taskLeaderElection = getTaskLeaderElection(schedule); CancellableTask cancellableTask = new CancellableTask(schedule, task, schedule.getTaskName()); taskLeaderElection.getTaskLeader(); taskLeaderElection.addListener(cancellableTask.taskLeaderChangeListener); if (taskLeaderElection.isTaskLeader()) { cancellableTask.scheduleNext(); } else { cancellableTask.taskState = true; } return cancellableTask; }
LocalSchedulerService implements SchedulerService { @Override public Cancellable schedule(Schedule schedule, Runnable task) { if (!assumeTaskLeadership) { return plainSchedule(schedule, task); } if (!schedule.isDistributedSingleton()) { return plainSchedule(schedule, task); } final TaskLeaderElection taskLeaderElection = getTaskLeaderElection(schedule); CancellableTask cancellableTask = new CancellableTask(schedule, task, schedule.getTaskName()); taskLeaderElection.getTaskLeader(); taskLeaderElection.addListener(cancellableTask.taskLeaderChangeListener); if (taskLeaderElection.isTaskLeader()) { cancellableTask.scheduleNext(); } else { cancellableTask.taskState = true; } return cancellableTask; } }
LocalSchedulerService implements SchedulerService { @Override public Cancellable schedule(Schedule schedule, Runnable task) { if (!assumeTaskLeadership) { return plainSchedule(schedule, task); } if (!schedule.isDistributedSingleton()) { return plainSchedule(schedule, task); } final TaskLeaderElection taskLeaderElection = getTaskLeaderElection(schedule); CancellableTask cancellableTask = new CancellableTask(schedule, task, schedule.getTaskName()); taskLeaderElection.getTaskLeader(); taskLeaderElection.addListener(cancellableTask.taskLeaderChangeListener); if (taskLeaderElection.isTaskLeader()) { cancellableTask.scheduleNext(); } else { cancellableTask.taskState = true; } return cancellableTask; } LocalSchedulerService(int corePoolSize); LocalSchedulerService(int corePoolSize, Provider<ClusterCoordinator> clusterCoordinatorProvider, Provider<CoordinationProtos.NodeEndpoint> currentNode, boolean assumeTaskLeadership); @VisibleForTesting LocalSchedulerService(CloseableSchedulerThreadPool executorService, Provider<ClusterCoordinator> clusterCoordinatorProvider, Provider<CoordinationProtos.NodeEndpoint> currentNode, boolean assumeTaskLeadership); }
LocalSchedulerService implements SchedulerService { @Override public Cancellable schedule(Schedule schedule, Runnable task) { if (!assumeTaskLeadership) { return plainSchedule(schedule, task); } if (!schedule.isDistributedSingleton()) { return plainSchedule(schedule, task); } final TaskLeaderElection taskLeaderElection = getTaskLeaderElection(schedule); CancellableTask cancellableTask = new CancellableTask(schedule, task, schedule.getTaskName()); taskLeaderElection.getTaskLeader(); taskLeaderElection.addListener(cancellableTask.taskLeaderChangeListener); if (taskLeaderElection.isTaskLeader()) { cancellableTask.scheduleNext(); } else { cancellableTask.taskState = true; } return cancellableTask; } LocalSchedulerService(int corePoolSize); LocalSchedulerService(int corePoolSize, Provider<ClusterCoordinator> clusterCoordinatorProvider, Provider<CoordinationProtos.NodeEndpoint> currentNode, boolean assumeTaskLeadership); @VisibleForTesting LocalSchedulerService(CloseableSchedulerThreadPool executorService, Provider<ClusterCoordinator> clusterCoordinatorProvider, Provider<CoordinationProtos.NodeEndpoint> currentNode, boolean assumeTaskLeadership); @VisibleForTesting CloseableSchedulerThreadPool getExecutorService(); @Override void close(); @Override void start(); @Override Cancellable schedule(Schedule schedule, Runnable task); }
LocalSchedulerService implements SchedulerService { @Override public Cancellable schedule(Schedule schedule, Runnable task) { if (!assumeTaskLeadership) { return plainSchedule(schedule, task); } if (!schedule.isDistributedSingleton()) { return plainSchedule(schedule, task); } final TaskLeaderElection taskLeaderElection = getTaskLeaderElection(schedule); CancellableTask cancellableTask = new CancellableTask(schedule, task, schedule.getTaskName()); taskLeaderElection.getTaskLeader(); taskLeaderElection.addListener(cancellableTask.taskLeaderChangeListener); if (taskLeaderElection.isTaskLeader()) { cancellableTask.scheduleNext(); } else { cancellableTask.taskState = true; } return cancellableTask; } LocalSchedulerService(int corePoolSize); LocalSchedulerService(int corePoolSize, Provider<ClusterCoordinator> clusterCoordinatorProvider, Provider<CoordinationProtos.NodeEndpoint> currentNode, boolean assumeTaskLeadership); @VisibleForTesting LocalSchedulerService(CloseableSchedulerThreadPool executorService, Provider<ClusterCoordinator> clusterCoordinatorProvider, Provider<CoordinationProtos.NodeEndpoint> currentNode, boolean assumeTaskLeadership); @VisibleForTesting CloseableSchedulerThreadPool getExecutorService(); @Override void close(); @Override void start(); @Override Cancellable schedule(Schedule schedule, Runnable task); }
@Test public void scheduleCancelledBeforeRun() throws Exception { final List<MockScheduledFuture<?>> futures = Lists.newArrayList(); final CloseableSchedulerThreadPool executorService = mock(CloseableSchedulerThreadPool.class); doAnswer(new Answer<ScheduledFuture<Object>>() { @Override public ScheduledFuture<Object> answer(InvocationOnMock invocation) throws Throwable { final Object[] arguments = invocation.getArguments(); MockScheduledFuture<Object> result = new MockScheduledFuture<>(Clock.systemUTC(), Executors.callable((Runnable) arguments[0]), (long) arguments[1], (TimeUnit) arguments[2]); futures.add(result); return result; } }).when(executorService).schedule(any(Runnable.class), anyLong(), any(TimeUnit.class)); final AtomicInteger runCount = new AtomicInteger(0); final Runnable runnable = new Runnable() { @Override public void run() { runCount.incrementAndGet(); } }; @SuppressWarnings("resource") final SchedulerService service = new LocalSchedulerService(executorService, null, null, false); final Cancellable cancellable = service.schedule(Schedule.Builder.everyHours(1).build(), runnable); cancellable.cancel(true); ImmutableList<MockScheduledFuture<?>> copyOfFutures = ImmutableList.copyOf(futures); for(MockScheduledFuture<?> future: copyOfFutures) { try { future.call(); } catch (IllegalStateException e) { } } assertTrue("Cancellable should have been cancelled", cancellable.isCancelled()); assertEquals(1, futures.size()); assertTrue("1st future should be completed", futures.get(0).isDone()); assertEquals(0, runCount.get()); }
@Override public Cancellable schedule(Schedule schedule, Runnable task) { if (!assumeTaskLeadership) { return plainSchedule(schedule, task); } if (!schedule.isDistributedSingleton()) { return plainSchedule(schedule, task); } final TaskLeaderElection taskLeaderElection = getTaskLeaderElection(schedule); CancellableTask cancellableTask = new CancellableTask(schedule, task, schedule.getTaskName()); taskLeaderElection.getTaskLeader(); taskLeaderElection.addListener(cancellableTask.taskLeaderChangeListener); if (taskLeaderElection.isTaskLeader()) { cancellableTask.scheduleNext(); } else { cancellableTask.taskState = true; } return cancellableTask; }
LocalSchedulerService implements SchedulerService { @Override public Cancellable schedule(Schedule schedule, Runnable task) { if (!assumeTaskLeadership) { return plainSchedule(schedule, task); } if (!schedule.isDistributedSingleton()) { return plainSchedule(schedule, task); } final TaskLeaderElection taskLeaderElection = getTaskLeaderElection(schedule); CancellableTask cancellableTask = new CancellableTask(schedule, task, schedule.getTaskName()); taskLeaderElection.getTaskLeader(); taskLeaderElection.addListener(cancellableTask.taskLeaderChangeListener); if (taskLeaderElection.isTaskLeader()) { cancellableTask.scheduleNext(); } else { cancellableTask.taskState = true; } return cancellableTask; } }
LocalSchedulerService implements SchedulerService { @Override public Cancellable schedule(Schedule schedule, Runnable task) { if (!assumeTaskLeadership) { return plainSchedule(schedule, task); } if (!schedule.isDistributedSingleton()) { return plainSchedule(schedule, task); } final TaskLeaderElection taskLeaderElection = getTaskLeaderElection(schedule); CancellableTask cancellableTask = new CancellableTask(schedule, task, schedule.getTaskName()); taskLeaderElection.getTaskLeader(); taskLeaderElection.addListener(cancellableTask.taskLeaderChangeListener); if (taskLeaderElection.isTaskLeader()) { cancellableTask.scheduleNext(); } else { cancellableTask.taskState = true; } return cancellableTask; } LocalSchedulerService(int corePoolSize); LocalSchedulerService(int corePoolSize, Provider<ClusterCoordinator> clusterCoordinatorProvider, Provider<CoordinationProtos.NodeEndpoint> currentNode, boolean assumeTaskLeadership); @VisibleForTesting LocalSchedulerService(CloseableSchedulerThreadPool executorService, Provider<ClusterCoordinator> clusterCoordinatorProvider, Provider<CoordinationProtos.NodeEndpoint> currentNode, boolean assumeTaskLeadership); }
LocalSchedulerService implements SchedulerService { @Override public Cancellable schedule(Schedule schedule, Runnable task) { if (!assumeTaskLeadership) { return plainSchedule(schedule, task); } if (!schedule.isDistributedSingleton()) { return plainSchedule(schedule, task); } final TaskLeaderElection taskLeaderElection = getTaskLeaderElection(schedule); CancellableTask cancellableTask = new CancellableTask(schedule, task, schedule.getTaskName()); taskLeaderElection.getTaskLeader(); taskLeaderElection.addListener(cancellableTask.taskLeaderChangeListener); if (taskLeaderElection.isTaskLeader()) { cancellableTask.scheduleNext(); } else { cancellableTask.taskState = true; } return cancellableTask; } LocalSchedulerService(int corePoolSize); LocalSchedulerService(int corePoolSize, Provider<ClusterCoordinator> clusterCoordinatorProvider, Provider<CoordinationProtos.NodeEndpoint> currentNode, boolean assumeTaskLeadership); @VisibleForTesting LocalSchedulerService(CloseableSchedulerThreadPool executorService, Provider<ClusterCoordinator> clusterCoordinatorProvider, Provider<CoordinationProtos.NodeEndpoint> currentNode, boolean assumeTaskLeadership); @VisibleForTesting CloseableSchedulerThreadPool getExecutorService(); @Override void close(); @Override void start(); @Override Cancellable schedule(Schedule schedule, Runnable task); }
LocalSchedulerService implements SchedulerService { @Override public Cancellable schedule(Schedule schedule, Runnable task) { if (!assumeTaskLeadership) { return plainSchedule(schedule, task); } if (!schedule.isDistributedSingleton()) { return plainSchedule(schedule, task); } final TaskLeaderElection taskLeaderElection = getTaskLeaderElection(schedule); CancellableTask cancellableTask = new CancellableTask(schedule, task, schedule.getTaskName()); taskLeaderElection.getTaskLeader(); taskLeaderElection.addListener(cancellableTask.taskLeaderChangeListener); if (taskLeaderElection.isTaskLeader()) { cancellableTask.scheduleNext(); } else { cancellableTask.taskState = true; } return cancellableTask; } LocalSchedulerService(int corePoolSize); LocalSchedulerService(int corePoolSize, Provider<ClusterCoordinator> clusterCoordinatorProvider, Provider<CoordinationProtos.NodeEndpoint> currentNode, boolean assumeTaskLeadership); @VisibleForTesting LocalSchedulerService(CloseableSchedulerThreadPool executorService, Provider<ClusterCoordinator> clusterCoordinatorProvider, Provider<CoordinationProtos.NodeEndpoint> currentNode, boolean assumeTaskLeadership); @VisibleForTesting CloseableSchedulerThreadPool getExecutorService(); @Override void close(); @Override void start(); @Override Cancellable schedule(Schedule schedule, Runnable task); }
@Test public void onlyExclude() { IncludesExcludesFilter f = new IncludesExcludesFilter(Arrays.asList(), Arrays.asList("a.*")); assertFalse(f.matches("alpha", null)); assertTrue(f.matches("beta", null)); }
@Override public boolean matches(String name, Metric metric) { if (includes.isEmpty() || matches(name, includes)) { return allowedViaExcludes(name); } return false; }
IncludesExcludesFilter implements MetricFilter { @Override public boolean matches(String name, Metric metric) { if (includes.isEmpty() || matches(name, includes)) { return allowedViaExcludes(name); } return false; } }
IncludesExcludesFilter implements MetricFilter { @Override public boolean matches(String name, Metric metric) { if (includes.isEmpty() || matches(name, includes)) { return allowedViaExcludes(name); } return false; } IncludesExcludesFilter(List<String> includes, List<String> excludes); }
IncludesExcludesFilter implements MetricFilter { @Override public boolean matches(String name, Metric metric) { if (includes.isEmpty() || matches(name, includes)) { return allowedViaExcludes(name); } return false; } IncludesExcludesFilter(List<String> includes, List<String> excludes); @Override boolean matches(String name, Metric metric); @Override int hashCode(); @Override boolean equals(Object other); }
IncludesExcludesFilter implements MetricFilter { @Override public boolean matches(String name, Metric metric) { if (includes.isEmpty() || matches(name, includes)) { return allowedViaExcludes(name); } return false; } IncludesExcludesFilter(List<String> includes, List<String> excludes); @Override boolean matches(String name, Metric metric); @Override int hashCode(); @Override boolean equals(Object other); }
@Test public void onlyInclude() { IncludesExcludesFilter f = new IncludesExcludesFilter(Arrays.asList("a.*"), Arrays.asList()); assertTrue(f.matches("alpha", null)); assertFalse(f.matches("beta", null)); }
@Override public boolean matches(String name, Metric metric) { if (includes.isEmpty() || matches(name, includes)) { return allowedViaExcludes(name); } return false; }
IncludesExcludesFilter implements MetricFilter { @Override public boolean matches(String name, Metric metric) { if (includes.isEmpty() || matches(name, includes)) { return allowedViaExcludes(name); } return false; } }
IncludesExcludesFilter implements MetricFilter { @Override public boolean matches(String name, Metric metric) { if (includes.isEmpty() || matches(name, includes)) { return allowedViaExcludes(name); } return false; } IncludesExcludesFilter(List<String> includes, List<String> excludes); }
IncludesExcludesFilter implements MetricFilter { @Override public boolean matches(String name, Metric metric) { if (includes.isEmpty() || matches(name, includes)) { return allowedViaExcludes(name); } return false; } IncludesExcludesFilter(List<String> includes, List<String> excludes); @Override boolean matches(String name, Metric metric); @Override int hashCode(); @Override boolean equals(Object other); }
IncludesExcludesFilter implements MetricFilter { @Override public boolean matches(String name, Metric metric) { if (includes.isEmpty() || matches(name, includes)) { return allowedViaExcludes(name); } return false; } IncludesExcludesFilter(List<String> includes, List<String> excludes); @Override boolean matches(String name, Metric metric); @Override int hashCode(); @Override boolean equals(Object other); }
@Test public void testMutatorApplyDropNonPreview() { boolean preview = false; TransformResult result2 = mutator(preview).apply("foo", "foo2", newValue, true); assertEquals(newHashSet("foo2"), result2.getAddedColumns()); assertEquals(newHashSet(), result2.getModifiedColumns()); assertEquals(newHashSet("foo"), result2.getRemovedColumns()); assertColIs(null, result2, "foo"); assertColIs(newValue, result2, "foo2"); }
public TransformResult apply(String oldCol, String newCol, ExpressionBase newExp, boolean dropSourceColumn) { return apply(oldCol, newCol, newExp.wrap(), dropSourceColumn); }
DatasetStateMutator { public TransformResult apply(String oldCol, String newCol, ExpressionBase newExp, boolean dropSourceColumn) { return apply(oldCol, newCol, newExp.wrap(), dropSourceColumn); } }
DatasetStateMutator { public TransformResult apply(String oldCol, String newCol, ExpressionBase newExp, boolean dropSourceColumn) { return apply(oldCol, newCol, newExp.wrap(), dropSourceColumn); } DatasetStateMutator(String username, VirtualDatasetState virtualDatasetState, boolean preview); }
DatasetStateMutator { public TransformResult apply(String oldCol, String newCol, ExpressionBase newExp, boolean dropSourceColumn) { return apply(oldCol, newCol, newExp.wrap(), dropSourceColumn); } DatasetStateMutator(String username, VirtualDatasetState virtualDatasetState, boolean preview); void setSql(QueryMetadata metadata); void addColumn(int index, Column column); void addColumn(Column column); void moveColumn(int index, int dest); int columnCount(); void addJoin(Join join); void updateColumnTables(); String uniqueColumnName(String column); String getDatasetAlias(); void groupedBy(List<Column> newColumns, List<Column> groupBys); void addFilter(Filter filter); void setOrdersList(List<Order> columnsList); TransformResult result(); int indexOfCol(String colName); void nest(); TransformResult rename(String oldCol, String newCol); TransformResult apply(String oldCol, String newCol, ExpressionBase newExp, boolean dropSourceColumn); TransformResult apply(String oldCol, String newCol, Expression newExpWrapped, boolean dropSourceColumn); Expression findColValueForModification(String colName); Expression findColValue(String colName); boolean isGrouped(); void dropColumn(String droppedColumnName); }
DatasetStateMutator { public TransformResult apply(String oldCol, String newCol, ExpressionBase newExp, boolean dropSourceColumn) { return apply(oldCol, newCol, newExp.wrap(), dropSourceColumn); } DatasetStateMutator(String username, VirtualDatasetState virtualDatasetState, boolean preview); void setSql(QueryMetadata metadata); void addColumn(int index, Column column); void addColumn(Column column); void moveColumn(int index, int dest); int columnCount(); void addJoin(Join join); void updateColumnTables(); String uniqueColumnName(String column); String getDatasetAlias(); void groupedBy(List<Column> newColumns, List<Column> groupBys); void addFilter(Filter filter); void setOrdersList(List<Order> columnsList); TransformResult result(); int indexOfCol(String colName); void nest(); TransformResult rename(String oldCol, String newCol); TransformResult apply(String oldCol, String newCol, ExpressionBase newExp, boolean dropSourceColumn); TransformResult apply(String oldCol, String newCol, Expression newExpWrapped, boolean dropSourceColumn); Expression findColValueForModification(String colName); Expression findColValue(String colName); boolean isGrouped(); void dropColumn(String droppedColumnName); }
@Test public void includeAndExclude() { IncludesExcludesFilter f = new IncludesExcludesFilter(Arrays.asList("a.*"), Arrays.asList("a\\.b.*")); assertTrue(f.matches("a.alpha", null)); assertFalse(f.matches("a.beta", null)); }
@Override public boolean matches(String name, Metric metric) { if (includes.isEmpty() || matches(name, includes)) { return allowedViaExcludes(name); } return false; }
IncludesExcludesFilter implements MetricFilter { @Override public boolean matches(String name, Metric metric) { if (includes.isEmpty() || matches(name, includes)) { return allowedViaExcludes(name); } return false; } }
IncludesExcludesFilter implements MetricFilter { @Override public boolean matches(String name, Metric metric) { if (includes.isEmpty() || matches(name, includes)) { return allowedViaExcludes(name); } return false; } IncludesExcludesFilter(List<String> includes, List<String> excludes); }
IncludesExcludesFilter implements MetricFilter { @Override public boolean matches(String name, Metric metric) { if (includes.isEmpty() || matches(name, includes)) { return allowedViaExcludes(name); } return false; } IncludesExcludesFilter(List<String> includes, List<String> excludes); @Override boolean matches(String name, Metric metric); @Override int hashCode(); @Override boolean equals(Object other); }
IncludesExcludesFilter implements MetricFilter { @Override public boolean matches(String name, Metric metric) { if (includes.isEmpty() || matches(name, includes)) { return allowedViaExcludes(name); } return false; } IncludesExcludesFilter(List<String> includes, List<String> excludes); @Override boolean matches(String name, Metric metric); @Override int hashCode(); @Override boolean equals(Object other); }
@Test public void noValues() { IncludesExcludesFilter f = new IncludesExcludesFilter(Arrays.asList(), Arrays.asList()); assertTrue(f.matches(null, null)); }
@Override public boolean matches(String name, Metric metric) { if (includes.isEmpty() || matches(name, includes)) { return allowedViaExcludes(name); } return false; }
IncludesExcludesFilter implements MetricFilter { @Override public boolean matches(String name, Metric metric) { if (includes.isEmpty() || matches(name, includes)) { return allowedViaExcludes(name); } return false; } }
IncludesExcludesFilter implements MetricFilter { @Override public boolean matches(String name, Metric metric) { if (includes.isEmpty() || matches(name, includes)) { return allowedViaExcludes(name); } return false; } IncludesExcludesFilter(List<String> includes, List<String> excludes); }
IncludesExcludesFilter implements MetricFilter { @Override public boolean matches(String name, Metric metric) { if (includes.isEmpty() || matches(name, includes)) { return allowedViaExcludes(name); } return false; } IncludesExcludesFilter(List<String> includes, List<String> excludes); @Override boolean matches(String name, Metric metric); @Override int hashCode(); @Override boolean equals(Object other); }
IncludesExcludesFilter implements MetricFilter { @Override public boolean matches(String name, Metric metric) { if (includes.isEmpty() || matches(name, includes)) { return allowedViaExcludes(name); } return false; } IncludesExcludesFilter(List<String> includes, List<String> excludes); @Override boolean matches(String name, Metric metric); @Override int hashCode(); @Override boolean equals(Object other); }
@Test public void TestInterleavedMidToSmall() { LinkedList<String> l1 = GetRandomStringList(TOTAL_STRINGS, midAvgSize); LinkedList<String> l2 = GetRandomStringList(TOTAL_STRINGS, smallAvgSize); final double threshold = 1.0D; System.out.println("TestInterleavedMidToSmall: threshold: " + threshold + " (only forcecompact)"); MutableVarcharVector m1 = new MutableVarcharVector("TestInterleavedMidToSmall", testAllocator, threshold); try { TestInterLeaved(m1, l1, l2); } finally { m1.close(); } }
@Override public void close() { this.clear(); }
MutableVarcharVector extends BaseVariableWidthVector { @Override public void close() { this.clear(); } }
MutableVarcharVector extends BaseVariableWidthVector { @Override public void close() { this.clear(); } MutableVarcharVector(String name, BufferAllocator allocator, double compactionThreshold); MutableVarcharVector(String name, FieldType fieldType, BufferAllocator allocator, double compactionThreshold); }
MutableVarcharVector extends BaseVariableWidthVector { @Override public void close() { this.clear(); } MutableVarcharVector(String name, BufferAllocator allocator, double compactionThreshold); MutableVarcharVector(String name, FieldType fieldType, BufferAllocator allocator, double compactionThreshold); final void setCompactionThreshold(double in); final boolean needsCompaction(); final int getCurrentOffset(); final int getGarbageSizeInBytes(); final int getUsedByteCapacity(); @Override FieldReader getReader(); @Override MinorType getMinorType(); void zeroVector(); void reset(); @Override void close(); @Override void clear(); byte[] get(int index); Text getObject(int index); void get(int index, NullableVarCharHolder holder); void copyFrom(int fromIndex, int thisIndex, VarCharVector from); void copyFromSafe(int fromIndex, int thisIndex, VarCharVector from); void compact(); void forceCompact(); void set(int index, VarCharHolder holder); void setSafe(int index, VarCharHolder holder); void set(int index, NullableVarCharHolder holder); void setSafe(int index, NullableVarCharHolder holder); void set(int index, Text text); void setSafe(int index, Text text); void set(int index, byte[] value); void setSafe(int index, byte[] value); void set(int index, byte[] value, int start, int length); void setSafe(int index, byte[] value, int start, int length); void set(int index, ByteBuffer value, int start, int length); void setSafe(int index, ByteBuffer value, int start, int length); void set(int index, int isSet, int start, int end, ArrowBuf buffer); void setSafe(int index, int isSet, int start, int end, ArrowBuf buffer); void set(int index, int start, int length, ArrowBuf buffer); void setSafe(int index, int start, int length, ArrowBuf buffer); void copyToVarchar(VarCharVector in, final int from, final int to); boolean isIndexSafe(int index); @Override TransferPair getTransferPair(String ref, BufferAllocator allocator); @Override TransferPair makeTransferPair(ValueVector to); }
MutableVarcharVector extends BaseVariableWidthVector { @Override public void close() { this.clear(); } MutableVarcharVector(String name, BufferAllocator allocator, double compactionThreshold); MutableVarcharVector(String name, FieldType fieldType, BufferAllocator allocator, double compactionThreshold); final void setCompactionThreshold(double in); final boolean needsCompaction(); final int getCurrentOffset(); final int getGarbageSizeInBytes(); final int getUsedByteCapacity(); @Override FieldReader getReader(); @Override MinorType getMinorType(); void zeroVector(); void reset(); @Override void close(); @Override void clear(); byte[] get(int index); Text getObject(int index); void get(int index, NullableVarCharHolder holder); void copyFrom(int fromIndex, int thisIndex, VarCharVector from); void copyFromSafe(int fromIndex, int thisIndex, VarCharVector from); void compact(); void forceCompact(); void set(int index, VarCharHolder holder); void setSafe(int index, VarCharHolder holder); void set(int index, NullableVarCharHolder holder); void setSafe(int index, NullableVarCharHolder holder); void set(int index, Text text); void setSafe(int index, Text text); void set(int index, byte[] value); void setSafe(int index, byte[] value); void set(int index, byte[] value, int start, int length); void setSafe(int index, byte[] value, int start, int length); void set(int index, ByteBuffer value, int start, int length); void setSafe(int index, ByteBuffer value, int start, int length); void set(int index, int isSet, int start, int end, ArrowBuf buffer); void setSafe(int index, int isSet, int start, int end, ArrowBuf buffer); void set(int index, int start, int length, ArrowBuf buffer); void setSafe(int index, int start, int length, ArrowBuf buffer); void copyToVarchar(VarCharVector in, final int from, final int to); boolean isIndexSafe(int index); @Override TransferPair getTransferPair(String ref, BufferAllocator allocator); @Override TransferPair makeTransferPair(ValueVector to); }
@Test public void TestInterleavedSmallToMid() { LinkedList<String> l1 = GetRandomStringList(TOTAL_STRINGS, smallAvgSize); LinkedList<String> l2 = GetRandomStringList(TOTAL_STRINGS, midAvgSize); final double threshold = 1.0D; System.out.println("TestInterleavedSmallToMid: threshold: " + threshold + " (only forcecompact)"); MutableVarcharVector m1 = new MutableVarcharVector("TestInterleavedSmallToMid", testAllocator, threshold); try { TestInterLeaved(m1, l1, l2); } finally { m1.close(); } }
@Override public void close() { this.clear(); }
MutableVarcharVector extends BaseVariableWidthVector { @Override public void close() { this.clear(); } }
MutableVarcharVector extends BaseVariableWidthVector { @Override public void close() { this.clear(); } MutableVarcharVector(String name, BufferAllocator allocator, double compactionThreshold); MutableVarcharVector(String name, FieldType fieldType, BufferAllocator allocator, double compactionThreshold); }
MutableVarcharVector extends BaseVariableWidthVector { @Override public void close() { this.clear(); } MutableVarcharVector(String name, BufferAllocator allocator, double compactionThreshold); MutableVarcharVector(String name, FieldType fieldType, BufferAllocator allocator, double compactionThreshold); final void setCompactionThreshold(double in); final boolean needsCompaction(); final int getCurrentOffset(); final int getGarbageSizeInBytes(); final int getUsedByteCapacity(); @Override FieldReader getReader(); @Override MinorType getMinorType(); void zeroVector(); void reset(); @Override void close(); @Override void clear(); byte[] get(int index); Text getObject(int index); void get(int index, NullableVarCharHolder holder); void copyFrom(int fromIndex, int thisIndex, VarCharVector from); void copyFromSafe(int fromIndex, int thisIndex, VarCharVector from); void compact(); void forceCompact(); void set(int index, VarCharHolder holder); void setSafe(int index, VarCharHolder holder); void set(int index, NullableVarCharHolder holder); void setSafe(int index, NullableVarCharHolder holder); void set(int index, Text text); void setSafe(int index, Text text); void set(int index, byte[] value); void setSafe(int index, byte[] value); void set(int index, byte[] value, int start, int length); void setSafe(int index, byte[] value, int start, int length); void set(int index, ByteBuffer value, int start, int length); void setSafe(int index, ByteBuffer value, int start, int length); void set(int index, int isSet, int start, int end, ArrowBuf buffer); void setSafe(int index, int isSet, int start, int end, ArrowBuf buffer); void set(int index, int start, int length, ArrowBuf buffer); void setSafe(int index, int start, int length, ArrowBuf buffer); void copyToVarchar(VarCharVector in, final int from, final int to); boolean isIndexSafe(int index); @Override TransferPair getTransferPair(String ref, BufferAllocator allocator); @Override TransferPair makeTransferPair(ValueVector to); }
MutableVarcharVector extends BaseVariableWidthVector { @Override public void close() { this.clear(); } MutableVarcharVector(String name, BufferAllocator allocator, double compactionThreshold); MutableVarcharVector(String name, FieldType fieldType, BufferAllocator allocator, double compactionThreshold); final void setCompactionThreshold(double in); final boolean needsCompaction(); final int getCurrentOffset(); final int getGarbageSizeInBytes(); final int getUsedByteCapacity(); @Override FieldReader getReader(); @Override MinorType getMinorType(); void zeroVector(); void reset(); @Override void close(); @Override void clear(); byte[] get(int index); Text getObject(int index); void get(int index, NullableVarCharHolder holder); void copyFrom(int fromIndex, int thisIndex, VarCharVector from); void copyFromSafe(int fromIndex, int thisIndex, VarCharVector from); void compact(); void forceCompact(); void set(int index, VarCharHolder holder); void setSafe(int index, VarCharHolder holder); void set(int index, NullableVarCharHolder holder); void setSafe(int index, NullableVarCharHolder holder); void set(int index, Text text); void setSafe(int index, Text text); void set(int index, byte[] value); void setSafe(int index, byte[] value); void set(int index, byte[] value, int start, int length); void setSafe(int index, byte[] value, int start, int length); void set(int index, ByteBuffer value, int start, int length); void setSafe(int index, ByteBuffer value, int start, int length); void set(int index, int isSet, int start, int end, ArrowBuf buffer); void setSafe(int index, int isSet, int start, int end, ArrowBuf buffer); void set(int index, int start, int length, ArrowBuf buffer); void setSafe(int index, int start, int length, ArrowBuf buffer); void copyToVarchar(VarCharVector in, final int from, final int to); boolean isIndexSafe(int index); @Override TransferPair getTransferPair(String ref, BufferAllocator allocator); @Override TransferPair makeTransferPair(ValueVector to); }
@Test public void testHandlePassesNoopTracesByDefault() throws RpcException { setup(false); server.handle(connection, GET_CATALOGS_VALUE, pBody, dBody, responseSender); verify(ingestor).feedWork(eq(connection), eq(GET_CATALOGS_VALUE), eq(pBody), eq(dBody), captorSender.capture()); assertEquals(0, tracer.finishedSpans().size()); verifySendResponse(0); }
@Override protected Response handle(UserClientConnectionImpl connection, int rpcType, byte[] pBody, ByteBuf dBody) throws RpcException { throw new IllegalStateException("UserRPCServer#handle must not be invoked without ResponseSender"); }
UserRPCServer extends BasicServer<RpcType, UserRPCServer.UserClientConnectionImpl> { @Override protected Response handle(UserClientConnectionImpl connection, int rpcType, byte[] pBody, ByteBuf dBody) throws RpcException { throw new IllegalStateException("UserRPCServer#handle must not be invoked without ResponseSender"); } }
UserRPCServer extends BasicServer<RpcType, UserRPCServer.UserClientConnectionImpl> { @Override protected Response handle(UserClientConnectionImpl connection, int rpcType, byte[] pBody, ByteBuf dBody) throws RpcException { throw new IllegalStateException("UserRPCServer#handle must not be invoked without ResponseSender"); } @VisibleForTesting UserRPCServer( RpcConfig rpcConfig, Provider<UserService> userServiceProvider, Provider<NodeEndpoint> nodeEndpointProvider, WorkIngestor workIngestor, Provider<UserWorker> worker, BufferAllocator allocator, EventLoopGroup eventLoopGroup, InboundImpersonationManager impersonationManager, Tracer tracer, OptionValidatorListing optionValidatorListing ); UserRPCServer( RpcConfig rpcConfig, Provider<UserService> userServiceProvider, Provider<NodeEndpoint> nodeEndpointProvider, Provider<UserWorker> worker, BufferAllocator allocator, EventLoopGroup eventLoopGroup, InboundImpersonationManager impersonationManager, Tracer tracer, OptionValidatorListing optionValidatorListing ); }
UserRPCServer extends BasicServer<RpcType, UserRPCServer.UserClientConnectionImpl> { @Override protected Response handle(UserClientConnectionImpl connection, int rpcType, byte[] pBody, ByteBuf dBody) throws RpcException { throw new IllegalStateException("UserRPCServer#handle must not be invoked without ResponseSender"); } @VisibleForTesting UserRPCServer( RpcConfig rpcConfig, Provider<UserService> userServiceProvider, Provider<NodeEndpoint> nodeEndpointProvider, WorkIngestor workIngestor, Provider<UserWorker> worker, BufferAllocator allocator, EventLoopGroup eventLoopGroup, InboundImpersonationManager impersonationManager, Tracer tracer, OptionValidatorListing optionValidatorListing ); UserRPCServer( RpcConfig rpcConfig, Provider<UserService> userServiceProvider, Provider<NodeEndpoint> nodeEndpointProvider, Provider<UserWorker> worker, BufferAllocator allocator, EventLoopGroup eventLoopGroup, InboundImpersonationManager impersonationManager, Tracer tracer, OptionValidatorListing optionValidatorListing ); @Override UserClientConnectionImpl initRemoteConnection(SocketChannel channel); }
UserRPCServer extends BasicServer<RpcType, UserRPCServer.UserClientConnectionImpl> { @Override protected Response handle(UserClientConnectionImpl connection, int rpcType, byte[] pBody, ByteBuf dBody) throws RpcException { throw new IllegalStateException("UserRPCServer#handle must not be invoked without ResponseSender"); } @VisibleForTesting UserRPCServer( RpcConfig rpcConfig, Provider<UserService> userServiceProvider, Provider<NodeEndpoint> nodeEndpointProvider, WorkIngestor workIngestor, Provider<UserWorker> worker, BufferAllocator allocator, EventLoopGroup eventLoopGroup, InboundImpersonationManager impersonationManager, Tracer tracer, OptionValidatorListing optionValidatorListing ); UserRPCServer( RpcConfig rpcConfig, Provider<UserService> userServiceProvider, Provider<NodeEndpoint> nodeEndpointProvider, Provider<UserWorker> worker, BufferAllocator allocator, EventLoopGroup eventLoopGroup, InboundImpersonationManager impersonationManager, Tracer tracer, OptionValidatorListing optionValidatorListing ); @Override UserClientConnectionImpl initRemoteConnection(SocketChannel channel); }
@Test public void testHandleCreatesSpansFromTracerWhenTracingEnabled() throws RpcException { setup(true); server.handle(connection, GET_SCHEMAS_VALUE, pBody, dBody, responseSender); verify(ingestor).feedWork(eq(connection), eq(GET_SCHEMAS_VALUE), eq(pBody), eq(dBody), captorSender.capture()); assertEquals(0, tracer.finishedSpans().size()); verifySendResponse(1); assertEquals("GET_SCHEMAS", tracer.finishedSpans().get(0).tags().get("rpc_type")); }
@Override protected Response handle(UserClientConnectionImpl connection, int rpcType, byte[] pBody, ByteBuf dBody) throws RpcException { throw new IllegalStateException("UserRPCServer#handle must not be invoked without ResponseSender"); }
UserRPCServer extends BasicServer<RpcType, UserRPCServer.UserClientConnectionImpl> { @Override protected Response handle(UserClientConnectionImpl connection, int rpcType, byte[] pBody, ByteBuf dBody) throws RpcException { throw new IllegalStateException("UserRPCServer#handle must not be invoked without ResponseSender"); } }
UserRPCServer extends BasicServer<RpcType, UserRPCServer.UserClientConnectionImpl> { @Override protected Response handle(UserClientConnectionImpl connection, int rpcType, byte[] pBody, ByteBuf dBody) throws RpcException { throw new IllegalStateException("UserRPCServer#handle must not be invoked without ResponseSender"); } @VisibleForTesting UserRPCServer( RpcConfig rpcConfig, Provider<UserService> userServiceProvider, Provider<NodeEndpoint> nodeEndpointProvider, WorkIngestor workIngestor, Provider<UserWorker> worker, BufferAllocator allocator, EventLoopGroup eventLoopGroup, InboundImpersonationManager impersonationManager, Tracer tracer, OptionValidatorListing optionValidatorListing ); UserRPCServer( RpcConfig rpcConfig, Provider<UserService> userServiceProvider, Provider<NodeEndpoint> nodeEndpointProvider, Provider<UserWorker> worker, BufferAllocator allocator, EventLoopGroup eventLoopGroup, InboundImpersonationManager impersonationManager, Tracer tracer, OptionValidatorListing optionValidatorListing ); }
UserRPCServer extends BasicServer<RpcType, UserRPCServer.UserClientConnectionImpl> { @Override protected Response handle(UserClientConnectionImpl connection, int rpcType, byte[] pBody, ByteBuf dBody) throws RpcException { throw new IllegalStateException("UserRPCServer#handle must not be invoked without ResponseSender"); } @VisibleForTesting UserRPCServer( RpcConfig rpcConfig, Provider<UserService> userServiceProvider, Provider<NodeEndpoint> nodeEndpointProvider, WorkIngestor workIngestor, Provider<UserWorker> worker, BufferAllocator allocator, EventLoopGroup eventLoopGroup, InboundImpersonationManager impersonationManager, Tracer tracer, OptionValidatorListing optionValidatorListing ); UserRPCServer( RpcConfig rpcConfig, Provider<UserService> userServiceProvider, Provider<NodeEndpoint> nodeEndpointProvider, Provider<UserWorker> worker, BufferAllocator allocator, EventLoopGroup eventLoopGroup, InboundImpersonationManager impersonationManager, Tracer tracer, OptionValidatorListing optionValidatorListing ); @Override UserClientConnectionImpl initRemoteConnection(SocketChannel channel); }
UserRPCServer extends BasicServer<RpcType, UserRPCServer.UserClientConnectionImpl> { @Override protected Response handle(UserClientConnectionImpl connection, int rpcType, byte[] pBody, ByteBuf dBody) throws RpcException { throw new IllegalStateException("UserRPCServer#handle must not be invoked without ResponseSender"); } @VisibleForTesting UserRPCServer( RpcConfig rpcConfig, Provider<UserService> userServiceProvider, Provider<NodeEndpoint> nodeEndpointProvider, WorkIngestor workIngestor, Provider<UserWorker> worker, BufferAllocator allocator, EventLoopGroup eventLoopGroup, InboundImpersonationManager impersonationManager, Tracer tracer, OptionValidatorListing optionValidatorListing ); UserRPCServer( RpcConfig rpcConfig, Provider<UserService> userServiceProvider, Provider<NodeEndpoint> nodeEndpointProvider, Provider<UserWorker> worker, BufferAllocator allocator, EventLoopGroup eventLoopGroup, InboundImpersonationManager impersonationManager, Tracer tracer, OptionValidatorListing optionValidatorListing ); @Override UserClientConnectionImpl initRemoteConnection(SocketChannel channel); }
@Test public void testHandleSpansWhileSendingFailure() throws RpcException { setup(true); server.handle(connection, GET_CATALOGS_VALUE, pBody, dBody, responseSender); verify(ingestor).feedWork(eq(connection), eq(GET_CATALOGS_VALUE), eq(pBody), eq(dBody), captorSender.capture()); assertEquals(0, tracer.finishedSpans().size()); UserRpcException r = mock(UserRpcException.class); verifyZeroInteractions(responseSender); captorSender.getValue().sendFailure(r); verify(responseSender).sendFailure(r); assertEquals(1, tracer.finishedSpans().size()); assertEquals("GET_CATALOGS", tracer.finishedSpans().get(0).tags().get("rpc_type")); }
@Override protected Response handle(UserClientConnectionImpl connection, int rpcType, byte[] pBody, ByteBuf dBody) throws RpcException { throw new IllegalStateException("UserRPCServer#handle must not be invoked without ResponseSender"); }
UserRPCServer extends BasicServer<RpcType, UserRPCServer.UserClientConnectionImpl> { @Override protected Response handle(UserClientConnectionImpl connection, int rpcType, byte[] pBody, ByteBuf dBody) throws RpcException { throw new IllegalStateException("UserRPCServer#handle must not be invoked without ResponseSender"); } }
UserRPCServer extends BasicServer<RpcType, UserRPCServer.UserClientConnectionImpl> { @Override protected Response handle(UserClientConnectionImpl connection, int rpcType, byte[] pBody, ByteBuf dBody) throws RpcException { throw new IllegalStateException("UserRPCServer#handle must not be invoked without ResponseSender"); } @VisibleForTesting UserRPCServer( RpcConfig rpcConfig, Provider<UserService> userServiceProvider, Provider<NodeEndpoint> nodeEndpointProvider, WorkIngestor workIngestor, Provider<UserWorker> worker, BufferAllocator allocator, EventLoopGroup eventLoopGroup, InboundImpersonationManager impersonationManager, Tracer tracer, OptionValidatorListing optionValidatorListing ); UserRPCServer( RpcConfig rpcConfig, Provider<UserService> userServiceProvider, Provider<NodeEndpoint> nodeEndpointProvider, Provider<UserWorker> worker, BufferAllocator allocator, EventLoopGroup eventLoopGroup, InboundImpersonationManager impersonationManager, Tracer tracer, OptionValidatorListing optionValidatorListing ); }
UserRPCServer extends BasicServer<RpcType, UserRPCServer.UserClientConnectionImpl> { @Override protected Response handle(UserClientConnectionImpl connection, int rpcType, byte[] pBody, ByteBuf dBody) throws RpcException { throw new IllegalStateException("UserRPCServer#handle must not be invoked without ResponseSender"); } @VisibleForTesting UserRPCServer( RpcConfig rpcConfig, Provider<UserService> userServiceProvider, Provider<NodeEndpoint> nodeEndpointProvider, WorkIngestor workIngestor, Provider<UserWorker> worker, BufferAllocator allocator, EventLoopGroup eventLoopGroup, InboundImpersonationManager impersonationManager, Tracer tracer, OptionValidatorListing optionValidatorListing ); UserRPCServer( RpcConfig rpcConfig, Provider<UserService> userServiceProvider, Provider<NodeEndpoint> nodeEndpointProvider, Provider<UserWorker> worker, BufferAllocator allocator, EventLoopGroup eventLoopGroup, InboundImpersonationManager impersonationManager, Tracer tracer, OptionValidatorListing optionValidatorListing ); @Override UserClientConnectionImpl initRemoteConnection(SocketChannel channel); }
UserRPCServer extends BasicServer<RpcType, UserRPCServer.UserClientConnectionImpl> { @Override protected Response handle(UserClientConnectionImpl connection, int rpcType, byte[] pBody, ByteBuf dBody) throws RpcException { throw new IllegalStateException("UserRPCServer#handle must not be invoked without ResponseSender"); } @VisibleForTesting UserRPCServer( RpcConfig rpcConfig, Provider<UserService> userServiceProvider, Provider<NodeEndpoint> nodeEndpointProvider, WorkIngestor workIngestor, Provider<UserWorker> worker, BufferAllocator allocator, EventLoopGroup eventLoopGroup, InboundImpersonationManager impersonationManager, Tracer tracer, OptionValidatorListing optionValidatorListing ); UserRPCServer( RpcConfig rpcConfig, Provider<UserService> userServiceProvider, Provider<NodeEndpoint> nodeEndpointProvider, Provider<UserWorker> worker, BufferAllocator allocator, EventLoopGroup eventLoopGroup, InboundImpersonationManager impersonationManager, Tracer tracer, OptionValidatorListing optionValidatorListing ); @Override UserClientConnectionImpl initRemoteConnection(SocketChannel channel); }
@Test public void testHandleFinishesSpanIfFeedFailure() throws RpcException { WorkIngestor ingest = (con, rpc, pb, db, sender) -> { throw new RpcException(); }; setup(ingest, true); try { server.handle(connection, CANCEL_QUERY_VALUE, pBody, dBody, responseSender); } catch (RpcException e) { } assertEquals(1, tracer.finishedSpans().size()); assertEquals("CANCEL_QUERY", tracer.finishedSpans().get(0).tags().get("rpc_type")); }
@Override protected Response handle(UserClientConnectionImpl connection, int rpcType, byte[] pBody, ByteBuf dBody) throws RpcException { throw new IllegalStateException("UserRPCServer#handle must not be invoked without ResponseSender"); }
UserRPCServer extends BasicServer<RpcType, UserRPCServer.UserClientConnectionImpl> { @Override protected Response handle(UserClientConnectionImpl connection, int rpcType, byte[] pBody, ByteBuf dBody) throws RpcException { throw new IllegalStateException("UserRPCServer#handle must not be invoked without ResponseSender"); } }
UserRPCServer extends BasicServer<RpcType, UserRPCServer.UserClientConnectionImpl> { @Override protected Response handle(UserClientConnectionImpl connection, int rpcType, byte[] pBody, ByteBuf dBody) throws RpcException { throw new IllegalStateException("UserRPCServer#handle must not be invoked without ResponseSender"); } @VisibleForTesting UserRPCServer( RpcConfig rpcConfig, Provider<UserService> userServiceProvider, Provider<NodeEndpoint> nodeEndpointProvider, WorkIngestor workIngestor, Provider<UserWorker> worker, BufferAllocator allocator, EventLoopGroup eventLoopGroup, InboundImpersonationManager impersonationManager, Tracer tracer, OptionValidatorListing optionValidatorListing ); UserRPCServer( RpcConfig rpcConfig, Provider<UserService> userServiceProvider, Provider<NodeEndpoint> nodeEndpointProvider, Provider<UserWorker> worker, BufferAllocator allocator, EventLoopGroup eventLoopGroup, InboundImpersonationManager impersonationManager, Tracer tracer, OptionValidatorListing optionValidatorListing ); }
UserRPCServer extends BasicServer<RpcType, UserRPCServer.UserClientConnectionImpl> { @Override protected Response handle(UserClientConnectionImpl connection, int rpcType, byte[] pBody, ByteBuf dBody) throws RpcException { throw new IllegalStateException("UserRPCServer#handle must not be invoked without ResponseSender"); } @VisibleForTesting UserRPCServer( RpcConfig rpcConfig, Provider<UserService> userServiceProvider, Provider<NodeEndpoint> nodeEndpointProvider, WorkIngestor workIngestor, Provider<UserWorker> worker, BufferAllocator allocator, EventLoopGroup eventLoopGroup, InboundImpersonationManager impersonationManager, Tracer tracer, OptionValidatorListing optionValidatorListing ); UserRPCServer( RpcConfig rpcConfig, Provider<UserService> userServiceProvider, Provider<NodeEndpoint> nodeEndpointProvider, Provider<UserWorker> worker, BufferAllocator allocator, EventLoopGroup eventLoopGroup, InboundImpersonationManager impersonationManager, Tracer tracer, OptionValidatorListing optionValidatorListing ); @Override UserClientConnectionImpl initRemoteConnection(SocketChannel channel); }
UserRPCServer extends BasicServer<RpcType, UserRPCServer.UserClientConnectionImpl> { @Override protected Response handle(UserClientConnectionImpl connection, int rpcType, byte[] pBody, ByteBuf dBody) throws RpcException { throw new IllegalStateException("UserRPCServer#handle must not be invoked without ResponseSender"); } @VisibleForTesting UserRPCServer( RpcConfig rpcConfig, Provider<UserService> userServiceProvider, Provider<NodeEndpoint> nodeEndpointProvider, WorkIngestor workIngestor, Provider<UserWorker> worker, BufferAllocator allocator, EventLoopGroup eventLoopGroup, InboundImpersonationManager impersonationManager, Tracer tracer, OptionValidatorListing optionValidatorListing ); UserRPCServer( RpcConfig rpcConfig, Provider<UserService> userServiceProvider, Provider<NodeEndpoint> nodeEndpointProvider, Provider<UserWorker> worker, BufferAllocator allocator, EventLoopGroup eventLoopGroup, InboundImpersonationManager impersonationManager, Tracer tracer, OptionValidatorListing optionValidatorListing ); @Override UserClientConnectionImpl initRemoteConnection(SocketChannel channel); }
@Test public void testQueriesClerk() throws Exception{ WorkloadTicketDepot ticketDepot = new WorkloadTicketDepot(mockedRootAlloc, mock(SabotConfig.class), DUMMY_GROUP_MANAGER); QueriesClerk clerk = makeClerk(ticketDepot); assertLivePhasesCount(clerk, 0); int baseNumAllocators = getNumAllocators(); UserBitShared.QueryId queryId = UserBitShared.QueryId.newBuilder().setPart1(12).setPart2(23).build(); QueryTicketGetter qtg1 = new QueryTicketGetter(); clerk.buildAndStartQuery(getDummyPlan(queryId,1,0), getDummySchedulingInfo(), qtg1); QueryTicket queryTicket = qtg1.getObtainedTicket(); assertNotNull(queryTicket); assertLivePhasesCount(clerk, 0); assertEquals(baseNumAllocators + 1, getNumAllocators()); FragmentTicket ticket10 = clerk .newFragmentTicket(queryTicket, getDummyPlan(queryId,1,0), getDummySchedulingInfo()); assertLivePhasesCount(clerk, 1); assertEquals(baseNumAllocators + 2, getNumAllocators()); FragmentTicket ticket11 = clerk .newFragmentTicket(queryTicket, getDummyPlan(queryId,1,1), getDummySchedulingInfo()); assertLivePhasesCount(clerk, 1); assertEquals(baseNumAllocators + 2, getNumAllocators()); FragmentTicket ticket20 = clerk .newFragmentTicket(queryTicket, getDummyPlan(queryId,2,0), getDummySchedulingInfo()); assertLivePhasesCount(clerk, 2); assertEquals(baseNumAllocators + 3, getNumAllocators()); qtg1.close(); assertEquals(baseNumAllocators + 3, getNumAllocators()); ticket10.close(); assertLivePhasesCount(clerk, 2); assertEquals(baseNumAllocators + 3, getNumAllocators()); ticket20.close(); assertLivePhasesCount(clerk, 1); assertEquals(baseNumAllocators + 2, getNumAllocators()); ticket11.close(); assertLivePhasesCount(clerk, 0); assertEquals(baseNumAllocators, getNumAllocators()); AutoCloseables.close(ticketDepot); }
QueriesClerk(final WorkloadTicketDepot workloadTicketDepot) { this.workloadTicketDepot = workloadTicketDepot; }
QueriesClerk { QueriesClerk(final WorkloadTicketDepot workloadTicketDepot) { this.workloadTicketDepot = workloadTicketDepot; } }
QueriesClerk { QueriesClerk(final WorkloadTicketDepot workloadTicketDepot) { this.workloadTicketDepot = workloadTicketDepot; } QueriesClerk(final WorkloadTicketDepot workloadTicketDepot); }
QueriesClerk { QueriesClerk(final WorkloadTicketDepot workloadTicketDepot) { this.workloadTicketDepot = workloadTicketDepot; } QueriesClerk(final WorkloadTicketDepot workloadTicketDepot); void buildAndStartQuery(final PlanFragmentFull firstFragment, final SchedulingInfo schedulingInfo, final QueryStarter queryStarter); FragmentTicket newFragmentTicket(final QueryTicket queryTicket, final PlanFragmentFull fragment, final SchedulingInfo schedulingInfo); }
QueriesClerk { QueriesClerk(final WorkloadTicketDepot workloadTicketDepot) { this.workloadTicketDepot = workloadTicketDepot; } QueriesClerk(final WorkloadTicketDepot workloadTicketDepot); void buildAndStartQuery(final PlanFragmentFull firstFragment, final SchedulingInfo schedulingInfo, final QueryStarter queryStarter); FragmentTicket newFragmentTicket(final QueryTicket queryTicket, final PlanFragmentFull fragment, final SchedulingInfo schedulingInfo); }
@Test public void testGetFragmentTickets() throws Exception { WorkloadTicketDepot ticketDepot = new WorkloadTicketDepot(mockedRootAlloc, mock(SabotConfig.class), DUMMY_GROUP_MANAGER); QueriesClerk clerk = makeClerk(ticketDepot); UserBitShared.QueryId queryId = UserBitShared.QueryId.newBuilder().setPart1(12).setPart2(23).build(); QueryTicketGetter qtg1 = new QueryTicketGetter(); clerk.buildAndStartQuery(getDummyPlan(queryId,1,0), getDummySchedulingInfo(), qtg1); QueryTicket queryTicket = qtg1.getObtainedTicket(); assertNotNull(queryTicket); Set<FragmentTicket> expected = new HashSet<>(); int numMajors = 3; int numMinors = 5; for (int i = 0; i < numMajors; ++i) { for (int j = 0; j < numMinors; ++j) { FragmentTicket fragmentTicket = clerk .newFragmentTicket(queryTicket, getDummyPlan(queryId, i, j), getDummySchedulingInfo()); expected.add(fragmentTicket); } } qtg1.close(); Collection<FragmentTicket> actual = clerk.getFragmentTickets(queryId); assertEquals(expected.size(), actual.size()); assertTrue(expected.containsAll(actual)); for (FragmentTicket ticket : expected) { ticket.close(); } AutoCloseables.close(ticketDepot); }
Collection<FragmentTicket> getFragmentTickets(QueryId queryId) { List<FragmentTicket> fragmentTickets = new ArrayList<>(); for (WorkloadTicket workloadTicket : getWorkloadTickets()) { QueryTicket queryTicket = workloadTicket.getQueryTicket(queryId); if (queryTicket != null) { for (PhaseTicket phaseTicket : queryTicket.getActivePhaseTickets()) { fragmentTickets.addAll(phaseTicket.getFragmentTickets()); } break; } } return fragmentTickets; }
QueriesClerk { Collection<FragmentTicket> getFragmentTickets(QueryId queryId) { List<FragmentTicket> fragmentTickets = new ArrayList<>(); for (WorkloadTicket workloadTicket : getWorkloadTickets()) { QueryTicket queryTicket = workloadTicket.getQueryTicket(queryId); if (queryTicket != null) { for (PhaseTicket phaseTicket : queryTicket.getActivePhaseTickets()) { fragmentTickets.addAll(phaseTicket.getFragmentTickets()); } break; } } return fragmentTickets; } }
QueriesClerk { Collection<FragmentTicket> getFragmentTickets(QueryId queryId) { List<FragmentTicket> fragmentTickets = new ArrayList<>(); for (WorkloadTicket workloadTicket : getWorkloadTickets()) { QueryTicket queryTicket = workloadTicket.getQueryTicket(queryId); if (queryTicket != null) { for (PhaseTicket phaseTicket : queryTicket.getActivePhaseTickets()) { fragmentTickets.addAll(phaseTicket.getFragmentTickets()); } break; } } return fragmentTickets; } QueriesClerk(final WorkloadTicketDepot workloadTicketDepot); }
QueriesClerk { Collection<FragmentTicket> getFragmentTickets(QueryId queryId) { List<FragmentTicket> fragmentTickets = new ArrayList<>(); for (WorkloadTicket workloadTicket : getWorkloadTickets()) { QueryTicket queryTicket = workloadTicket.getQueryTicket(queryId); if (queryTicket != null) { for (PhaseTicket phaseTicket : queryTicket.getActivePhaseTickets()) { fragmentTickets.addAll(phaseTicket.getFragmentTickets()); } break; } } return fragmentTickets; } QueriesClerk(final WorkloadTicketDepot workloadTicketDepot); void buildAndStartQuery(final PlanFragmentFull firstFragment, final SchedulingInfo schedulingInfo, final QueryStarter queryStarter); FragmentTicket newFragmentTicket(final QueryTicket queryTicket, final PlanFragmentFull fragment, final SchedulingInfo schedulingInfo); }
QueriesClerk { Collection<FragmentTicket> getFragmentTickets(QueryId queryId) { List<FragmentTicket> fragmentTickets = new ArrayList<>(); for (WorkloadTicket workloadTicket : getWorkloadTickets()) { QueryTicket queryTicket = workloadTicket.getQueryTicket(queryId); if (queryTicket != null) { for (PhaseTicket phaseTicket : queryTicket.getActivePhaseTickets()) { fragmentTickets.addAll(phaseTicket.getFragmentTickets()); } break; } } return fragmentTickets; } QueriesClerk(final WorkloadTicketDepot workloadTicketDepot); void buildAndStartQuery(final PlanFragmentFull firstFragment, final SchedulingInfo schedulingInfo, final QueryStarter queryStarter); FragmentTicket newFragmentTicket(final QueryTicket queryTicket, final PlanFragmentFull fragment, final SchedulingInfo schedulingInfo); }
@Test public void testMutatorApplyNoDropPreview() { boolean preview = true; TransformResult result = mutator(preview).apply("foo", "foo2", newValue, false); assertEquals(newHashSet("foo2"), result.getAddedColumns()); assertEquals(newHashSet(), result.getModifiedColumns()); assertEquals(newHashSet(), result.getRemovedColumns()); assertColIs(newValue, result, "foo2"); assertColIs(value, result, "foo"); }
public TransformResult apply(String oldCol, String newCol, ExpressionBase newExp, boolean dropSourceColumn) { return apply(oldCol, newCol, newExp.wrap(), dropSourceColumn); }
DatasetStateMutator { public TransformResult apply(String oldCol, String newCol, ExpressionBase newExp, boolean dropSourceColumn) { return apply(oldCol, newCol, newExp.wrap(), dropSourceColumn); } }
DatasetStateMutator { public TransformResult apply(String oldCol, String newCol, ExpressionBase newExp, boolean dropSourceColumn) { return apply(oldCol, newCol, newExp.wrap(), dropSourceColumn); } DatasetStateMutator(String username, VirtualDatasetState virtualDatasetState, boolean preview); }
DatasetStateMutator { public TransformResult apply(String oldCol, String newCol, ExpressionBase newExp, boolean dropSourceColumn) { return apply(oldCol, newCol, newExp.wrap(), dropSourceColumn); } DatasetStateMutator(String username, VirtualDatasetState virtualDatasetState, boolean preview); void setSql(QueryMetadata metadata); void addColumn(int index, Column column); void addColumn(Column column); void moveColumn(int index, int dest); int columnCount(); void addJoin(Join join); void updateColumnTables(); String uniqueColumnName(String column); String getDatasetAlias(); void groupedBy(List<Column> newColumns, List<Column> groupBys); void addFilter(Filter filter); void setOrdersList(List<Order> columnsList); TransformResult result(); int indexOfCol(String colName); void nest(); TransformResult rename(String oldCol, String newCol); TransformResult apply(String oldCol, String newCol, ExpressionBase newExp, boolean dropSourceColumn); TransformResult apply(String oldCol, String newCol, Expression newExpWrapped, boolean dropSourceColumn); Expression findColValueForModification(String colName); Expression findColValue(String colName); boolean isGrouped(); void dropColumn(String droppedColumnName); }
DatasetStateMutator { public TransformResult apply(String oldCol, String newCol, ExpressionBase newExp, boolean dropSourceColumn) { return apply(oldCol, newCol, newExp.wrap(), dropSourceColumn); } DatasetStateMutator(String username, VirtualDatasetState virtualDatasetState, boolean preview); void setSql(QueryMetadata metadata); void addColumn(int index, Column column); void addColumn(Column column); void moveColumn(int index, int dest); int columnCount(); void addJoin(Join join); void updateColumnTables(); String uniqueColumnName(String column); String getDatasetAlias(); void groupedBy(List<Column> newColumns, List<Column> groupBys); void addFilter(Filter filter); void setOrdersList(List<Order> columnsList); TransformResult result(); int indexOfCol(String colName); void nest(); TransformResult rename(String oldCol, String newCol); TransformResult apply(String oldCol, String newCol, ExpressionBase newExp, boolean dropSourceColumn); TransformResult apply(String oldCol, String newCol, Expression newExpWrapped, boolean dropSourceColumn); Expression findColValueForModification(String colName); Expression findColValue(String colName); boolean isGrouped(); void dropColumn(String droppedColumnName); }
@Test public void testTasksWithoutUUID() throws Exception { DACConfig dacConfig = DACConfig.newConfig(); Upgrade upgrade = new Upgrade(dacConfig, CLASSPATH_SCAN_RESULT, false); List<? extends UpgradeTask> tasks = upgrade.getUpgradeTasks(); tasks.forEach(task -> assertNotNull( String.format( "Need to add UUID to task: '%s'. For example: %s", task.getTaskName(), UUID.randomUUID().toString()), task.getTaskUUID())); }
@VisibleForTesting List<? extends UpgradeTask> getUpgradeTasks() { return upgradeTasks; }
Upgrade { @VisibleForTesting List<? extends UpgradeTask> getUpgradeTasks() { return upgradeTasks; } }
Upgrade { @VisibleForTesting List<? extends UpgradeTask> getUpgradeTasks() { return upgradeTasks; } Upgrade(DACConfig dacConfig, ScanResult classPathScan, boolean verbose); }
Upgrade { @VisibleForTesting List<? extends UpgradeTask> getUpgradeTasks() { return upgradeTasks; } Upgrade(DACConfig dacConfig, ScanResult classPathScan, boolean verbose); void run(); void run(boolean noDBOpenRetry); @VisibleForTesting void validateUpgrade(final LegacyKVStoreProvider storeProvider, final String curEdition); void run(final LegacyKVStoreProvider storeProvider); static void main(String[] args); }
Upgrade { @VisibleForTesting List<? extends UpgradeTask> getUpgradeTasks() { return upgradeTasks; } Upgrade(DACConfig dacConfig, ScanResult classPathScan, boolean verbose); void run(); void run(boolean noDBOpenRetry); @VisibleForTesting void validateUpgrade(final LegacyKVStoreProvider storeProvider, final String curEdition); void run(final LegacyKVStoreProvider storeProvider); static void main(String[] args); static final Comparator<Version> UPGRADE_VERSION_ORDERING; }
@Test public void testOldThreadsArePruned() throws InterruptedException { Thread t = new Thread() { public void run() { try { sleep(400L); } catch (InterruptedException e) { } } }; Thread t1 = new Thread() { public void run() { try { sleep(400L); } catch (InterruptedException e) { } } }; t.start(); t1.start(); ThreadsStatsCollector collector = new ThreadsStatsCollector(50L, Sets.newHashSet(t1.getId())); collector.start(); sleep(200L); Integer stat = collector.getCpuTrailingAverage(t.getId(), 1); Integer statThread2 = collector.getCpuTrailingAverage(t1.getId(), 1); Assert.assertTrue(stat == null); Assert.assertTrue(statThread2 != null); t.join(); t1.join(); stat = collector.getCpuTrailingAverage(t.getId(), 1); Assert.assertTrue(stat == null); }
public Integer getCpuTrailingAverage(long id, int seconds) { return cpuStat.getTrailingAverage(id, seconds); }
ThreadsStatsCollector extends Thread implements AutoCloseable { public Integer getCpuTrailingAverage(long id, int seconds) { return cpuStat.getTrailingAverage(id, seconds); } }
ThreadsStatsCollector extends Thread implements AutoCloseable { public Integer getCpuTrailingAverage(long id, int seconds) { return cpuStat.getTrailingAverage(id, seconds); } ThreadsStatsCollector(Set<Long> slicingThreadIds); ThreadsStatsCollector(long collectionIntervalInMilliSeconds, Set<Long> slicingThreadIds); }
ThreadsStatsCollector extends Thread implements AutoCloseable { public Integer getCpuTrailingAverage(long id, int seconds) { return cpuStat.getTrailingAverage(id, seconds); } ThreadsStatsCollector(Set<Long> slicingThreadIds); ThreadsStatsCollector(long collectionIntervalInMilliSeconds, Set<Long> slicingThreadIds); @Override void run(); Integer getCpuTrailingAverage(long id, int seconds); Integer getUserTrailingAverage(long id, int seconds); void close(); }
ThreadsStatsCollector extends Thread implements AutoCloseable { public Integer getCpuTrailingAverage(long id, int seconds) { return cpuStat.getTrailingAverage(id, seconds); } ThreadsStatsCollector(Set<Long> slicingThreadIds); ThreadsStatsCollector(long collectionIntervalInMilliSeconds, Set<Long> slicingThreadIds); @Override void run(); Integer getCpuTrailingAverage(long id, int seconds); Integer getUserTrailingAverage(long id, int seconds); void close(); }
@Test public void testSendRuntimeFilterToProbeScan() { int probeScanId = 2; int probeOpId = 131074; int buildMajorFragment = 1; int buildMinorFragment = 2; int buildOpId = 65541; QueryId queryId = QueryId.newBuilder().build(); try (ArrowBuf recvBuffer = bfTestAllocator.buffer(64)) { recvBuffer.setBytes(0, new byte[64]); FragmentHandle fh = FragmentHandle.newBuilder() .setQueryId(queryId).setMajorFragmentId(buildMajorFragment).setMinorFragmentId(buildMinorFragment).build(); TunnelProvider tunnelProvider = mock(TunnelProvider.class); AccountingExecTunnel tunnel = mock(AccountingExecTunnel.class); doNothing().when(tunnel).sendOOBMessage(any(OutOfBandMessage.class)); when(tunnelProvider.getExecTunnel(any(NodeEndpoint.class))).thenReturn(tunnel); OperatorContext opCtx = mockOpContext(fh); EndpointsIndex ei = mock(EndpointsIndex.class); NodeEndpoint node1 = NodeEndpoint.newBuilder().build(); when(ei.getNodeEndpoint(any(Integer.class))).thenReturn(node1); when(opCtx.getEndpointsIndex()).thenReturn(ei); when(opCtx.getTunnelProvider()).thenReturn(tunnelProvider); HashJoinPOP popConfig = mockPopConfig(newRuntimeFilterInfo(false, "col1")); OpProps props = mock(OpProps.class); when(props.getOperatorId()).thenReturn(buildOpId); when(popConfig.getProps()).thenReturn(props); VectorizedHashJoinOperator joinOp = spy(new VectorizedHashJoinOperator(opCtx, popConfig)); BloomFilter bloomFilter = mockedBloom().get(); when(bloomFilter.getDataBuffer()).thenReturn(recvBuffer); when(bloomFilter.getExpectedFPP()).thenReturn(0.001D); RuntimeFilter filter = RuntimeFilter.newBuilder() .setProbeScanMajorFragmentId(probeScanId) .setProbeScanOperatorId(probeOpId) .setPartitionColumnFilter(ExecProtos.CompositeColumnFilter.newBuilder() .addColumns("col1") .setFilterType(ExecProtos.RuntimeFilterType.BLOOM_FILTER) .setSizeBytes(64).build()) .build(); FragmentAssignment assignment1 = FragmentAssignment.newBuilder() .addAllMinorFragmentId(Lists.newArrayList(1, 3, 5)).setAssignmentIndex(1).build(); FragmentAssignment assignment2 = FragmentAssignment.newBuilder() .addAllMinorFragmentId(Lists.newArrayList(0, 2, 4)).setAssignmentIndex(2).build(); MajorFragmentAssignment majorFragmentAssignment = MajorFragmentAssignment.newBuilder() .setMajorFragmentId(probeScanId) .addAllAllAssignment(Lists.newArrayList(assignment1, assignment2)) .build(); when(opCtx.getExtMajorFragmentAssignments(eq(probeScanId))).thenReturn(majorFragmentAssignment); ArgumentCaptor<OutOfBandMessage> oobMessageCaptor = ArgumentCaptor.forClass(OutOfBandMessage.class); joinOp.sendRuntimeFilterToProbeScan(filter, Optional.of(bloomFilter)); verify(tunnel, times(2)).sendOOBMessage(oobMessageCaptor.capture()); for (int assignment = 0; assignment <= 1; assignment++) { assertEquals(queryId, oobMessageCaptor.getAllValues().get(assignment).getQueryId()); assertEquals(probeScanId, oobMessageCaptor.getAllValues().get(assignment).getMajorFragmentId()); assertEquals(probeOpId, oobMessageCaptor.getAllValues().get(assignment).getOperatorId()); assertEquals(buildMajorFragment, oobMessageCaptor.getAllValues().get(assignment).getSendingMajorFragmentId()); assertEquals(buildMinorFragment, oobMessageCaptor.getAllValues().get(assignment).getSendingMinorFragmentId()); } assertEquals(Lists.newArrayList(1,3,5), oobMessageCaptor.getAllValues().get(0).getTargetMinorFragmentIds()); assertEquals(Lists.newArrayList(0,2,4), oobMessageCaptor.getAllValues().get(1).getTargetMinorFragmentIds()); assertEquals(2, recvBuffer.refCnt()); recvBuffer.close(); } }
@VisibleForTesting void sendRuntimeFilterToProbeScan(RuntimeFilter filter, Optional<BloomFilter> partitionColFilter) { logger.debug("Sending join runtime filter to probe scan {}:{}, Filter {}", filter.getProbeScanOperatorId(), filter.getProbeScanMajorFragmentId(), partitionColFilter); logger.debug("Partition col filter fpp {}", partitionColFilter.map(BloomFilter::getExpectedFPP).orElse(-1D)); final MajorFragmentAssignment majorFragmentAssignment = context.getExtMajorFragmentAssignments(filter.getProbeScanMajorFragmentId()); try(ArrowBuf bloomFilterBuf = partitionColFilter.map(bf -> bf.getDataBuffer()).orElse(null)) { if (majorFragmentAssignment==null) { logger.warn("Major fragment assignment for probe scan id {} is null. Dropping the runtime filter.", filter.getProbeScanOperatorId()); return; } for (FragmentAssignment assignment : majorFragmentAssignment.getAllAssignmentList()) { try (RollbackCloseable closeOnErrSend = new RollbackCloseable()) { logger.info("Sending filter to OpId {}, Frag {}:{}", filter.getProbeScanOperatorId(), filter.getProbeScanMajorFragmentId(), assignment.getMinorFragmentIdList()); final OutOfBandMessage message = new OutOfBandMessage( context.getFragmentHandle().getQueryId(), filter.getProbeScanMajorFragmentId(), assignment.getMinorFragmentIdList(), filter.getProbeScanOperatorId(), context.getFragmentHandle().getMajorFragmentId(), context.getFragmentHandle().getMinorFragmentId(), config.getProps().getOperatorId(), new OutOfBandMessage.Payload(filter), bloomFilterBuf, true); closeOnErrSend.add(bloomFilterBuf); final NodeEndpoint endpoint = context.getEndpointsIndex().getNodeEndpoint(assignment.getAssignmentIndex()); context.getTunnelProvider().getExecTunnel(endpoint).sendOOBMessage(message); closeOnErrSend.commit(); } catch (Exception e) { logger.warn("Error while sending runtime filter to minor fragments " + assignment.getMinorFragmentIdList(), e); } } } }
VectorizedHashJoinOperator implements DualInputOperator { @VisibleForTesting void sendRuntimeFilterToProbeScan(RuntimeFilter filter, Optional<BloomFilter> partitionColFilter) { logger.debug("Sending join runtime filter to probe scan {}:{}, Filter {}", filter.getProbeScanOperatorId(), filter.getProbeScanMajorFragmentId(), partitionColFilter); logger.debug("Partition col filter fpp {}", partitionColFilter.map(BloomFilter::getExpectedFPP).orElse(-1D)); final MajorFragmentAssignment majorFragmentAssignment = context.getExtMajorFragmentAssignments(filter.getProbeScanMajorFragmentId()); try(ArrowBuf bloomFilterBuf = partitionColFilter.map(bf -> bf.getDataBuffer()).orElse(null)) { if (majorFragmentAssignment==null) { logger.warn("Major fragment assignment for probe scan id {} is null. Dropping the runtime filter.", filter.getProbeScanOperatorId()); return; } for (FragmentAssignment assignment : majorFragmentAssignment.getAllAssignmentList()) { try (RollbackCloseable closeOnErrSend = new RollbackCloseable()) { logger.info("Sending filter to OpId {}, Frag {}:{}", filter.getProbeScanOperatorId(), filter.getProbeScanMajorFragmentId(), assignment.getMinorFragmentIdList()); final OutOfBandMessage message = new OutOfBandMessage( context.getFragmentHandle().getQueryId(), filter.getProbeScanMajorFragmentId(), assignment.getMinorFragmentIdList(), filter.getProbeScanOperatorId(), context.getFragmentHandle().getMajorFragmentId(), context.getFragmentHandle().getMinorFragmentId(), config.getProps().getOperatorId(), new OutOfBandMessage.Payload(filter), bloomFilterBuf, true); closeOnErrSend.add(bloomFilterBuf); final NodeEndpoint endpoint = context.getEndpointsIndex().getNodeEndpoint(assignment.getAssignmentIndex()); context.getTunnelProvider().getExecTunnel(endpoint).sendOOBMessage(message); closeOnErrSend.commit(); } catch (Exception e) { logger.warn("Error while sending runtime filter to minor fragments " + assignment.getMinorFragmentIdList(), e); } } } } }
VectorizedHashJoinOperator implements DualInputOperator { @VisibleForTesting void sendRuntimeFilterToProbeScan(RuntimeFilter filter, Optional<BloomFilter> partitionColFilter) { logger.debug("Sending join runtime filter to probe scan {}:{}, Filter {}", filter.getProbeScanOperatorId(), filter.getProbeScanMajorFragmentId(), partitionColFilter); logger.debug("Partition col filter fpp {}", partitionColFilter.map(BloomFilter::getExpectedFPP).orElse(-1D)); final MajorFragmentAssignment majorFragmentAssignment = context.getExtMajorFragmentAssignments(filter.getProbeScanMajorFragmentId()); try(ArrowBuf bloomFilterBuf = partitionColFilter.map(bf -> bf.getDataBuffer()).orElse(null)) { if (majorFragmentAssignment==null) { logger.warn("Major fragment assignment for probe scan id {} is null. Dropping the runtime filter.", filter.getProbeScanOperatorId()); return; } for (FragmentAssignment assignment : majorFragmentAssignment.getAllAssignmentList()) { try (RollbackCloseable closeOnErrSend = new RollbackCloseable()) { logger.info("Sending filter to OpId {}, Frag {}:{}", filter.getProbeScanOperatorId(), filter.getProbeScanMajorFragmentId(), assignment.getMinorFragmentIdList()); final OutOfBandMessage message = new OutOfBandMessage( context.getFragmentHandle().getQueryId(), filter.getProbeScanMajorFragmentId(), assignment.getMinorFragmentIdList(), filter.getProbeScanOperatorId(), context.getFragmentHandle().getMajorFragmentId(), context.getFragmentHandle().getMinorFragmentId(), config.getProps().getOperatorId(), new OutOfBandMessage.Payload(filter), bloomFilterBuf, true); closeOnErrSend.add(bloomFilterBuf); final NodeEndpoint endpoint = context.getEndpointsIndex().getNodeEndpoint(assignment.getAssignmentIndex()); context.getTunnelProvider().getExecTunnel(endpoint).sendOOBMessage(message); closeOnErrSend.commit(); } catch (Exception e) { logger.warn("Error while sending runtime filter to minor fragments " + assignment.getMinorFragmentIdList(), e); } } } } VectorizedHashJoinOperator(OperatorContext context, HashJoinPOP popConfig); }
VectorizedHashJoinOperator implements DualInputOperator { @VisibleForTesting void sendRuntimeFilterToProbeScan(RuntimeFilter filter, Optional<BloomFilter> partitionColFilter) { logger.debug("Sending join runtime filter to probe scan {}:{}, Filter {}", filter.getProbeScanOperatorId(), filter.getProbeScanMajorFragmentId(), partitionColFilter); logger.debug("Partition col filter fpp {}", partitionColFilter.map(BloomFilter::getExpectedFPP).orElse(-1D)); final MajorFragmentAssignment majorFragmentAssignment = context.getExtMajorFragmentAssignments(filter.getProbeScanMajorFragmentId()); try(ArrowBuf bloomFilterBuf = partitionColFilter.map(bf -> bf.getDataBuffer()).orElse(null)) { if (majorFragmentAssignment==null) { logger.warn("Major fragment assignment for probe scan id {} is null. Dropping the runtime filter.", filter.getProbeScanOperatorId()); return; } for (FragmentAssignment assignment : majorFragmentAssignment.getAllAssignmentList()) { try (RollbackCloseable closeOnErrSend = new RollbackCloseable()) { logger.info("Sending filter to OpId {}, Frag {}:{}", filter.getProbeScanOperatorId(), filter.getProbeScanMajorFragmentId(), assignment.getMinorFragmentIdList()); final OutOfBandMessage message = new OutOfBandMessage( context.getFragmentHandle().getQueryId(), filter.getProbeScanMajorFragmentId(), assignment.getMinorFragmentIdList(), filter.getProbeScanOperatorId(), context.getFragmentHandle().getMajorFragmentId(), context.getFragmentHandle().getMinorFragmentId(), config.getProps().getOperatorId(), new OutOfBandMessage.Payload(filter), bloomFilterBuf, true); closeOnErrSend.add(bloomFilterBuf); final NodeEndpoint endpoint = context.getEndpointsIndex().getNodeEndpoint(assignment.getAssignmentIndex()); context.getTunnelProvider().getExecTunnel(endpoint).sendOOBMessage(message); closeOnErrSend.commit(); } catch (Exception e) { logger.warn("Error while sending runtime filter to minor fragments " + assignment.getMinorFragmentIdList(), e); } } } } VectorizedHashJoinOperator(OperatorContext context, HashJoinPOP popConfig); @Override State getState(); VectorAccessible setup(VectorAccessible left, VectorAccessible right); @Override void consumeDataRight(int records); @Override void noMoreToConsumeRight(); @Override void consumeDataLeft(int records); @Override int outputData(); @Override void noMoreToConsumeLeft(); ArrowBuf newLinksBuffer(int recordCount); @Override OUT accept(OperatorVisitor<OUT, IN, EXCEP> visitor, IN value); @Override void workOnOOB(OutOfBandMessage message); @Override void close(); }
VectorizedHashJoinOperator implements DualInputOperator { @VisibleForTesting void sendRuntimeFilterToProbeScan(RuntimeFilter filter, Optional<BloomFilter> partitionColFilter) { logger.debug("Sending join runtime filter to probe scan {}:{}, Filter {}", filter.getProbeScanOperatorId(), filter.getProbeScanMajorFragmentId(), partitionColFilter); logger.debug("Partition col filter fpp {}", partitionColFilter.map(BloomFilter::getExpectedFPP).orElse(-1D)); final MajorFragmentAssignment majorFragmentAssignment = context.getExtMajorFragmentAssignments(filter.getProbeScanMajorFragmentId()); try(ArrowBuf bloomFilterBuf = partitionColFilter.map(bf -> bf.getDataBuffer()).orElse(null)) { if (majorFragmentAssignment==null) { logger.warn("Major fragment assignment for probe scan id {} is null. Dropping the runtime filter.", filter.getProbeScanOperatorId()); return; } for (FragmentAssignment assignment : majorFragmentAssignment.getAllAssignmentList()) { try (RollbackCloseable closeOnErrSend = new RollbackCloseable()) { logger.info("Sending filter to OpId {}, Frag {}:{}", filter.getProbeScanOperatorId(), filter.getProbeScanMajorFragmentId(), assignment.getMinorFragmentIdList()); final OutOfBandMessage message = new OutOfBandMessage( context.getFragmentHandle().getQueryId(), filter.getProbeScanMajorFragmentId(), assignment.getMinorFragmentIdList(), filter.getProbeScanOperatorId(), context.getFragmentHandle().getMajorFragmentId(), context.getFragmentHandle().getMinorFragmentId(), config.getProps().getOperatorId(), new OutOfBandMessage.Payload(filter), bloomFilterBuf, true); closeOnErrSend.add(bloomFilterBuf); final NodeEndpoint endpoint = context.getEndpointsIndex().getNodeEndpoint(assignment.getAssignmentIndex()); context.getTunnelProvider().getExecTunnel(endpoint).sendOOBMessage(message); closeOnErrSend.commit(); } catch (Exception e) { logger.warn("Error while sending runtime filter to minor fragments " + assignment.getMinorFragmentIdList(), e); } } } } VectorizedHashJoinOperator(OperatorContext context, HashJoinPOP popConfig); @Override State getState(); VectorAccessible setup(VectorAccessible left, VectorAccessible right); @Override void consumeDataRight(int records); @Override void noMoreToConsumeRight(); @Override void consumeDataLeft(int records); @Override int outputData(); @Override void noMoreToConsumeLeft(); ArrowBuf newLinksBuffer(int recordCount); @Override OUT accept(OperatorVisitor<OUT, IN, EXCEP> visitor, IN value); @Override void workOnOOB(OutOfBandMessage message); @Override void close(); static final int BATCH_MASK; }
@Test public void testSendRuntimeFilterToProbeScanMajorFragmentNotPresent() { int probeScanId = 2; int differentProbeScanId = 5; int probeOpId = 131074; int buildMajorFragment = 1; int buildMinorFragment = 2; int buildOpId = 65541; QueryId queryId = QueryId.newBuilder().build(); ArrowBuf recvBuffer = bfTestAllocator.buffer(64); FragmentHandle fh = FragmentHandle.newBuilder() .setQueryId(queryId).setMajorFragmentId(buildMajorFragment).setMinorFragmentId(buildMinorFragment).build(); TunnelProvider tunnelProvider = mock(TunnelProvider.class); AccountingExecTunnel tunnel = mock(AccountingExecTunnel.class); doNothing().when(tunnel).sendOOBMessage(any(OutOfBandMessage.class)); when(tunnelProvider.getExecTunnel(any(NodeEndpoint.class))).thenReturn(tunnel); OperatorContext opCtx = mockOpContext(fh); EndpointsIndex ei = mock(EndpointsIndex.class); NodeEndpoint node1 = NodeEndpoint.newBuilder().build(); when(ei.getNodeEndpoint(any(Integer.class))).thenReturn(node1); when(opCtx.getEndpointsIndex()).thenReturn(ei); when(opCtx.getTunnelProvider()).thenReturn(tunnelProvider); HashJoinPOP popConfig = mockPopConfig(newRuntimeFilterInfo(false, "col1")); OpProps props = mock(OpProps.class); when(props.getOperatorId()).thenReturn(buildOpId); when(popConfig.getProps()).thenReturn(props); VectorizedHashJoinOperator joinOp = spy(new VectorizedHashJoinOperator(opCtx, popConfig)); BloomFilter bloomFilter = mockedBloom().get(); when(bloomFilter.getDataBuffer()).thenReturn(recvBuffer); when(bloomFilter.getExpectedFPP()).thenReturn(0.001D); RuntimeFilter filter = RuntimeFilter.newBuilder() .setProbeScanMajorFragmentId(probeScanId) .setProbeScanOperatorId(probeOpId) .setPartitionColumnFilter(ExecProtos.CompositeColumnFilter.newBuilder() .addColumns("col1") .setFilterType(ExecProtos.RuntimeFilterType.BLOOM_FILTER) .setSizeBytes(64).build()) .build(); FragmentAssignment assignment1 = FragmentAssignment.newBuilder() .addAllMinorFragmentId(Lists.newArrayList(1, 3, 5)).setAssignmentIndex(1).build(); FragmentAssignment assignment2 = FragmentAssignment.newBuilder() .addAllMinorFragmentId(Lists.newArrayList(0, 2, 4)).setAssignmentIndex(2).build(); MajorFragmentAssignment majorFragmentAssignment = MajorFragmentAssignment.newBuilder() .setMajorFragmentId(differentProbeScanId) .addAllAllAssignment(Lists.newArrayList(assignment1, assignment2)) .build(); when(opCtx.getExtMajorFragmentAssignments(eq(differentProbeScanId))).thenReturn(majorFragmentAssignment); when(opCtx.getExtMajorFragmentAssignments(eq(probeScanId))).thenReturn(null); ArgumentCaptor<OutOfBandMessage> oobMessageCaptor = ArgumentCaptor.forClass(OutOfBandMessage.class); joinOp.sendRuntimeFilterToProbeScan(filter, Optional.of(bloomFilter)); verify(tunnel, never()).sendOOBMessage(oobMessageCaptor.capture()); }
@VisibleForTesting void sendRuntimeFilterToProbeScan(RuntimeFilter filter, Optional<BloomFilter> partitionColFilter) { logger.debug("Sending join runtime filter to probe scan {}:{}, Filter {}", filter.getProbeScanOperatorId(), filter.getProbeScanMajorFragmentId(), partitionColFilter); logger.debug("Partition col filter fpp {}", partitionColFilter.map(BloomFilter::getExpectedFPP).orElse(-1D)); final MajorFragmentAssignment majorFragmentAssignment = context.getExtMajorFragmentAssignments(filter.getProbeScanMajorFragmentId()); try(ArrowBuf bloomFilterBuf = partitionColFilter.map(bf -> bf.getDataBuffer()).orElse(null)) { if (majorFragmentAssignment==null) { logger.warn("Major fragment assignment for probe scan id {} is null. Dropping the runtime filter.", filter.getProbeScanOperatorId()); return; } for (FragmentAssignment assignment : majorFragmentAssignment.getAllAssignmentList()) { try (RollbackCloseable closeOnErrSend = new RollbackCloseable()) { logger.info("Sending filter to OpId {}, Frag {}:{}", filter.getProbeScanOperatorId(), filter.getProbeScanMajorFragmentId(), assignment.getMinorFragmentIdList()); final OutOfBandMessage message = new OutOfBandMessage( context.getFragmentHandle().getQueryId(), filter.getProbeScanMajorFragmentId(), assignment.getMinorFragmentIdList(), filter.getProbeScanOperatorId(), context.getFragmentHandle().getMajorFragmentId(), context.getFragmentHandle().getMinorFragmentId(), config.getProps().getOperatorId(), new OutOfBandMessage.Payload(filter), bloomFilterBuf, true); closeOnErrSend.add(bloomFilterBuf); final NodeEndpoint endpoint = context.getEndpointsIndex().getNodeEndpoint(assignment.getAssignmentIndex()); context.getTunnelProvider().getExecTunnel(endpoint).sendOOBMessage(message); closeOnErrSend.commit(); } catch (Exception e) { logger.warn("Error while sending runtime filter to minor fragments " + assignment.getMinorFragmentIdList(), e); } } } }
VectorizedHashJoinOperator implements DualInputOperator { @VisibleForTesting void sendRuntimeFilterToProbeScan(RuntimeFilter filter, Optional<BloomFilter> partitionColFilter) { logger.debug("Sending join runtime filter to probe scan {}:{}, Filter {}", filter.getProbeScanOperatorId(), filter.getProbeScanMajorFragmentId(), partitionColFilter); logger.debug("Partition col filter fpp {}", partitionColFilter.map(BloomFilter::getExpectedFPP).orElse(-1D)); final MajorFragmentAssignment majorFragmentAssignment = context.getExtMajorFragmentAssignments(filter.getProbeScanMajorFragmentId()); try(ArrowBuf bloomFilterBuf = partitionColFilter.map(bf -> bf.getDataBuffer()).orElse(null)) { if (majorFragmentAssignment==null) { logger.warn("Major fragment assignment for probe scan id {} is null. Dropping the runtime filter.", filter.getProbeScanOperatorId()); return; } for (FragmentAssignment assignment : majorFragmentAssignment.getAllAssignmentList()) { try (RollbackCloseable closeOnErrSend = new RollbackCloseable()) { logger.info("Sending filter to OpId {}, Frag {}:{}", filter.getProbeScanOperatorId(), filter.getProbeScanMajorFragmentId(), assignment.getMinorFragmentIdList()); final OutOfBandMessage message = new OutOfBandMessage( context.getFragmentHandle().getQueryId(), filter.getProbeScanMajorFragmentId(), assignment.getMinorFragmentIdList(), filter.getProbeScanOperatorId(), context.getFragmentHandle().getMajorFragmentId(), context.getFragmentHandle().getMinorFragmentId(), config.getProps().getOperatorId(), new OutOfBandMessage.Payload(filter), bloomFilterBuf, true); closeOnErrSend.add(bloomFilterBuf); final NodeEndpoint endpoint = context.getEndpointsIndex().getNodeEndpoint(assignment.getAssignmentIndex()); context.getTunnelProvider().getExecTunnel(endpoint).sendOOBMessage(message); closeOnErrSend.commit(); } catch (Exception e) { logger.warn("Error while sending runtime filter to minor fragments " + assignment.getMinorFragmentIdList(), e); } } } } }
VectorizedHashJoinOperator implements DualInputOperator { @VisibleForTesting void sendRuntimeFilterToProbeScan(RuntimeFilter filter, Optional<BloomFilter> partitionColFilter) { logger.debug("Sending join runtime filter to probe scan {}:{}, Filter {}", filter.getProbeScanOperatorId(), filter.getProbeScanMajorFragmentId(), partitionColFilter); logger.debug("Partition col filter fpp {}", partitionColFilter.map(BloomFilter::getExpectedFPP).orElse(-1D)); final MajorFragmentAssignment majorFragmentAssignment = context.getExtMajorFragmentAssignments(filter.getProbeScanMajorFragmentId()); try(ArrowBuf bloomFilterBuf = partitionColFilter.map(bf -> bf.getDataBuffer()).orElse(null)) { if (majorFragmentAssignment==null) { logger.warn("Major fragment assignment for probe scan id {} is null. Dropping the runtime filter.", filter.getProbeScanOperatorId()); return; } for (FragmentAssignment assignment : majorFragmentAssignment.getAllAssignmentList()) { try (RollbackCloseable closeOnErrSend = new RollbackCloseable()) { logger.info("Sending filter to OpId {}, Frag {}:{}", filter.getProbeScanOperatorId(), filter.getProbeScanMajorFragmentId(), assignment.getMinorFragmentIdList()); final OutOfBandMessage message = new OutOfBandMessage( context.getFragmentHandle().getQueryId(), filter.getProbeScanMajorFragmentId(), assignment.getMinorFragmentIdList(), filter.getProbeScanOperatorId(), context.getFragmentHandle().getMajorFragmentId(), context.getFragmentHandle().getMinorFragmentId(), config.getProps().getOperatorId(), new OutOfBandMessage.Payload(filter), bloomFilterBuf, true); closeOnErrSend.add(bloomFilterBuf); final NodeEndpoint endpoint = context.getEndpointsIndex().getNodeEndpoint(assignment.getAssignmentIndex()); context.getTunnelProvider().getExecTunnel(endpoint).sendOOBMessage(message); closeOnErrSend.commit(); } catch (Exception e) { logger.warn("Error while sending runtime filter to minor fragments " + assignment.getMinorFragmentIdList(), e); } } } } VectorizedHashJoinOperator(OperatorContext context, HashJoinPOP popConfig); }
VectorizedHashJoinOperator implements DualInputOperator { @VisibleForTesting void sendRuntimeFilterToProbeScan(RuntimeFilter filter, Optional<BloomFilter> partitionColFilter) { logger.debug("Sending join runtime filter to probe scan {}:{}, Filter {}", filter.getProbeScanOperatorId(), filter.getProbeScanMajorFragmentId(), partitionColFilter); logger.debug("Partition col filter fpp {}", partitionColFilter.map(BloomFilter::getExpectedFPP).orElse(-1D)); final MajorFragmentAssignment majorFragmentAssignment = context.getExtMajorFragmentAssignments(filter.getProbeScanMajorFragmentId()); try(ArrowBuf bloomFilterBuf = partitionColFilter.map(bf -> bf.getDataBuffer()).orElse(null)) { if (majorFragmentAssignment==null) { logger.warn("Major fragment assignment for probe scan id {} is null. Dropping the runtime filter.", filter.getProbeScanOperatorId()); return; } for (FragmentAssignment assignment : majorFragmentAssignment.getAllAssignmentList()) { try (RollbackCloseable closeOnErrSend = new RollbackCloseable()) { logger.info("Sending filter to OpId {}, Frag {}:{}", filter.getProbeScanOperatorId(), filter.getProbeScanMajorFragmentId(), assignment.getMinorFragmentIdList()); final OutOfBandMessage message = new OutOfBandMessage( context.getFragmentHandle().getQueryId(), filter.getProbeScanMajorFragmentId(), assignment.getMinorFragmentIdList(), filter.getProbeScanOperatorId(), context.getFragmentHandle().getMajorFragmentId(), context.getFragmentHandle().getMinorFragmentId(), config.getProps().getOperatorId(), new OutOfBandMessage.Payload(filter), bloomFilterBuf, true); closeOnErrSend.add(bloomFilterBuf); final NodeEndpoint endpoint = context.getEndpointsIndex().getNodeEndpoint(assignment.getAssignmentIndex()); context.getTunnelProvider().getExecTunnel(endpoint).sendOOBMessage(message); closeOnErrSend.commit(); } catch (Exception e) { logger.warn("Error while sending runtime filter to minor fragments " + assignment.getMinorFragmentIdList(), e); } } } } VectorizedHashJoinOperator(OperatorContext context, HashJoinPOP popConfig); @Override State getState(); VectorAccessible setup(VectorAccessible left, VectorAccessible right); @Override void consumeDataRight(int records); @Override void noMoreToConsumeRight(); @Override void consumeDataLeft(int records); @Override int outputData(); @Override void noMoreToConsumeLeft(); ArrowBuf newLinksBuffer(int recordCount); @Override OUT accept(OperatorVisitor<OUT, IN, EXCEP> visitor, IN value); @Override void workOnOOB(OutOfBandMessage message); @Override void close(); }
VectorizedHashJoinOperator implements DualInputOperator { @VisibleForTesting void sendRuntimeFilterToProbeScan(RuntimeFilter filter, Optional<BloomFilter> partitionColFilter) { logger.debug("Sending join runtime filter to probe scan {}:{}, Filter {}", filter.getProbeScanOperatorId(), filter.getProbeScanMajorFragmentId(), partitionColFilter); logger.debug("Partition col filter fpp {}", partitionColFilter.map(BloomFilter::getExpectedFPP).orElse(-1D)); final MajorFragmentAssignment majorFragmentAssignment = context.getExtMajorFragmentAssignments(filter.getProbeScanMajorFragmentId()); try(ArrowBuf bloomFilterBuf = partitionColFilter.map(bf -> bf.getDataBuffer()).orElse(null)) { if (majorFragmentAssignment==null) { logger.warn("Major fragment assignment for probe scan id {} is null. Dropping the runtime filter.", filter.getProbeScanOperatorId()); return; } for (FragmentAssignment assignment : majorFragmentAssignment.getAllAssignmentList()) { try (RollbackCloseable closeOnErrSend = new RollbackCloseable()) { logger.info("Sending filter to OpId {}, Frag {}:{}", filter.getProbeScanOperatorId(), filter.getProbeScanMajorFragmentId(), assignment.getMinorFragmentIdList()); final OutOfBandMessage message = new OutOfBandMessage( context.getFragmentHandle().getQueryId(), filter.getProbeScanMajorFragmentId(), assignment.getMinorFragmentIdList(), filter.getProbeScanOperatorId(), context.getFragmentHandle().getMajorFragmentId(), context.getFragmentHandle().getMinorFragmentId(), config.getProps().getOperatorId(), new OutOfBandMessage.Payload(filter), bloomFilterBuf, true); closeOnErrSend.add(bloomFilterBuf); final NodeEndpoint endpoint = context.getEndpointsIndex().getNodeEndpoint(assignment.getAssignmentIndex()); context.getTunnelProvider().getExecTunnel(endpoint).sendOOBMessage(message); closeOnErrSend.commit(); } catch (Exception e) { logger.warn("Error while sending runtime filter to minor fragments " + assignment.getMinorFragmentIdList(), e); } } } } VectorizedHashJoinOperator(OperatorContext context, HashJoinPOP popConfig); @Override State getState(); VectorAccessible setup(VectorAccessible left, VectorAccessible right); @Override void consumeDataRight(int records); @Override void noMoreToConsumeRight(); @Override void consumeDataLeft(int records); @Override int outputData(); @Override void noMoreToConsumeLeft(); ArrowBuf newLinksBuffer(int recordCount); @Override OUT accept(OperatorVisitor<OUT, IN, EXCEP> visitor, IN value); @Override void workOnOOB(OutOfBandMessage message); @Override void close(); static final int BATCH_MASK; }
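Note on the send loop above: the bloom-filter buffer is registered with a RollbackCloseable so that it is only released if the send fails before commit() is reached. A minimal, generic sketch of that rollback-on-error pattern follows; the class and method names are illustrative assumptions, not the actual com.dremio RollbackCloseable API.

import java.util.ArrayDeque;
import java.util.Deque;

// Illustrative rollback-on-error pattern (assumed names, not Dremio's RollbackCloseable):
// resources added before commit() are closed if the block exits exceptionally,
// and left untouched once commit() marks the hand-off as successful.
final class RollbackSketch implements AutoCloseable {
  private final Deque<AutoCloseable> pending = new ArrayDeque<>();
  private boolean committed;

  void add(AutoCloseable resource) { pending.push(resource); }
  void commit() { committed = true; }

  @Override
  public void close() throws Exception {
    if (committed) {
      return; // success: ownership of the resources moved elsewhere
    }
    while (!pending.isEmpty()) {
      pending.pop().close(); // failure: release in reverse registration order
    }
  }

  public static void main(String[] args) throws Exception {
    try (RollbackSketch guard = new RollbackSketch()) {
      guard.add(() -> System.out.println("released only on failure"));
      // ... attempt the send here ...
      guard.commit(); // reached only if the send succeeded
    }
  }
}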
@Test public void randomBits() throws Exception { final int count = 8*1024*1024; BitSet bitSet = new BitSet(count); final Random rand = new Random(); try (MatchBitSet matchBitSet = new MatchBitSet(count, allocator)) { for (int i = 0; i < count; i ++) { int val = rand.nextInt(10); if (val > 3) { bitSet.set(i); matchBitSet.set(i); } } validateBits(matchBitSet, bitSet, count); } }
public void set(final int index) { final int wordNum = index >>> LONG_TO_BITS_SHIFT; final int bit = index & BIT_OFFSET_MUSK; final long bitMask = 1L << bit; final long wordAddr = bufferAddr + wordNum * BYTES_PER_WORD; PlatformDependent.putLong(wordAddr, PlatformDependent.getLong(wordAddr) | bitMask); }
MatchBitSet implements AutoCloseable { public void set(final int index) { final int wordNum = index >>> LONG_TO_BITS_SHIFT; final int bit = index & BIT_OFFSET_MUSK; final long bitMask = 1L << bit; final long wordAddr = bufferAddr + wordNum * BYTES_PER_WORD; PlatformDependent.putLong(wordAddr, PlatformDependent.getLong(wordAddr) | bitMask); } }
MatchBitSet implements AutoCloseable { public void set(final int index) { final int wordNum = index >>> LONG_TO_BITS_SHIFT; final int bit = index & BIT_OFFSET_MUSK; final long bitMask = 1L << bit; final long wordAddr = bufferAddr + wordNum * BYTES_PER_WORD; PlatformDependent.putLong(wordAddr, PlatformDependent.getLong(wordAddr) | bitMask); } MatchBitSet(final int numBits, final BufferAllocator allocator); }
MatchBitSet implements AutoCloseable { public void set(final int index) { final int wordNum = index >>> LONG_TO_BITS_SHIFT; final int bit = index & BIT_OFFSET_MUSK; final long bitMask = 1L << bit; final long wordAddr = bufferAddr + wordNum * BYTES_PER_WORD; PlatformDependent.putLong(wordAddr, PlatformDependent.getLong(wordAddr) | bitMask); } MatchBitSet(final int numBits, final BufferAllocator allocator); void set(final int index); boolean get(final int index); int nextUnSetBit(final int index); int cardinality(); @Override void close(); }
MatchBitSet implements AutoCloseable { public void set(final int index) { final int wordNum = index >>> LONG_TO_BITS_SHIFT; final int bit = index & BIT_OFFSET_MUSK; final long bitMask = 1L << bit; final long wordAddr = bufferAddr + wordNum * BYTES_PER_WORD; PlatformDependent.putLong(wordAddr, PlatformDependent.getLong(wordAddr) | bitMask); } MatchBitSet(final int numBits, final BufferAllocator allocator); void set(final int index); boolean get(final int index); int nextUnSetBit(final int index); int cardinality(); @Override void close(); }
@Test public void fullBits() throws Exception { final int count = 256*1024; BitSet bitSet = new BitSet(count); try (MatchBitSet matchBitSet = new MatchBitSet(count, allocator)) { for (int i = 0; i < count; i++) { bitSet.set(i); matchBitSet.set(i); } validateBits(matchBitSet, bitSet, count); } }
public void set(final int index) { final int wordNum = index >>> LONG_TO_BITS_SHIFT; final int bit = index & BIT_OFFSET_MUSK; final long bitMask = 1L << bit; final long wordAddr = bufferAddr + wordNum * BYTES_PER_WORD; PlatformDependent.putLong(wordAddr, PlatformDependent.getLong(wordAddr) | bitMask); }
MatchBitSet implements AutoCloseable { public void set(final int index) { final int wordNum = index >>> LONG_TO_BITS_SHIFT; final int bit = index & BIT_OFFSET_MUSK; final long bitMask = 1L << bit; final long wordAddr = bufferAddr + wordNum * BYTES_PER_WORD; PlatformDependent.putLong(wordAddr, PlatformDependent.getLong(wordAddr) | bitMask); } }
MatchBitSet implements AutoCloseable { public void set(final int index) { final int wordNum = index >>> LONG_TO_BITS_SHIFT; final int bit = index & BIT_OFFSET_MUSK; final long bitMask = 1L << bit; final long wordAddr = bufferAddr + wordNum * BYTES_PER_WORD; PlatformDependent.putLong(wordAddr, PlatformDependent.getLong(wordAddr) | bitMask); } MatchBitSet(final int numBits, final BufferAllocator allocator); }
MatchBitSet implements AutoCloseable { public void set(final int index) { final int wordNum = index >>> LONG_TO_BITS_SHIFT; final int bit = index & BIT_OFFSET_MUSK; final long bitMask = 1L << bit; final long wordAddr = bufferAddr + wordNum * BYTES_PER_WORD; PlatformDependent.putLong(wordAddr, PlatformDependent.getLong(wordAddr) | bitMask); } MatchBitSet(final int numBits, final BufferAllocator allocator); void set(final int index); boolean get(final int index); int nextUnSetBit(final int index); int cardinality(); @Override void close(); }
MatchBitSet implements AutoCloseable { public void set(final int index) { final int wordNum = index >>> LONG_TO_BITS_SHIFT; final int bit = index & BIT_OFFSET_MUSK; final long bitMask = 1L << bit; final long wordAddr = bufferAddr + wordNum * BYTES_PER_WORD; PlatformDependent.putLong(wordAddr, PlatformDependent.getLong(wordAddr) | bitMask); } MatchBitSet(final int numBits, final BufferAllocator allocator); void set(final int index); boolean get(final int index); int nextUnSetBit(final int index); int cardinality(); @Override void close(); }
@Test public void specifiedBits() throws Exception { final int count = 256*1024 + 13; BitSet bitSet = new BitSet(count); try (MatchBitSet matchBitSet = new MatchBitSet(count, allocator)) { for (int i = 0; i < count; i += WORD_BITS) { if ((i / WORD_BITS) % 3 == 0) { for (int j = 0; j < WORD_BITS; j++) { bitSet.set(i + j); matchBitSet.set(i + j); } } else if ((i / WORD_BITS) % 3 == 1) { for (int j = 0; j < WORD_BITS; j++) { if (j % 3 == 0) { bitSet.set(i + j); matchBitSet.set(i + j); } } } else { } } validateBits(matchBitSet, bitSet, count); } }
public void set(final int index) { final int wordNum = index >>> LONG_TO_BITS_SHIFT; final int bit = index & BIT_OFFSET_MUSK; final long bitMask = 1L << bit; final long wordAddr = bufferAddr + wordNum * BYTES_PER_WORD; PlatformDependent.putLong(wordAddr, PlatformDependent.getLong(wordAddr) | bitMask); }
MatchBitSet implements AutoCloseable { public void set(final int index) { final int wordNum = index >>> LONG_TO_BITS_SHIFT; final int bit = index & BIT_OFFSET_MUSK; final long bitMask = 1L << bit; final long wordAddr = bufferAddr + wordNum * BYTES_PER_WORD; PlatformDependent.putLong(wordAddr, PlatformDependent.getLong(wordAddr) | bitMask); } }
MatchBitSet implements AutoCloseable { public void set(final int index) { final int wordNum = index >>> LONG_TO_BITS_SHIFT; final int bit = index & BIT_OFFSET_MUSK; final long bitMask = 1L << bit; final long wordAddr = bufferAddr + wordNum * BYTES_PER_WORD; PlatformDependent.putLong(wordAddr, PlatformDependent.getLong(wordAddr) | bitMask); } MatchBitSet(final int numBits, final BufferAllocator allocator); }
MatchBitSet implements AutoCloseable { public void set(final int index) { final int wordNum = index >>> LONG_TO_BITS_SHIFT; final int bit = index & BIT_OFFSET_MUSK; final long bitMask = 1L << bit; final long wordAddr = bufferAddr + wordNum * BYTES_PER_WORD; PlatformDependent.putLong(wordAddr, PlatformDependent.getLong(wordAddr) | bitMask); } MatchBitSet(final int numBits, final BufferAllocator allocator); void set(final int index); boolean get(final int index); int nextUnSetBit(final int index); int cardinality(); @Override void close(); }
MatchBitSet implements AutoCloseable { public void set(final int index) { final int wordNum = index >>> LONG_TO_BITS_SHIFT; final int bit = index & BIT_OFFSET_MUSK; final long bitMask = 1L << bit; final long wordAddr = bufferAddr + wordNum * BYTES_PER_WORD; PlatformDependent.putLong(wordAddr, PlatformDependent.getLong(wordAddr) | bitMask); } MatchBitSet(final int numBits, final BufferAllocator allocator); void set(final int index); boolean get(final int index); int nextUnSetBit(final int index); int cardinality(); @Override void close(); }
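The MatchBitSet.set implementation exercised by the tests above splits the bit index into a 64-bit word number (index >>> 6) and a bit position inside that word (index & 63), then ORs a single-bit mask into the word at the computed direct-memory address. A small on-heap sketch of the same arithmetic, using a plain long[] instead of an ArrowBuf address (class and field names here are assumptions for illustration):

// On-heap sketch of the word/bit arithmetic behind MatchBitSet.set/get.
// Names are illustrative; the real class works on direct memory from a BufferAllocator.
final class BitWordsSketch {
  private static final int LONG_TO_BITS_SHIFT = 6; // 2^6 = 64 bits per word
  private static final int BIT_OFFSET_MASK = 63;   // bit position within a word

  private final long[] words;

  BitWordsSketch(int numBits) {
    words = new long[(numBits + 63) >>> LONG_TO_BITS_SHIFT]; // round up to whole words
  }

  void set(int index) {
    words[index >>> LONG_TO_BITS_SHIFT] |= 1L << (index & BIT_OFFSET_MASK);
  }

  boolean get(int index) {
    return (words[index >>> LONG_TO_BITS_SHIFT] & (1L << (index & BIT_OFFSET_MASK))) != 0;
  }

  public static void main(String[] args) {
    BitWordsSketch bits = new BitWordsSketch(128);
    bits.set(5);
    bits.set(70);
    System.out.println(bits.get(5) + " " + bits.get(64) + " " + bits.get(70)); // true false true
  }
}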
@Test public void testPrepareBloomFilter() throws Exception { List<AutoCloseable> closeables = new ArrayList<>(); try (ArrowBuf keyBuf = testAllocator.buffer(9); LBlockHashTableEight table = new LBlockHashTableEight(HashConfig.getDefault(), testAllocator, 16)) { Set<Long> dataSet = generatedData(10); dataSet.stream().forEach(key -> table.insert(key, (int) HashComputation.computeHash(key))); final Optional<BloomFilter> bloomFilterOptional = table.prepareBloomFilter(false); assertTrue(bloomFilterOptional.isPresent()); closeables.add(bloomFilterOptional.get()); dataSet.stream().forEach(key -> assertTrue(bloomFilterOptional.get().mightContain(writeKey(keyBuf, key), 9))); Set<Long> differentData = generatedData(100); long fpCount = differentData.stream() .filter(key -> !dataSet.contains(key)) .filter(key -> bloomFilterOptional.get().mightContain(writeKey(keyBuf, key), 9)).count(); assertTrue("False positive count is high - " + fpCount, fpCount < 5); BloomFilter bloomFilter = bloomFilterOptional.get(); assertFalse(bloomFilter.mightContain(writeNull(keyBuf), 9)); table.insertNull(); BloomFilter bloomFilter2 = table.prepareBloomFilter(false).get(); closeables.add(bloomFilter2); assertTrue(bloomFilter2.mightContain(writeNull(keyBuf), 9)); } finally { AutoCloseables.close(closeables); } }
public Optional<BloomFilter> prepareBloomFilter(final boolean sizeDynamically) throws Exception { final long bloomFilterSize = sizeDynamically ? Math.min(BloomFilter.getOptimalSize(size()), BLOOMFILTER_MAX_SIZE) : BLOOMFILTER_MAX_SIZE; try (ArrowBuf keyHolder = allocator.buffer(9); RollbackCloseable closeOnErr = new RollbackCloseable()) { final BloomFilter bloomFilter = new BloomFilter(allocator, Thread.currentThread().getName(), bloomFilterSize); closeOnErr.add(bloomFilter); bloomFilter.setup(); for (int chunk = 0; chunk < tableFixedAddresses.length; chunk++) { final long chunkAddr = tableFixedAddresses[chunk]; final long chunkEnd = chunkAddr + (MAX_VALUES_PER_BATCH * BLOCK_WIDTH); for (long blockAddr = chunkAddr; blockAddr < chunkEnd; blockAddr += BLOCK_WIDTH) { final long key = PlatformDependent.getLong(blockAddr); if (key == this.freeValue) { continue; } keyHolder.writerIndex(0); final byte validityByte = (key == NULL_KEY_VALUE) ? (byte)0x00 : (byte)0x01; keyHolder.writeByte(validityByte); keyHolder.writeLong(key); bloomFilter.put(keyHolder, 9); } } closeOnErr.commit(); return Optional.of(bloomFilter); } }
LBlockHashTableEight implements AutoCloseable { public Optional<BloomFilter> prepareBloomFilter(final boolean sizeDynamically) throws Exception { final long bloomFilterSize = sizeDynamically ? Math.min(BloomFilter.getOptimalSize(size()), BLOOMFILTER_MAX_SIZE) : BLOOMFILTER_MAX_SIZE; try (ArrowBuf keyHolder = allocator.buffer(9); RollbackCloseable closeOnErr = new RollbackCloseable()) { final BloomFilter bloomFilter = new BloomFilter(allocator, Thread.currentThread().getName(), bloomFilterSize); closeOnErr.add(bloomFilter); bloomFilter.setup(); for (int chunk = 0; chunk < tableFixedAddresses.length; chunk++) { final long chunkAddr = tableFixedAddresses[chunk]; final long chunkEnd = chunkAddr + (MAX_VALUES_PER_BATCH * BLOCK_WIDTH); for (long blockAddr = chunkAddr; blockAddr < chunkEnd; blockAddr += BLOCK_WIDTH) { final long key = PlatformDependent.getLong(blockAddr); if (key == this.freeValue) { continue; } keyHolder.writerIndex(0); final byte validityByte = (key == NULL_KEY_VALUE) ? (byte)0x00 : (byte)0x01; keyHolder.writeByte(validityByte); keyHolder.writeLong(key); bloomFilter.put(keyHolder, 9); } } closeOnErr.commit(); return Optional.of(bloomFilter); } } }
LBlockHashTableEight implements AutoCloseable { public Optional<BloomFilter> prepareBloomFilter(final boolean sizeDynamically) throws Exception { final long bloomFilterSize = sizeDynamically ? Math.min(BloomFilter.getOptimalSize(size()), BLOOMFILTER_MAX_SIZE) : BLOOMFILTER_MAX_SIZE; try (ArrowBuf keyHolder = allocator.buffer(9); RollbackCloseable closeOnErr = new RollbackCloseable()) { final BloomFilter bloomFilter = new BloomFilter(allocator, Thread.currentThread().getName(), bloomFilterSize); closeOnErr.add(bloomFilter); bloomFilter.setup(); for (int chunk = 0; chunk < tableFixedAddresses.length; chunk++) { final long chunkAddr = tableFixedAddresses[chunk]; final long chunkEnd = chunkAddr + (MAX_VALUES_PER_BATCH * BLOCK_WIDTH); for (long blockAddr = chunkAddr; blockAddr < chunkEnd; blockAddr += BLOCK_WIDTH) { final long key = PlatformDependent.getLong(blockAddr); if (key == this.freeValue) { continue; } keyHolder.writerIndex(0); final byte validityByte = (key == NULL_KEY_VALUE) ? (byte)0x00 : (byte)0x01; keyHolder.writeByte(validityByte); keyHolder.writeLong(key); bloomFilter.put(keyHolder, 9); } } closeOnErr.commit(); return Optional.of(bloomFilter); } } LBlockHashTableEight(HashConfig config, BufferAllocator allocator, int initialSize); }
LBlockHashTableEight implements AutoCloseable { public Optional<BloomFilter> prepareBloomFilter(final boolean sizeDynamically) throws Exception { final long bloomFilterSize = sizeDynamically ? Math.min(BloomFilter.getOptimalSize(size()), BLOOMFILTER_MAX_SIZE) : BLOOMFILTER_MAX_SIZE; try (ArrowBuf keyHolder = allocator.buffer(9); RollbackCloseable closeOnErr = new RollbackCloseable()) { final BloomFilter bloomFilter = new BloomFilter(allocator, Thread.currentThread().getName(), bloomFilterSize); closeOnErr.add(bloomFilter); bloomFilter.setup(); for (int chunk = 0; chunk < tableFixedAddresses.length; chunk++) { final long chunkAddr = tableFixedAddresses[chunk]; final long chunkEnd = chunkAddr + (MAX_VALUES_PER_BATCH * BLOCK_WIDTH); for (long blockAddr = chunkAddr; blockAddr < chunkEnd; blockAddr += BLOCK_WIDTH) { final long key = PlatformDependent.getLong(blockAddr); if (key == this.freeValue) { continue; } keyHolder.writerIndex(0); final byte validityByte = (key == NULL_KEY_VALUE) ? (byte)0x00 : (byte)0x01; keyHolder.writeByte(validityByte); keyHolder.writeLong(key); bloomFilter.put(keyHolder, 9); } } closeOnErr.commit(); return Optional.of(bloomFilter); } } LBlockHashTableEight(HashConfig config, BufferAllocator allocator, int initialSize); int insert(long key, int keyHash); int getNull(); int insertNull(); int insertNull(int oldNullKeyOrdinal); int get(long key, int keyHash); @Override int hashCode(); @Override String toString(); @Override boolean equals(Object obj); int size(); int blocks(); int capacity(); @Override void close(); long getRehashTime(TimeUnit unit); int getRehashCount(); Optional<BloomFilter> prepareBloomFilter(final boolean sizeDynamically); }
LBlockHashTableEight implements AutoCloseable { public Optional<BloomFilter> prepareBloomFilter(final boolean sizeDynamically) throws Exception { final long bloomFilterSize = sizeDynamically ? Math.min(BloomFilter.getOptimalSize(size()), BLOOMFILTER_MAX_SIZE) : BLOOMFILTER_MAX_SIZE; try (ArrowBuf keyHolder = allocator.buffer(9); RollbackCloseable closeOnErr = new RollbackCloseable()) { final BloomFilter bloomFilter = new BloomFilter(allocator, Thread.currentThread().getName(), bloomFilterSize); closeOnErr.add(bloomFilter); bloomFilter.setup(); for (int chunk = 0; chunk < tableFixedAddresses.length; chunk++) { final long chunkAddr = tableFixedAddresses[chunk]; final long chunkEnd = chunkAddr + (MAX_VALUES_PER_BATCH * BLOCK_WIDTH); for (long blockAddr = chunkAddr; blockAddr < chunkEnd; blockAddr += BLOCK_WIDTH) { final long key = PlatformDependent.getLong(blockAddr); if (key == this.freeValue) { continue; } keyHolder.writerIndex(0); final byte validityByte = (key == NULL_KEY_VALUE) ? (byte)0x00 : (byte)0x01; keyHolder.writeByte(validityByte); keyHolder.writeLong(key); bloomFilter.put(keyHolder, 9); } } closeOnErr.commit(); return Optional.of(bloomFilter); } } LBlockHashTableEight(HashConfig config, BufferAllocator allocator, int initialSize); int insert(long key, int keyHash); int getNull(); int insertNull(); int insertNull(int oldNullKeyOrdinal); int get(long key, int keyHash); @Override int hashCode(); @Override String toString(); @Override boolean equals(Object obj); int size(); int blocks(); int capacity(); @Override void close(); long getRehashTime(TimeUnit unit); int getRehashCount(); Optional<BloomFilter> prepareBloomFilter(final boolean sizeDynamically); static final int KEY_WIDTH; static final int ORDINAL_WIDTH; static final int BLOCK_WIDTH; static final int BITS_IN_CHUNK; static final int MAX_VALUES_PER_BATCH; static final int CHUNK_OFFSET_MASK; static final int POSITIVE_MASK; static final int NO_MATCH; }
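Both prepareBloomFilter above and the test's writeKey helper work with a 9-byte key image: one validity byte (0x01 for a present key, 0x00 for the null key) followed by the 8-byte key value. A hedged sketch of writing such a key into an ArrowBuf is below; the helper name is invented for illustration, and the import package assumes a recent Arrow release where ArrowBuf lives under org.apache.arrow.memory.

import org.apache.arrow.memory.ArrowBuf;
import org.apache.arrow.memory.BufferAllocator;
import org.apache.arrow.memory.RootAllocator;

// Sketch only: lays out the 9-byte key image probed against the bloom filter
// (1 validity byte + 8-byte key value). Helper and class names are assumptions.
public final class BloomKeySketch {
  static ArrowBuf writeKey(ArrowBuf keyBuf, long key) {
    keyBuf.writerIndex(0);
    keyBuf.writeByte((byte) 0x01); // non-null key; 0x00 marks the null key
    keyBuf.writeLong(key);
    return keyBuf;
  }

  public static void main(String[] args) {
    try (BufferAllocator allocator = new RootAllocator();
         ArrowBuf keyBuf = allocator.buffer(9)) {
      writeKey(keyBuf, 42L);
      // bloomFilter.mightContain(keyBuf, 9) would be probed with this layout
    }
  }
}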
@Test public void testReserveMemory() { SharedResourceGroup resourceGroup = mock(SharedResourceGroup.class); SabotConfig config = mock(SabotConfig.class); FragmentWorkQueue workQueue = mock(FragmentWorkQueue.class); TunnelProvider tunnelProvider = mock(TunnelProvider.class); EndpointsIndex endpointsIndex = new EndpointsIndex( Arrays.asList( NodeEndpoint.newBuilder().setAddress("localhost").setFabricPort(12345).build(), NodeEndpoint.newBuilder().setAddress("localhost").setFabricPort(12345).build() ) ); List<CoordExecRPC.MinorFragmentIndexEndpoint> list = Arrays.asList( MinorFragmentIndexEndpoint.newBuilder().setEndpointIndex(0).setMinorFragmentId(0).build(), MinorFragmentIndexEndpoint.newBuilder().setEndpointIndex(0).setMinorFragmentId(0).build() ); CoordExecRPC.Collector collector = CoordExecRPC.Collector.newBuilder() .setIsSpooling(true) .setOppositeMajorFragmentId(3) .setSupportsOutOfOrder(true) .addAllIncomingMinorFragmentIndex(list) .build(); ExecProtos.FragmentHandle handle = ExecProtos.FragmentHandle.newBuilder().setMajorFragmentId(2323).setMinorFragmentId(234234).build(); BufferAllocator allocator = allocatorRule.newAllocator("test-abstract-data-collector", 0, 2000000); boolean outOfMemory = false; final SchedulerService schedulerService = Mockito.mock(SchedulerService.class); final SpillService spillService = new SpillServiceImpl(DremioConfig.create(null, config), new DefaultSpillServiceOptions(), new Provider<SchedulerService>() { @Override public SchedulerService get() { return schedulerService; } }); try { AbstractDataCollector dataCollector = new AbstractDataCollector(resourceGroup, true, collector, 10240, allocator, config, handle, workQueue, tunnelProvider, spillService, endpointsIndex) { @Override protected RawBatchBuffer getBuffer(int minorFragmentId) { return null; } }; } catch (OutOfMemoryException e) { assertEquals(allocator.getPeakMemoryAllocation(), 1024*1024); outOfMemory = true; } assertTrue(outOfMemory); allocator.close(); }
@Override public synchronized void close() throws Exception { if(!closed){ final List<AutoCloseable> closeables = new ArrayList<>(); closeables.addAll(Arrays.asList(buffers)); closeables.add(new AutoCloseable() { @Override public void close() throws Exception { for (int i = 0; i < completionMessages.length; i++) { completionMessages[i].informUpstreamIfNecessary(); } } }); closeables.add(new AutoCloseable() { @Override public void close() throws Exception { closed = true; } }); AutoCloseables.close(closeables); } }
AbstractDataCollector implements DataCollector { @Override public synchronized void close() throws Exception { if(!closed){ final List<AutoCloseable> closeables = new ArrayList<>(); closeables.addAll(Arrays.asList(buffers)); closeables.add(new AutoCloseable() { @Override public void close() throws Exception { for (int i = 0; i < completionMessages.length; i++) { completionMessages[i].informUpstreamIfNecessary(); } } }); closeables.add(new AutoCloseable() { @Override public void close() throws Exception { closed = true; } }); AutoCloseables.close(closeables); } } }
AbstractDataCollector implements DataCollector { @Override public synchronized void close() throws Exception { if(!closed){ final List<AutoCloseable> closeables = new ArrayList<>(); closeables.addAll(Arrays.asList(buffers)); closeables.add(new AutoCloseable() { @Override public void close() throws Exception { for (int i = 0; i < completionMessages.length; i++) { completionMessages[i].informUpstreamIfNecessary(); } } }); closeables.add(new AutoCloseable() { @Override public void close() throws Exception { closed = true; } }); AutoCloseables.close(closeables); } } AbstractDataCollector( SharedResourceGroup resourceGroup, boolean isDiscrete, Collector collector, final int bufferCapacity, BufferAllocator allocator, SabotConfig config, FragmentHandle handle, FragmentWorkQueue workQueue, TunnelProvider tunnelProvider, SpillService spillService, EndpointsIndex endpointsIndex); }
AbstractDataCollector implements DataCollector { @Override public synchronized void close() throws Exception { if(!closed){ final List<AutoCloseable> closeables = new ArrayList<>(); closeables.addAll(Arrays.asList(buffers)); closeables.add(new AutoCloseable() { @Override public void close() throws Exception { for (int i = 0; i < completionMessages.length; i++) { completionMessages[i].informUpstreamIfNecessary(); } } }); closeables.add(new AutoCloseable() { @Override public void close() throws Exception { closed = true; } }); AutoCloseables.close(closeables); } } AbstractDataCollector( SharedResourceGroup resourceGroup, boolean isDiscrete, Collector collector, final int bufferCapacity, BufferAllocator allocator, SabotConfig config, FragmentHandle handle, FragmentWorkQueue workQueue, TunnelProvider tunnelProvider, SpillService spillService, EndpointsIndex endpointsIndex); @Override int getOppositeMajorFragmentId(); @Override RawBatchBuffer[] getBuffers(); @Override void streamCompleted(int minorFragmentId); @Override void batchArrived(int minorFragmentId, RawFragmentBatch batch); @Override int getTotalIncomingFragments(); @Override synchronized void close(); }
AbstractDataCollector implements DataCollector { @Override public synchronized void close() throws Exception { if(!closed){ final List<AutoCloseable> closeables = new ArrayList<>(); closeables.addAll(Arrays.asList(buffers)); closeables.add(new AutoCloseable() { @Override public void close() throws Exception { for (int i = 0; i < completionMessages.length; i++) { completionMessages[i].informUpstreamIfNecessary(); } } }); closeables.add(new AutoCloseable() { @Override public void close() throws Exception { closed = true; } }); AutoCloseables.close(closeables); } } AbstractDataCollector( SharedResourceGroup resourceGroup, boolean isDiscrete, Collector collector, final int bufferCapacity, BufferAllocator allocator, SabotConfig config, FragmentHandle handle, FragmentWorkQueue workQueue, TunnelProvider tunnelProvider, SpillService spillService, EndpointsIndex endpointsIndex); @Override int getOppositeMajorFragmentId(); @Override RawBatchBuffer[] getBuffers(); @Override void streamCompleted(int minorFragmentId); @Override void batchArrived(int minorFragmentId, RawFragmentBatch batch); @Override int getTotalIncomingFragments(); @Override synchronized void close(); }
@Test public void pivotDef(){ try( IntVector col1 = new IntVector("col1", allocator); IntVector col2 = new IntVector("col2", allocator); BigIntVector col3 = new BigIntVector("col3", allocator); TimeMilliVector col4 = new TimeMilliVector("col4", allocator); VarCharVector col5 = new VarCharVector("col5", allocator); VarCharVector col6 = new VarCharVector("col6", allocator); BitVector col7 = new BitVector("col7", allocator); ){ PivotDef pivot = PivotBuilder.getBlockDefinition(FluentIterable .from(ImmutableList.of(col1, col2, col3, col4, col5, col6, col7)) .transform(new Function<FieldVector, FieldVectorPair>(){ @Override public FieldVectorPair apply(FieldVector input) { return new FieldVectorPair(input, input); }}) .toList()); assertEquals(2, pivot.getVariableCount()); assertEquals( 4 + 4 + 4 + 8 + 4 + 0 + 0 + 0 + 4 , pivot.getBlockWidth()); assertEquals(8, pivot.getBitCount()); assertEquals(ImmutableList.of( new VectorPivotDef(FieldType.FOUR_BYTE, 0, 0, 4, col1, col1), new VectorPivotDef(FieldType.FOUR_BYTE, 0, 1, 8, col2, col2), new VectorPivotDef(FieldType.EIGHT_BYTE, 0, 2, 12, col3, col3), new VectorPivotDef(FieldType.FOUR_BYTE, 0, 3, 20, col4, col4), new VectorPivotDef(FieldType.BIT, 0, 6, 7, col7, col7) ), pivot.getFixedPivots()); assertEquals(ImmutableList.of( new VectorPivotDef(FieldType.VARIABLE, 0, 4, 0, col5, col5), new VectorPivotDef(FieldType.VARIABLE, 0, 5, 1, col6, col6) ), pivot.getVariablePivots()); } }
public PivotDef( int blockWidth, int variableCount, int bitCount, List<VectorPivotDef> fields) { super(); this.blockWidth = blockWidth; this.variableCount = variableCount; this.bitCount = bitCount; this.vectorPivots = ImmutableList.copyOf(fields); this.fixedPivots = FluentIterable.from(vectorPivots).filter(new Predicate<VectorPivotDef>(){ @Override public boolean apply(VectorPivotDef input) { return input.getType().mode != FieldMode.VARIABLE; }}).toList(); this.bitPivots = FluentIterable.from(vectorPivots).filter(new Predicate<VectorPivotDef>(){ @Override public boolean apply(VectorPivotDef input) { return input.getType().mode == FieldMode.BIT; }}).toList(); this.nonBitFixedPivots = FluentIterable.from(vectorPivots).filter(new Predicate<VectorPivotDef>(){ @Override public boolean apply(VectorPivotDef input) { return input.getType().mode == FieldMode.FIXED; }}).toList(); this.variablePivots = FluentIterable.from(vectorPivots).filter(new Predicate<VectorPivotDef>(){ @Override public boolean apply(VectorPivotDef input) { return input.getType().mode == FieldMode.VARIABLE; }}).toList(); this.outputVectors = FluentIterable.from(vectorPivots).transform(new Function<VectorPivotDef, FieldVector>(){ @Override public FieldVector apply(VectorPivotDef input) { return input.getOutgoingVector(); }}).toList(); }
PivotDef { public PivotDef( int blockWidth, int variableCount, int bitCount, List<VectorPivotDef> fields) { super(); this.blockWidth = blockWidth; this.variableCount = variableCount; this.bitCount = bitCount; this.vectorPivots = ImmutableList.copyOf(fields); this.fixedPivots = FluentIterable.from(vectorPivots).filter(new Predicate<VectorPivotDef>(){ @Override public boolean apply(VectorPivotDef input) { return input.getType().mode != FieldMode.VARIABLE; }}).toList(); this.bitPivots = FluentIterable.from(vectorPivots).filter(new Predicate<VectorPivotDef>(){ @Override public boolean apply(VectorPivotDef input) { return input.getType().mode == FieldMode.BIT; }}).toList(); this.nonBitFixedPivots = FluentIterable.from(vectorPivots).filter(new Predicate<VectorPivotDef>(){ @Override public boolean apply(VectorPivotDef input) { return input.getType().mode == FieldMode.FIXED; }}).toList(); this.variablePivots = FluentIterable.from(vectorPivots).filter(new Predicate<VectorPivotDef>(){ @Override public boolean apply(VectorPivotDef input) { return input.getType().mode == FieldMode.VARIABLE; }}).toList(); this.outputVectors = FluentIterable.from(vectorPivots).transform(new Function<VectorPivotDef, FieldVector>(){ @Override public FieldVector apply(VectorPivotDef input) { return input.getOutgoingVector(); }}).toList(); } }
PivotDef { public PivotDef( int blockWidth, int variableCount, int bitCount, List<VectorPivotDef> fields) { super(); this.blockWidth = blockWidth; this.variableCount = variableCount; this.bitCount = bitCount; this.vectorPivots = ImmutableList.copyOf(fields); this.fixedPivots = FluentIterable.from(vectorPivots).filter(new Predicate<VectorPivotDef>(){ @Override public boolean apply(VectorPivotDef input) { return input.getType().mode != FieldMode.VARIABLE; }}).toList(); this.bitPivots = FluentIterable.from(vectorPivots).filter(new Predicate<VectorPivotDef>(){ @Override public boolean apply(VectorPivotDef input) { return input.getType().mode == FieldMode.BIT; }}).toList(); this.nonBitFixedPivots = FluentIterable.from(vectorPivots).filter(new Predicate<VectorPivotDef>(){ @Override public boolean apply(VectorPivotDef input) { return input.getType().mode == FieldMode.FIXED; }}).toList(); this.variablePivots = FluentIterable.from(vectorPivots).filter(new Predicate<VectorPivotDef>(){ @Override public boolean apply(VectorPivotDef input) { return input.getType().mode == FieldMode.VARIABLE; }}).toList(); this.outputVectors = FluentIterable.from(vectorPivots).transform(new Function<VectorPivotDef, FieldVector>(){ @Override public FieldVector apply(VectorPivotDef input) { return input.getOutgoingVector(); }}).toList(); } PivotDef( int blockWidth, int variableCount, int bitCount, List<VectorPivotDef> fields); }
PivotDef { public PivotDef( int blockWidth, int variableCount, int bitCount, List<VectorPivotDef> fields) { super(); this.blockWidth = blockWidth; this.variableCount = variableCount; this.bitCount = bitCount; this.vectorPivots = ImmutableList.copyOf(fields); this.fixedPivots = FluentIterable.from(vectorPivots).filter(new Predicate<VectorPivotDef>(){ @Override public boolean apply(VectorPivotDef input) { return input.getType().mode != FieldMode.VARIABLE; }}).toList(); this.bitPivots = FluentIterable.from(vectorPivots).filter(new Predicate<VectorPivotDef>(){ @Override public boolean apply(VectorPivotDef input) { return input.getType().mode == FieldMode.BIT; }}).toList(); this.nonBitFixedPivots = FluentIterable.from(vectorPivots).filter(new Predicate<VectorPivotDef>(){ @Override public boolean apply(VectorPivotDef input) { return input.getType().mode == FieldMode.FIXED; }}).toList(); this.variablePivots = FluentIterable.from(vectorPivots).filter(new Predicate<VectorPivotDef>(){ @Override public boolean apply(VectorPivotDef input) { return input.getType().mode == FieldMode.VARIABLE; }}).toList(); this.outputVectors = FluentIterable.from(vectorPivots).transform(new Function<VectorPivotDef, FieldVector>(){ @Override public FieldVector apply(VectorPivotDef input) { return input.getOutgoingVector(); }}).toList(); } PivotDef( int blockWidth, int variableCount, int bitCount, List<VectorPivotDef> fields); ImmutableList<VectorPivotDef> getBitPivots(); ImmutableList<VectorPivotDef> getNonBitFixedPivots(); int getBlockWidth(); int getVariableCount(); int getBitCount(); List<VectorPivotDef> getVectorPivots(); List<VectorPivotDef> getVariablePivots(); List<VectorPivotDef> getFixedPivots(); List<FieldVector> getOutputVectors(); }
PivotDef { public PivotDef( int blockWidth, int variableCount, int bitCount, List<VectorPivotDef> fields) { super(); this.blockWidth = blockWidth; this.variableCount = variableCount; this.bitCount = bitCount; this.vectorPivots = ImmutableList.copyOf(fields); this.fixedPivots = FluentIterable.from(vectorPivots).filter(new Predicate<VectorPivotDef>(){ @Override public boolean apply(VectorPivotDef input) { return input.getType().mode != FieldMode.VARIABLE; }}).toList(); this.bitPivots = FluentIterable.from(vectorPivots).filter(new Predicate<VectorPivotDef>(){ @Override public boolean apply(VectorPivotDef input) { return input.getType().mode == FieldMode.BIT; }}).toList(); this.nonBitFixedPivots = FluentIterable.from(vectorPivots).filter(new Predicate<VectorPivotDef>(){ @Override public boolean apply(VectorPivotDef input) { return input.getType().mode == FieldMode.FIXED; }}).toList(); this.variablePivots = FluentIterable.from(vectorPivots).filter(new Predicate<VectorPivotDef>(){ @Override public boolean apply(VectorPivotDef input) { return input.getType().mode == FieldMode.VARIABLE; }}).toList(); this.outputVectors = FluentIterable.from(vectorPivots).transform(new Function<VectorPivotDef, FieldVector>(){ @Override public FieldVector apply(VectorPivotDef input) { return input.getOutgoingVector(); }}).toList(); } PivotDef( int blockWidth, int variableCount, int bitCount, List<VectorPivotDef> fields); ImmutableList<VectorPivotDef> getBitPivots(); ImmutableList<VectorPivotDef> getNonBitFixedPivots(); int getBlockWidth(); int getVariableCount(); int getBitCount(); List<VectorPivotDef> getVectorPivots(); List<VectorPivotDef> getVariablePivots(); List<VectorPivotDef> getFixedPivots(); List<FieldVector> getOutputVectors(); }
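A hedged reading of the block width asserted in the pivotDef test above, inferred from the expected VectorPivotDefs rather than from PivotBuilder documentation: the leading 4-byte word carries the bit section (7 validity bits plus the data bit of col7, matching bitCount == 8), the fixed-width columns follow, and a trailing 4-byte word holds the offset for the two variable-length columns, summing to the asserted 28-byte block width.

// Hedged layout interpretation for the pivotDef test (an inference, not an API fact):
//   bytes  0- 3 : bit section (7 validity bits + col7's data bit -> bitCount 8)
//   bytes  4- 7 : col1 (int)
//   bytes  8-11 : col2 (int)
//   bytes 12-19 : col3 (bigint)
//   bytes 20-23 : col4 (time-millis)
//   bytes 24-27 : offset word for the variable-length col5/col6 data
public final class PivotLayoutNote {
  public static void main(String[] args) {
    int bitWord = 4, col1 = 4, col2 = 4, col3 = 8, col4 = 4, varOffset = 4;
    System.out.println(bitWord + col1 + col2 + col3 + col4 + varOffset); // 28
  }
}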
@Test public void boolNullEveryOther() throws Exception { final int count = 1024; try ( BitVector in = new BitVector("in", allocator); BitVector out = new BitVector("out", allocator); ) { in.allocateNew(count); ArrowBuf tempBuf = allocator.buffer(1024); for (int i = 0; i < count; i ++) { if (i % 2 == 0) { in.set(i, 1); } } in.setValueCount(count); final PivotDef pivot = PivotBuilder.getBlockDefinition(new FieldVectorPair(in, out)); try ( final FixedBlockVector fbv = new FixedBlockVector(allocator, pivot.getBlockWidth()); final VariableBlockVector vbv = new VariableBlockVector(allocator, pivot.getVariableCount()); ) { fbv.ensureAvailableBlocks(count); Pivots.pivot(pivot, count, fbv, vbv); Unpivots.unpivot(pivot, fbv, vbv, 0, count); for (int i = 0; i < count; i++) { assertEquals(in.getObject(i), out.getObject(i)); } } tempBuf.release(); } }
public static int pivot(PivotDef pivot, int start, int count, FixedBlockVector fixedBlock, VariableBlockVector variable) { if (pivot.getVariableCount() > 0) { int updatedCount = pivotVariableLengths(pivot.getVariablePivots(), fixedBlock, variable, start, count); Preconditions.checkState(updatedCount <= count); count = updatedCount; } for(VectorPivotDef def : pivot.getFixedPivots()){ switch(def.getType()){ case BIT: pivotBit(def, fixedBlock, start, count); break; case FOUR_BYTE: pivot4Bytes(def, fixedBlock, start, count); break; case EIGHT_BYTE: pivot8Bytes(def, fixedBlock, start, count); break; case SIXTEEN_BYTE: pivot16Bytes(def, fixedBlock, start, count); break; case VARIABLE: default: throw new UnsupportedOperationException("Pivot: unknown type: " + Describer.describe(def.getIncomingVector().getField())); } } return count; }
BoundedPivots { public static int pivot(PivotDef pivot, int start, int count, FixedBlockVector fixedBlock, VariableBlockVector variable) { if (pivot.getVariableCount() > 0) { int updatedCount = pivotVariableLengths(pivot.getVariablePivots(), fixedBlock, variable, start, count); Preconditions.checkState(updatedCount <= count); count = updatedCount; } for(VectorPivotDef def : pivot.getFixedPivots()){ switch(def.getType()){ case BIT: pivotBit(def, fixedBlock, start, count); break; case FOUR_BYTE: pivot4Bytes(def, fixedBlock, start, count); break; case EIGHT_BYTE: pivot8Bytes(def, fixedBlock, start, count); break; case SIXTEEN_BYTE: pivot16Bytes(def, fixedBlock, start, count); break; case VARIABLE: default: throw new UnsupportedOperationException("Pivot: unknown type: " + Describer.describe(def.getIncomingVector().getField())); } } return count; } }
BoundedPivots { public static int pivot(PivotDef pivot, int start, int count, FixedBlockVector fixedBlock, VariableBlockVector variable) { if (pivot.getVariableCount() > 0) { int updatedCount = pivotVariableLengths(pivot.getVariablePivots(), fixedBlock, variable, start, count); Preconditions.checkState(updatedCount <= count); count = updatedCount; } for(VectorPivotDef def : pivot.getFixedPivots()){ switch(def.getType()){ case BIT: pivotBit(def, fixedBlock, start, count); break; case FOUR_BYTE: pivot4Bytes(def, fixedBlock, start, count); break; case EIGHT_BYTE: pivot8Bytes(def, fixedBlock, start, count); break; case SIXTEEN_BYTE: pivot16Bytes(def, fixedBlock, start, count); break; case VARIABLE: default: throw new UnsupportedOperationException("Pivot: unknown type: " + Describer.describe(def.getIncomingVector().getField())); } } return count; } }
BoundedPivots { public static int pivot(PivotDef pivot, int start, int count, FixedBlockVector fixedBlock, VariableBlockVector variable) { if (pivot.getVariableCount() > 0) { int updatedCount = pivotVariableLengths(pivot.getVariablePivots(), fixedBlock, variable, start, count); Preconditions.checkState(updatedCount <= count); count = updatedCount; } for(VectorPivotDef def : pivot.getFixedPivots()){ switch(def.getType()){ case BIT: pivotBit(def, fixedBlock, start, count); break; case FOUR_BYTE: pivot4Bytes(def, fixedBlock, start, count); break; case EIGHT_BYTE: pivot8Bytes(def, fixedBlock, start, count); break; case SIXTEEN_BYTE: pivot16Bytes(def, fixedBlock, start, count); break; case VARIABLE: default: throw new UnsupportedOperationException("Pivot: unknown type: " + Describer.describe(def.getIncomingVector().getField())); } } return count; } static int pivot(PivotDef pivot, int start, int count, FixedBlockVector fixedBlock, VariableBlockVector variable); }
BoundedPivots { public static int pivot(PivotDef pivot, int start, int count, FixedBlockVector fixedBlock, VariableBlockVector variable) { if (pivot.getVariableCount() > 0) { int updatedCount = pivotVariableLengths(pivot.getVariablePivots(), fixedBlock, variable, start, count); Preconditions.checkState(updatedCount <= count); count = updatedCount; } for(VectorPivotDef def : pivot.getFixedPivots()){ switch(def.getType()){ case BIT: pivotBit(def, fixedBlock, start, count); break; case FOUR_BYTE: pivot4Bytes(def, fixedBlock, start, count); break; case EIGHT_BYTE: pivot8Bytes(def, fixedBlock, start, count); break; case SIXTEEN_BYTE: pivot16Bytes(def, fixedBlock, start, count); break; case VARIABLE: default: throw new UnsupportedOperationException("Pivot: unknown type: " + Describer.describe(def.getIncomingVector().getField())); } } return count; } static int pivot(PivotDef pivot, int start, int count, FixedBlockVector fixedBlock, VariableBlockVector variable); }
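The boolNullEveryOther test above checks that pivoting a nullable bit column into block storage and unpivoting it back is lossless. A dependency-free sketch of the same round-trip idea, using two plain BitSets (a validity bitmap and a value bitmap) as stand-ins for the Arrow vectors; this is not the Dremio Pivots/Unpivots code:

import java.util.BitSet;
import java.util.Objects;

public class BitRoundTripSketch {
  public static void main(String[] args) {
    int count = 1024;
    Boolean[] in = new Boolean[count];
    for (int i = 0; i < count; i++) {
      in[i] = (i % 2 == 0) ? Boolean.TRUE : null;  // value on even rows, null on odd rows, like the test above
    }

    // "Pivot": split the nullable column into a validity bitmap and a value bitmap.
    BitSet validity = new BitSet(count);
    BitSet value = new BitSet(count);
    for (int i = 0; i < count; i++) {
      if (in[i] != null) {
        validity.set(i);
        value.set(i, in[i]);
      }
    }

    // "Unpivot": rebuild the nullable column and verify the round trip is lossless.
    for (int i = 0; i < count; i++) {
      Boolean out = validity.get(i) ? value.get(i) : null;
      if (!Objects.equals(in[i], out)) {
        throw new AssertionError("mismatch at row " + i);
      }
    }
    System.out.println("round trip ok");
  }
}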
@Test public void testMutatorApplyReplacePreview() { boolean preview = true; TransformResult result1 = mutator(preview).apply("foo", "foo", newValue, true); assertEquals(newHashSet("foo (new)"), result1.getAddedColumns()); assertEquals(newHashSet(), result1.getModifiedColumns()); assertEquals(newHashSet("foo"), result1.getRemovedColumns()); assertColIs(null, result1, "foo2"); assertColIs(value, result1, "foo"); assertColIs(newValue, result1, "foo (new)"); }
public TransformResult apply(String oldCol, String newCol, ExpressionBase newExp, boolean dropSourceColumn) { return apply(oldCol, newCol, newExp.wrap(), dropSourceColumn); }
DatasetStateMutator { public TransformResult apply(String oldCol, String newCol, ExpressionBase newExp, boolean dropSourceColumn) { return apply(oldCol, newCol, newExp.wrap(), dropSourceColumn); } }
DatasetStateMutator { public TransformResult apply(String oldCol, String newCol, ExpressionBase newExp, boolean dropSourceColumn) { return apply(oldCol, newCol, newExp.wrap(), dropSourceColumn); } DatasetStateMutator(String username, VirtualDatasetState virtualDatasetState, boolean preview); }
DatasetStateMutator { public TransformResult apply(String oldCol, String newCol, ExpressionBase newExp, boolean dropSourceColumn) { return apply(oldCol, newCol, newExp.wrap(), dropSourceColumn); } DatasetStateMutator(String username, VirtualDatasetState virtualDatasetState, boolean preview); void setSql(QueryMetadata metadata); void addColumn(int index, Column column); void addColumn(Column column); void moveColumn(int index, int dest); int columnCount(); void addJoin(Join join); void updateColumnTables(); String uniqueColumnName(String column); String getDatasetAlias(); void groupedBy(List<Column> newColumns, List<Column> groupBys); void addFilter(Filter filter); void setOrdersList(List<Order> columnsList); TransformResult result(); int indexOfCol(String colName); void nest(); TransformResult rename(String oldCol, String newCol); TransformResult apply(String oldCol, String newCol, ExpressionBase newExp, boolean dropSourceColumn); TransformResult apply(String oldCol, String newCol, Expression newExpWrapped, boolean dropSourceColumn); Expression findColValueForModification(String colName); Expression findColValue(String colName); boolean isGrouped(); void dropColumn(String droppedColumnName); }
DatasetStateMutator { public TransformResult apply(String oldCol, String newCol, ExpressionBase newExp, boolean dropSourceColumn) { return apply(oldCol, newCol, newExp.wrap(), dropSourceColumn); } DatasetStateMutator(String username, VirtualDatasetState virtualDatasetState, boolean preview); void setSql(QueryMetadata metadata); void addColumn(int index, Column column); void addColumn(Column column); void moveColumn(int index, int dest); int columnCount(); void addJoin(Join join); void updateColumnTables(); String uniqueColumnName(String column); String getDatasetAlias(); void groupedBy(List<Column> newColumns, List<Column> groupBys); void addFilter(Filter filter); void setOrdersList(List<Order> columnsList); TransformResult result(); int indexOfCol(String colName); void nest(); TransformResult rename(String oldCol, String newCol); TransformResult apply(String oldCol, String newCol, ExpressionBase newExp, boolean dropSourceColumn); TransformResult apply(String oldCol, String newCol, Expression newExpWrapped, boolean dropSourceColumn); Expression findColValueForModification(String colName); Expression findColValue(String colName); boolean isGrouped(); void dropColumn(String droppedColumnName); }
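The preview-mode assertions above (source column reported removed but still visible, result surfaced as "foo (new)") can be modeled with a few lines of plain Java. A hypothetical sketch of that bookkeeping, inferred only from the test's expectations and not from DatasetStateMutator internals:

import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.Map;
import java.util.Set;

public class PreviewApplySketch {
  public static void main(String[] args) {
    Map<String, String> columns = new LinkedHashMap<>();
    columns.put("foo", "oldExpr");

    Set<String> added = new LinkedHashSet<>();
    Set<String> removed = new LinkedHashSet<>();

    String oldCol = "foo", newCol = "foo", newExpr = "newExpr";
    boolean preview = true, dropSourceColumn = true;

    // When the target name collides with the dropped source column in preview mode,
    // keep the source column visible and surface the result under a "(new)" alias.
    String effectiveNewCol = (preview && dropSourceColumn && newCol.equals(oldCol)) ? newCol + " (new)" : newCol;
    columns.put(effectiveNewCol, newExpr);
    added.add(effectiveNewCol);
    if (dropSourceColumn) {
      removed.add(oldCol);           // reported as removed...
      if (!preview) {
        columns.remove(oldCol);      // ...but only physically dropped outside preview
      }
    }

    System.out.println("added=" + added + " removed=" + removed + " columns=" + columns);
    // added=[foo (new)] removed=[foo] columns={foo=oldExpr, foo (new)=newExpr}
  }
}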
@Test public void testWorkOnOOBRuntimeFilterInvalidFilterSize() { int buildMinorFragment1 = 2; int buildMajorFragment1 = 1; try (ArrowBuf oobMessageBuf = testAllocator.buffer(128)) { RuntimeFilter filter1 = newRuntimeFilter(32, "col1", "col2"); OutOfBandMessage msg1 = newOOBMessage(filter1, oobMessageBuf, buildMajorFragment1, buildMinorFragment1); RecordReader mockReader = mock(RecordReader.class); ScanOperator scanOp = new ScanOperator(mock(SubScan.class), getMockContext(), Lists.newArrayList(mockReader).iterator(), null, null, null); scanOp.workOnOOB(msg1); msg1.getBuffer().release(); verify(mockReader, never()).addRuntimeFilter(any(com.dremio.exec.store.RuntimeFilter.class)); } }
@Override public void workOnOOB(OutOfBandMessage message) { final ArrowBuf msgBuf = message.getBuffer(); final String senderInfo = String.format("Frag %d:%d, OpId %d", message.getSendingMajorFragmentId(), message.getSendingMinorFragmentId(), message.getSendingOperatorId()); if (msgBuf==null || msgBuf.capacity()==0) { logger.warn("Empty runtime filter received from {}", senderInfo); return; } msgBuf.retain(); logger.info("Filter received from {}", senderInfo); try(RollbackCloseable closeOnErr = new RollbackCloseable()) { closeOnErr.add(msgBuf); final BloomFilter bloomFilter = BloomFilter.prepareFrom(msgBuf); final ExecProtos.RuntimeFilter protoFilter = message.getPayload(ExecProtos.RuntimeFilter.parser()); final RuntimeFilter filter = RuntimeFilter.getInstance(protoFilter, bloomFilter, senderInfo); boolean isAlreadyPresent = this.runtimeFilters.stream().anyMatch(r -> r.isOnSameColumns(filter)); if (protoFilter.getPartitionColumnFilter().getSizeBytes() != bloomFilter.getSizeInBytes()) { logger.error("Invalid incoming runtime filter size. Expected size {}, actual size {}, filter {}", protoFilter.getPartitionColumnFilter().getSizeBytes(), bloomFilter.getSizeInBytes(), bloomFilter.toString()); AutoCloseables.close(filter); } else if (isAlreadyPresent) { logger.debug("Skipping enforcement because filter is already present {}", filter); AutoCloseables.close(filter); } else { logger.debug("Adding filter to the record readers {}, current reader {}, FPP {}.", filter, this.currentReader.getClass().getName(), bloomFilter.getExpectedFPP()); this.runtimeFilters.add(filter); this.currentReader.addRuntimeFilter(filter); } closeOnErr.commit(); } catch (Exception e) { logger.warn("Error while merging runtime filter piece from " + message.getSendingMajorFragmentId() + ":" + message.getSendingMinorFragmentId(), e); } }
ScanOperator implements ProducerOperator { @Override public void workOnOOB(OutOfBandMessage message) { final ArrowBuf msgBuf = message.getBuffer(); final String senderInfo = String.format("Frag %d:%d, OpId %d", message.getSendingMajorFragmentId(), message.getSendingMinorFragmentId(), message.getSendingOperatorId()); if (msgBuf==null || msgBuf.capacity()==0) { logger.warn("Empty runtime filter received from {}", senderInfo); return; } msgBuf.retain(); logger.info("Filter received from {}", senderInfo); try(RollbackCloseable closeOnErr = new RollbackCloseable()) { closeOnErr.add(msgBuf); final BloomFilter bloomFilter = BloomFilter.prepareFrom(msgBuf); final ExecProtos.RuntimeFilter protoFilter = message.getPayload(ExecProtos.RuntimeFilter.parser()); final RuntimeFilter filter = RuntimeFilter.getInstance(protoFilter, bloomFilter, senderInfo); boolean isAlreadyPresent = this.runtimeFilters.stream().anyMatch(r -> r.isOnSameColumns(filter)); if (protoFilter.getPartitionColumnFilter().getSizeBytes() != bloomFilter.getSizeInBytes()) { logger.error("Invalid incoming runtime filter size. Expected size {}, actual size {}, filter {}", protoFilter.getPartitionColumnFilter().getSizeBytes(), bloomFilter.getSizeInBytes(), bloomFilter.toString()); AutoCloseables.close(filter); } else if (isAlreadyPresent) { logger.debug("Skipping enforcement because filter is already present {}", filter); AutoCloseables.close(filter); } else { logger.debug("Adding filter to the record readers {}, current reader {}, FPP {}.", filter, this.currentReader.getClass().getName(), bloomFilter.getExpectedFPP()); this.runtimeFilters.add(filter); this.currentReader.addRuntimeFilter(filter); } closeOnErr.commit(); } catch (Exception e) { logger.warn("Error while merging runtime filter piece from " + message.getSendingMajorFragmentId() + ":" + message.getSendingMinorFragmentId(), e); } } }
ScanOperator implements ProducerOperator { @Override public void workOnOOB(OutOfBandMessage message) { final ArrowBuf msgBuf = message.getBuffer(); final String senderInfo = String.format("Frag %d:%d, OpId %d", message.getSendingMajorFragmentId(), message.getSendingMinorFragmentId(), message.getSendingOperatorId()); if (msgBuf==null || msgBuf.capacity()==0) { logger.warn("Empty runtime filter received from {}", senderInfo); return; } msgBuf.retain(); logger.info("Filter received from {}", senderInfo); try(RollbackCloseable closeOnErr = new RollbackCloseable()) { closeOnErr.add(msgBuf); final BloomFilter bloomFilter = BloomFilter.prepareFrom(msgBuf); final ExecProtos.RuntimeFilter protoFilter = message.getPayload(ExecProtos.RuntimeFilter.parser()); final RuntimeFilter filter = RuntimeFilter.getInstance(protoFilter, bloomFilter, senderInfo); boolean isAlreadyPresent = this.runtimeFilters.stream().anyMatch(r -> r.isOnSameColumns(filter)); if (protoFilter.getPartitionColumnFilter().getSizeBytes() != bloomFilter.getSizeInBytes()) { logger.error("Invalid incoming runtime filter size. Expected size {}, actual size {}, filter {}", protoFilter.getPartitionColumnFilter().getSizeBytes(), bloomFilter.getSizeInBytes(), bloomFilter.toString()); AutoCloseables.close(filter); } else if (isAlreadyPresent) { logger.debug("Skipping enforcement because filter is already present {}", filter); AutoCloseables.close(filter); } else { logger.debug("Adding filter to the record readers {}, current reader {}, FPP {}.", filter, this.currentReader.getClass().getName(), bloomFilter.getExpectedFPP()); this.runtimeFilters.add(filter); this.currentReader.addRuntimeFilter(filter); } closeOnErr.commit(); } catch (Exception e) { logger.warn("Error while merging runtime filter piece from " + message.getSendingMajorFragmentId() + ":" + message.getSendingMinorFragmentId(), e); } } ScanOperator(SubScan config, OperatorContext context, Iterator<RecordReader> readers); ScanOperator(SubScan config, OperatorContext context, Iterator<RecordReader> readers, GlobalDictionaries globalDictionaries, CoordinationProtos.NodeEndpoint foremanEndpoint, CoordExecRPC.QueryContextInformation queryContextInformation); }
ScanOperator implements ProducerOperator { @Override public void workOnOOB(OutOfBandMessage message) { final ArrowBuf msgBuf = message.getBuffer(); final String senderInfo = String.format("Frag %d:%d, OpId %d", message.getSendingMajorFragmentId(), message.getSendingMinorFragmentId(), message.getSendingOperatorId()); if (msgBuf==null || msgBuf.capacity()==0) { logger.warn("Empty runtime filter received from {}", senderInfo); return; } msgBuf.retain(); logger.info("Filter received from {}", senderInfo); try(RollbackCloseable closeOnErr = new RollbackCloseable()) { closeOnErr.add(msgBuf); final BloomFilter bloomFilter = BloomFilter.prepareFrom(msgBuf); final ExecProtos.RuntimeFilter protoFilter = message.getPayload(ExecProtos.RuntimeFilter.parser()); final RuntimeFilter filter = RuntimeFilter.getInstance(protoFilter, bloomFilter, senderInfo); boolean isAlreadyPresent = this.runtimeFilters.stream().anyMatch(r -> r.isOnSameColumns(filter)); if (protoFilter.getPartitionColumnFilter().getSizeBytes() != bloomFilter.getSizeInBytes()) { logger.error("Invalid incoming runtime filter size. Expected size {}, actual size {}, filter {}", protoFilter.getPartitionColumnFilter().getSizeBytes(), bloomFilter.getSizeInBytes(), bloomFilter.toString()); AutoCloseables.close(filter); } else if (isAlreadyPresent) { logger.debug("Skipping enforcement because filter is already present {}", filter); AutoCloseables.close(filter); } else { logger.debug("Adding filter to the record readers {}, current reader {}, FPP {}.", filter, this.currentReader.getClass().getName(), bloomFilter.getExpectedFPP()); this.runtimeFilters.add(filter); this.currentReader.addRuntimeFilter(filter); } closeOnErr.commit(); } catch (Exception e) { logger.warn("Error while merging runtime filter piece from " + message.getSendingMajorFragmentId() + ":" + message.getSendingMinorFragmentId(), e); } } ScanOperator(SubScan config, OperatorContext context, Iterator<RecordReader> readers); ScanOperator(SubScan config, OperatorContext context, Iterator<RecordReader> readers, GlobalDictionaries globalDictionaries, CoordinationProtos.NodeEndpoint foremanEndpoint, CoordExecRPC.QueryContextInformation queryContextInformation); @Override VectorAccessible setup(); @Override State getState(); @Override int outputData(); @Override void workOnOOB(OutOfBandMessage message); @Override OUT accept(OperatorVisitor<OUT, IN, EXCEP> visitor, IN value); @Override void close(); }
ScanOperator implements ProducerOperator { @Override public void workOnOOB(OutOfBandMessage message) { final ArrowBuf msgBuf = message.getBuffer(); final String senderInfo = String.format("Frag %d:%d, OpId %d", message.getSendingMajorFragmentId(), message.getSendingMinorFragmentId(), message.getSendingOperatorId()); if (msgBuf==null || msgBuf.capacity()==0) { logger.warn("Empty runtime filter received from {}", senderInfo); return; } msgBuf.retain(); logger.info("Filter received from {}", senderInfo); try(RollbackCloseable closeOnErr = new RollbackCloseable()) { closeOnErr.add(msgBuf); final BloomFilter bloomFilter = BloomFilter.prepareFrom(msgBuf); final ExecProtos.RuntimeFilter protoFilter = message.getPayload(ExecProtos.RuntimeFilter.parser()); final RuntimeFilter filter = RuntimeFilter.getInstance(protoFilter, bloomFilter, senderInfo); boolean isAlreadyPresent = this.runtimeFilters.stream().anyMatch(r -> r.isOnSameColumns(filter)); if (protoFilter.getPartitionColumnFilter().getSizeBytes() != bloomFilter.getSizeInBytes()) { logger.error("Invalid incoming runtime filter size. Expected size {}, actual size {}, filter {}", protoFilter.getPartitionColumnFilter().getSizeBytes(), bloomFilter.getSizeInBytes(), bloomFilter.toString()); AutoCloseables.close(filter); } else if (isAlreadyPresent) { logger.debug("Skipping enforcement because filter is already present {}", filter); AutoCloseables.close(filter); } else { logger.debug("Adding filter to the record readers {}, current reader {}, FPP {}.", filter, this.currentReader.getClass().getName(), bloomFilter.getExpectedFPP()); this.runtimeFilters.add(filter); this.currentReader.addRuntimeFilter(filter); } closeOnErr.commit(); } catch (Exception e) { logger.warn("Error while merging runtime filter piece from " + message.getSendingMajorFragmentId() + ":" + message.getSendingMinorFragmentId(), e); } } ScanOperator(SubScan config, OperatorContext context, Iterator<RecordReader> readers); ScanOperator(SubScan config, OperatorContext context, Iterator<RecordReader> readers, GlobalDictionaries globalDictionaries, CoordinationProtos.NodeEndpoint foremanEndpoint, CoordExecRPC.QueryContextInformation queryContextInformation); @Override VectorAccessible setup(); @Override State getState(); @Override int outputData(); @Override void workOnOOB(OutOfBandMessage message); @Override OUT accept(OperatorVisitor<OUT, IN, EXCEP> visitor, IN value); @Override void close(); }
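workOnOOB above accepts an incoming runtime filter only if its advertised size matches the bloom filter's actual size and no filter on the same columns is already registered; the test exercises the size-mismatch path and verifies the reader never sees the filter. A small self-contained sketch of that acceptance decision with hypothetical types (not the Dremio RuntimeFilter/BloomFilter classes):

import java.util.ArrayList;
import java.util.List;

public class FilterAcceptSketch {
  // Hypothetical stand-in for the incoming filter: advertised size vs. actual buffer size,
  // plus the column set it applies to.
  static final class Filter {
    final long advertisedSize;
    final long actualSize;
    final String columns;
    Filter(long advertisedSize, long actualSize, String columns) {
      this.advertisedSize = advertisedSize;
      this.actualSize = actualSize;
      this.columns = columns;
    }
  }

  static final List<Filter> accepted = new ArrayList<>();

  // Mirrors the decision above: reject size mismatches, skip duplicates on the same
  // columns, otherwise keep the filter and hand it to the current reader.
  static boolean accept(Filter f) {
    if (f.advertisedSize != f.actualSize) {
      return false;                       // invalid filter: dropped (the path the test expects)
    }
    boolean alreadyPresent = accepted.stream().anyMatch(r -> r.columns.equals(f.columns));
    if (alreadyPresent) {
      return false;                       // duplicate: nothing new to enforce
    }
    accepted.add(f);
    return true;                          // would be forwarded via currentReader.addRuntimeFilter(...)
  }

  public static void main(String[] args) {
    System.out.println(accept(new Filter(32, 64, "col1,col2")));  // false: size mismatch
    System.out.println(accept(new Filter(64, 64, "col1,col2")));  // true: first valid filter
    System.out.println(accept(new Filter(64, 64, "col1,col2")));  // false: already present
  }
}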
@Test public void testIgnoreOtherMessages() throws Exception { RpcCompatibilityEncoder encoder = new RpcCompatibilityEncoder(); ChannelHandlerContext context = mock(ChannelHandlerContext.class); OutboundRpcMessage message = new OutboundRpcMessage(RpcMode.PING, 0, 0, Acks.OK); List<Object> out = new ArrayList<>(); encoder.encode(context, message, out); assertEquals(1, out.size()); assertSame(message, out.get(0)); }
@Override protected void encode(ChannelHandlerContext context, OutboundRpcMessage message, List<Object> out) throws Exception { if (message.mode != RpcMode.RESPONSE_FAILURE) { out.add(message); return; } final MessageLite pBody = message.pBody; if (!(pBody instanceof DremioPBError)) { out.add(message); return; } DremioPBError error = (DremioPBError) pBody; DremioPBError newError = ErrorCompatibility.convertIfNecessary(error); out.add(new OutboundRpcMessage(message.mode, message.rpcType, message.coordinationId, newError, message.dBodies)); }
RpcCompatibilityEncoder extends MessageToMessageEncoder<OutboundRpcMessage> { @Override protected void encode(ChannelHandlerContext context, OutboundRpcMessage message, List<Object> out) throws Exception { if (message.mode != RpcMode.RESPONSE_FAILURE) { out.add(message); return; } final MessageLite pBody = message.pBody; if (!(pBody instanceof DremioPBError)) { out.add(message); return; } DremioPBError error = (DremioPBError) pBody; DremioPBError newError = ErrorCompatibility.convertIfNecessary(error); out.add(new OutboundRpcMessage(message.mode, message.rpcType, message.coordinationId, newError, message.dBodies)); } }
RpcCompatibilityEncoder extends MessageToMessageEncoder<OutboundRpcMessage> { @Override protected void encode(ChannelHandlerContext context, OutboundRpcMessage message, List<Object> out) throws Exception { if (message.mode != RpcMode.RESPONSE_FAILURE) { out.add(message); return; } final MessageLite pBody = message.pBody; if (!(pBody instanceof DremioPBError)) { out.add(message); return; } DremioPBError error = (DremioPBError) pBody; DremioPBError newError = ErrorCompatibility.convertIfNecessary(error); out.add(new OutboundRpcMessage(message.mode, message.rpcType, message.coordinationId, newError, message.dBodies)); } }
RpcCompatibilityEncoder extends MessageToMessageEncoder<OutboundRpcMessage> { @Override protected void encode(ChannelHandlerContext context, OutboundRpcMessage message, List<Object> out) throws Exception { if (message.mode != RpcMode.RESPONSE_FAILURE) { out.add(message); return; } final MessageLite pBody = message.pBody; if (!(pBody instanceof DremioPBError)) { out.add(message); return; } DremioPBError error = (DremioPBError) pBody; DremioPBError newError = ErrorCompatibility.convertIfNecessary(error); out.add(new OutboundRpcMessage(message.mode, message.rpcType, message.coordinationId, newError, message.dBodies)); } }
RpcCompatibilityEncoder extends MessageToMessageEncoder<OutboundRpcMessage> { @Override protected void encode(ChannelHandlerContext context, OutboundRpcMessage message, List<Object> out) throws Exception { if (message.mode != RpcMode.RESPONSE_FAILURE) { out.add(message); return; } final MessageLite pBody = message.pBody; if (!(pBody instanceof DremioPBError)) { out.add(message); return; } DremioPBError error = (DremioPBError) pBody; DremioPBError newError = ErrorCompatibility.convertIfNecessary(error); out.add(new OutboundRpcMessage(message.mode, message.rpcType, message.coordinationId, newError, message.dBodies)); } }
@Test public void testIgnoreNonDremioPBErrorMessage() throws Exception { RpcCompatibilityEncoder encoder = new RpcCompatibilityEncoder(); ChannelHandlerContext context = mock(ChannelHandlerContext.class); OutboundRpcMessage message = new OutboundRpcMessage(RpcMode.RESPONSE_FAILURE, 0, 0, Acks.OK); List<Object> out = new ArrayList<>(); encoder.encode(context, message, out); assertEquals(1, out.size()); assertSame(message, out.get(0)); }
@Override protected void encode(ChannelHandlerContext context, OutboundRpcMessage message, List<Object> out) throws Exception { if (message.mode != RpcMode.RESPONSE_FAILURE) { out.add(message); return; } final MessageLite pBody = message.pBody; if (!(pBody instanceof DremioPBError)) { out.add(message); return; } DremioPBError error = (DremioPBError) pBody; DremioPBError newError = ErrorCompatibility.convertIfNecessary(error); out.add(new OutboundRpcMessage(message.mode, message.rpcType, message.coordinationId, newError, message.dBodies)); }
RpcCompatibilityEncoder extends MessageToMessageEncoder<OutboundRpcMessage> { @Override protected void encode(ChannelHandlerContext context, OutboundRpcMessage message, List<Object> out) throws Exception { if (message.mode != RpcMode.RESPONSE_FAILURE) { out.add(message); return; } final MessageLite pBody = message.pBody; if (!(pBody instanceof DremioPBError)) { out.add(message); return; } DremioPBError error = (DremioPBError) pBody; DremioPBError newError = ErrorCompatibility.convertIfNecessary(error); out.add(new OutboundRpcMessage(message.mode, message.rpcType, message.coordinationId, newError, message.dBodies)); } }
RpcCompatibilityEncoder extends MessageToMessageEncoder<OutboundRpcMessage> { @Override protected void encode(ChannelHandlerContext context, OutboundRpcMessage message, List<Object> out) throws Exception { if (message.mode != RpcMode.RESPONSE_FAILURE) { out.add(message); return; } final MessageLite pBody = message.pBody; if (!(pBody instanceof DremioPBError)) { out.add(message); return; } DremioPBError error = (DremioPBError) pBody; DremioPBError newError = ErrorCompatibility.convertIfNecessary(error); out.add(new OutboundRpcMessage(message.mode, message.rpcType, message.coordinationId, newError, message.dBodies)); } }
RpcCompatibilityEncoder extends MessageToMessageEncoder<OutboundRpcMessage> { @Override protected void encode(ChannelHandlerContext context, OutboundRpcMessage message, List<Object> out) throws Exception { if (message.mode != RpcMode.RESPONSE_FAILURE) { out.add(message); return; } final MessageLite pBody = message.pBody; if (!(pBody instanceof DremioPBError)) { out.add(message); return; } DremioPBError error = (DremioPBError) pBody; DremioPBError newError = ErrorCompatibility.convertIfNecessary(error); out.add(new OutboundRpcMessage(message.mode, message.rpcType, message.coordinationId, newError, message.dBodies)); } }
RpcCompatibilityEncoder extends MessageToMessageEncoder<OutboundRpcMessage> { @Override protected void encode(ChannelHandlerContext context, OutboundRpcMessage message, List<Object> out) throws Exception { if (message.mode != RpcMode.RESPONSE_FAILURE) { out.add(message); return; } final MessageLite pBody = message.pBody; if (!(pBody instanceof DremioPBError)) { out.add(message); return; } DremioPBError error = (DremioPBError) pBody; DremioPBError newError = ErrorCompatibility.convertIfNecessary(error); out.add(new OutboundRpcMessage(message.mode, message.rpcType, message.coordinationId, newError, message.dBodies)); } }
@Test public void testUpdateErrorType() throws Exception { RpcCompatibilityEncoder encoder = new RpcCompatibilityEncoder(); ChannelHandlerContext context = mock(ChannelHandlerContext.class); DremioPBError error = DremioPBError.newBuilder() .setErrorType(ErrorType.IO_EXCEPTION) .setMessage("test message") .build(); OutboundRpcMessage message = new OutboundRpcMessage(RpcMode.RESPONSE_FAILURE, RpcType.RESP_QUERY_PROFILE, 12, error); List<Object> out = new ArrayList<>(); encoder.encode(context, message, out); assertEquals(1, out.size()); OutboundRpcMessage received = (OutboundRpcMessage) out.get(0); assertEquals(RpcMode.RESPONSE_FAILURE, received.mode); assertEquals(12, received.coordinationId); DremioPBError newError = (DremioPBError) received.pBody; assertEquals(ErrorType.RESOURCE, newError.getErrorType()); assertEquals("test message", newError.getMessage()); }
@Override protected void encode(ChannelHandlerContext context, OutboundRpcMessage message, List<Object> out) throws Exception { if (message.mode != RpcMode.RESPONSE_FAILURE) { out.add(message); return; } final MessageLite pBody = message.pBody; if (!(pBody instanceof DremioPBError)) { out.add(message); return; } DremioPBError error = (DremioPBError) pBody; DremioPBError newError = ErrorCompatibility.convertIfNecessary(error); out.add(new OutboundRpcMessage(message.mode, message.rpcType, message.coordinationId, newError, message.dBodies)); }
RpcCompatibilityEncoder extends MessageToMessageEncoder<OutboundRpcMessage> { @Override protected void encode(ChannelHandlerContext context, OutboundRpcMessage message, List<Object> out) throws Exception { if (message.mode != RpcMode.RESPONSE_FAILURE) { out.add(message); return; } final MessageLite pBody = message.pBody; if (!(pBody instanceof DremioPBError)) { out.add(message); return; } DremioPBError error = (DremioPBError) pBody; DremioPBError newError = ErrorCompatibility.convertIfNecessary(error); out.add(new OutboundRpcMessage(message.mode, message.rpcType, message.coordinationId, newError, message.dBodies)); } }
RpcCompatibilityEncoder extends MessageToMessageEncoder<OutboundRpcMessage> { @Override protected void encode(ChannelHandlerContext context, OutboundRpcMessage message, List<Object> out) throws Exception { if (message.mode != RpcMode.RESPONSE_FAILURE) { out.add(message); return; } final MessageLite pBody = message.pBody; if (!(pBody instanceof DremioPBError)) { out.add(message); return; } DremioPBError error = (DremioPBError) pBody; DremioPBError newError = ErrorCompatibility.convertIfNecessary(error); out.add(new OutboundRpcMessage(message.mode, message.rpcType, message.coordinationId, newError, message.dBodies)); } }
RpcCompatibilityEncoder extends MessageToMessageEncoder<OutboundRpcMessage> { @Override protected void encode(ChannelHandlerContext context, OutboundRpcMessage message, List<Object> out) throws Exception { if (message.mode != RpcMode.RESPONSE_FAILURE) { out.add(message); return; } final MessageLite pBody = message.pBody; if (!(pBody instanceof DremioPBError)) { out.add(message); return; } DremioPBError error = (DremioPBError) pBody; DremioPBError newError = ErrorCompatibility.convertIfNecessary(error); out.add(new OutboundRpcMessage(message.mode, message.rpcType, message.coordinationId, newError, message.dBodies)); } }
RpcCompatibilityEncoder extends MessageToMessageEncoder<OutboundRpcMessage> { @Override protected void encode(ChannelHandlerContext context, OutboundRpcMessage message, List<Object> out) throws Exception { if (message.mode != RpcMode.RESPONSE_FAILURE) { out.add(message); return; } final MessageLite pBody = message.pBody; if (!(pBody instanceof DremioPBError)) { out.add(message); return; } DremioPBError error = (DremioPBError) pBody; DremioPBError newError = ErrorCompatibility.convertIfNecessary(error); out.add(new OutboundRpcMessage(message.mode, message.rpcType, message.coordinationId, newError, message.dBodies)); } }
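The three encoder tests above cover the same decision: non-failure messages pass through untouched, failures without a DremioPBError body pass through, and failures carrying an error get re-emitted with a converted error type. A dependency-free sketch of that decision with hypothetical Msg/Error types standing in for OutboundRpcMessage and DremioPBError (the real encoder also carries rpcType, coordinationId and dBodies):

import java.util.ArrayList;
import java.util.List;

public class CompatEncodeSketch {
  enum Mode { PING, RESPONSE_FAILURE }

  static final class Msg {
    final Mode mode;
    final Object body;
    Msg(Mode mode, Object body) { this.mode = mode; this.body = body; }
  }

  static final class Error {
    final String type;
    Error(String type) { this.type = type; }
  }

  // Same shape as RpcCompatibilityEncoder.encode: pass through anything that is not a
  // failure carrying an Error, otherwise re-emit the message with a converted error type.
  static void encode(Msg message, List<Object> out) {
    if (message.mode != Mode.RESPONSE_FAILURE) { out.add(message); return; }
    if (!(message.body instanceof Error)) { out.add(message); return; }
    Error converted = new Error("RESOURCE");   // stands in for ErrorCompatibility.convertIfNecessary
    out.add(new Msg(message.mode, converted));
  }

  public static void main(String[] args) {
    List<Object> out = new ArrayList<>();
    Msg ping = new Msg(Mode.PING, "ack");
    encode(ping, out);
    System.out.println(out.get(0) == ping);                     // true: non-failure passes through untouched

    out.clear();
    Msg failureNoError = new Msg(Mode.RESPONSE_FAILURE, "ack");
    encode(failureNoError, out);
    System.out.println(out.get(0) == failureNoError);           // true: failure without an Error body passes through

    out.clear();
    encode(new Msg(Mode.RESPONSE_FAILURE, new Error("IO_EXCEPTION")), out);
    System.out.println(((Error) ((Msg) out.get(0)).body).type); // RESOURCE: error type rewritten
  }
}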
@Test public void testEmptyFragmentList() { InOrder inOrder = Mockito.inOrder(completionListener, queryCloser); FragmentTracker fragmentTracker = new FragmentTracker(queryId, completionListener, queryCloser, null, new LocalExecutorSetService(DirectProvider.wrap(coordinator), DirectProvider.wrap(optionManager))); fragmentTracker.populate(Collections.emptyList(), new ResourceSchedulingDecisionInfo()); inOrder.verify(completionListener).succeeded(); inOrder.verify(queryCloser).run(); }
public void populate(List<PlanFragmentFull> fragments, ResourceSchedulingDecisionInfo decisionInfo) { for (PlanFragmentFull fragment : fragments) { final NodeEndpoint assignment = fragment.getMinor().getAssignment(); pendingNodes.add(assignment); } executorSet = executorSetService.getExecutorSet(decisionInfo.getEngineId(), decisionInfo.getSubEngineId()); executorSet.addNodeStatusListener(nodeStatusListener); validateEndpoints(); checkAndNotifyCompletionListener(); }
FragmentTracker implements AutoCloseable { public void populate(List<PlanFragmentFull> fragments, ResourceSchedulingDecisionInfo decisionInfo) { for (PlanFragmentFull fragment : fragments) { final NodeEndpoint assignment = fragment.getMinor().getAssignment(); pendingNodes.add(assignment); } executorSet = executorSetService.getExecutorSet(decisionInfo.getEngineId(), decisionInfo.getSubEngineId()); executorSet.addNodeStatusListener(nodeStatusListener); validateEndpoints(); checkAndNotifyCompletionListener(); } }
FragmentTracker implements AutoCloseable { public void populate(List<PlanFragmentFull> fragments, ResourceSchedulingDecisionInfo decisionInfo) { for (PlanFragmentFull fragment : fragments) { final NodeEndpoint assignment = fragment.getMinor().getAssignment(); pendingNodes.add(assignment); } executorSet = executorSetService.getExecutorSet(decisionInfo.getEngineId(), decisionInfo.getSubEngineId()); executorSet.addNodeStatusListener(nodeStatusListener); validateEndpoints(); checkAndNotifyCompletionListener(); } FragmentTracker( QueryId queryId, CompletionListener completionListener, Runnable queryCloser, ExecutorServiceClientFactory executorServiceClientFactory, ExecutorSetService executorSetService ); }
FragmentTracker implements AutoCloseable { public void populate(List<PlanFragmentFull> fragments, ResourceSchedulingDecisionInfo decisionInfo) { for (PlanFragmentFull fragment : fragments) { final NodeEndpoint assignment = fragment.getMinor().getAssignment(); pendingNodes.add(assignment); } executorSet = executorSetService.getExecutorSet(decisionInfo.getEngineId(), decisionInfo.getSubEngineId()); executorSet.addNodeStatusListener(nodeStatusListener); validateEndpoints(); checkAndNotifyCompletionListener(); } FragmentTracker( QueryId queryId, CompletionListener completionListener, Runnable queryCloser, ExecutorServiceClientFactory executorServiceClientFactory, ExecutorSetService executorSetService ); QueryId getQueryId(); void populate(List<PlanFragmentFull> fragments, ResourceSchedulingDecisionInfo decisionInfo); void nodeMarkFirstError(NodeQueryFirstError firstError); void nodeCompleted(NodeQueryCompletion completion); void screenCompleted(); @Override void close(); }
FragmentTracker implements AutoCloseable { public void populate(List<PlanFragmentFull> fragments, ResourceSchedulingDecisionInfo decisionInfo) { for (PlanFragmentFull fragment : fragments) { final NodeEndpoint assignment = fragment.getMinor().getAssignment(); pendingNodes.add(assignment); } executorSet = executorSetService.getExecutorSet(decisionInfo.getEngineId(), decisionInfo.getSubEngineId()); executorSet.addNodeStatusListener(nodeStatusListener); validateEndpoints(); checkAndNotifyCompletionListener(); } FragmentTracker( QueryId queryId, CompletionListener completionListener, Runnable queryCloser, ExecutorServiceClientFactory executorServiceClientFactory, ExecutorSetService executorSetService ); QueryId getQueryId(); void populate(List<PlanFragmentFull> fragments, ResourceSchedulingDecisionInfo decisionInfo); void nodeMarkFirstError(NodeQueryFirstError firstError); void nodeCompleted(NodeQueryCompletion completion); void screenCompleted(); @Override void close(); }
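testEmptyFragmentList above relies on Mockito's InOrder to prove that the completion listener fires before the query closer when there is nothing to schedule. A minimal sketch of that verification style with a hypothetical Listener interface, independent of FragmentTracker:

import static org.mockito.Mockito.inOrder;
import static org.mockito.Mockito.mock;

import org.mockito.InOrder;

public class InOrderSketch {
  // Hypothetical collaborators standing in for CompletionListener and the query closer.
  interface Listener { void succeeded(); }

  public static void main(String[] args) {
    Listener listener = mock(Listener.class);
    Runnable closer = mock(Runnable.class);

    // Code under test: with nothing pending, report success and then close, in that order.
    listener.succeeded();
    closer.run();

    // Same verification style as the test above: the two interactions must happen in this order.
    InOrder order = inOrder(listener, closer);
    order.verify(listener).succeeded();
    order.verify(closer).run();
    System.out.println("interaction order verified");
  }
}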
@Test public void testUnsupportedRexNode() { try { RelDataTypeFactory relFactory = SqlTypeFactoryImpl.INSTANCE; RexBuilder rex = new DremioRexBuilder(relFactory); RelDataType anyType = relFactory.createSqlType(SqlTypeName.ANY); List<RexNode> emptyList = new LinkedList<>(); ImmutableList<RexFieldCollation> e = ImmutableList.copyOf(new RexFieldCollation[0]); RexNode window = rex.makeOver(anyType, SqlStdOperatorTable.AVG, emptyList, emptyList, e, null, null, true, false, false, false); RexToExpr.toExpr(null, null, null, window); } catch (UserException e) { if (e.getMessage().contains(RexToExpr.UNSUPPORTED_REX_NODE_ERROR)) { return; } Assert.fail("Hit exception with unexpected error message"); } Assert.fail("Failed to raise the expected exception"); }
public static LogicalExpression toExpr(ParseContext context, RelDataType rowType, RexBuilder rexBuilder, RexNode expr) { return toExpr(context, rowType, rexBuilder, expr, true); }
RexToExpr { public static LogicalExpression toExpr(ParseContext context, RelDataType rowType, RexBuilder rexBuilder, RexNode expr) { return toExpr(context, rowType, rexBuilder, expr, true); } }
RexToExpr { public static LogicalExpression toExpr(ParseContext context, RelDataType rowType, RexBuilder rexBuilder, RexNode expr) { return toExpr(context, rowType, rexBuilder, expr, true); } }
RexToExpr { public static LogicalExpression toExpr(ParseContext context, RelDataType rowType, RexBuilder rexBuilder, RexNode expr) { return toExpr(context, rowType, rexBuilder, expr, true); } static LogicalExpression toExpr(ParseContext context, RelDataType rowType, RexBuilder rexBuilder, RexNode expr); static LogicalExpression toExpr(ParseContext context, RelDataType rowType, RexBuilder rexBuilder, RexNode expr, boolean throwUserException); static LogicalExpression toExpr(ParseContext context, RelDataType rowType, RexBuilder rexBuilder, RexNode expr, boolean throwUserException, IntFunction<Optional<Integer>> inputFunction); static List<NamedExpression> projectToExpr(ParseContext context, List<Pair<RexNode, String>> projects, RelNode input); static List<NamedExpression> groupSetToExpr(RelNode input, ImmutableBitSet groupSet); static List<NamedExpression> aggsToExpr( RelDataType rowType, RelNode input, ImmutableBitSet groupSet, List<AggregateCall> aggCalls); static boolean isLiteralNull(RexLiteral literal); }
RexToExpr { public static LogicalExpression toExpr(ParseContext context, RelDataType rowType, RexBuilder rexBuilder, RexNode expr) { return toExpr(context, rowType, rexBuilder, expr, true); } static LogicalExpression toExpr(ParseContext context, RelDataType rowType, RexBuilder rexBuilder, RexNode expr); static LogicalExpression toExpr(ParseContext context, RelDataType rowType, RexBuilder rexBuilder, RexNode expr, boolean throwUserException); static LogicalExpression toExpr(ParseContext context, RelDataType rowType, RexBuilder rexBuilder, RexNode expr, boolean throwUserException, IntFunction<Optional<Integer>> inputFunction); static List<NamedExpression> projectToExpr(ParseContext context, List<Pair<RexNode, String>> projects, RelNode input); static List<NamedExpression> groupSetToExpr(RelNode input, ImmutableBitSet groupSet); static List<NamedExpression> aggsToExpr( RelDataType rowType, RelNode input, ImmutableBitSet groupSet, List<AggregateCall> aggCalls); static boolean isLiteralNull(RexLiteral literal); static final String UNSUPPORTED_REX_NODE_ERROR; }
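The testUnsupportedRexNode test above uses the try/catch-then-fail idiom to assert that an exception with a specific message is raised. On JUnit 4.13+ the same intent can be written with Assert.assertThrows; a sketch on a toy method (hypothetical names, not the Dremio RexToExpr API), offered only as an alternative formulation of the same check:

import static org.junit.Assert.assertThrows;
import static org.junit.Assert.assertTrue;

import org.junit.Test;

public class ExpectedFailureSketch {
  static final String UNSUPPORTED = "Unsupported operation";

  // Stand-in for the code under test: always rejects its input, as toExpr does for a windowed RexOver.
  static void convert(Object node) {
    throw new IllegalArgumentException(UNSUPPORTED + ": " + node);
  }

  // The call must throw, and the message must identify the unsupported node.
  @Test
  public void rejectsUnsupportedNode() {
    IllegalArgumentException e = assertThrows(IllegalArgumentException.class, () -> convert("window"));
    assertTrue(e.getMessage().contains(UNSUPPORTED));
  }
}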
@Test public void testNodesSorting() throws Exception { List<CoordinationProtos.NodeEndpoint> activeEndpoints = ImmutableList.of(N2_EP1, N3_EP1, N4_EP2); EndpointAffinity N1_EP1A = new EndpointAffinity(N1_EP1, 0.5); EndpointAffinity N1_EP2A = new EndpointAffinity(N1_EP2, 0.5); EndpointAffinity N2_EP1A = new EndpointAffinity(N2_EP1, 0.5); Map<CoordinationProtos.NodeEndpoint, EndpointAffinity> endpointAffinityMap = ImmutableMap.of(N1_EP1, N1_EP1A, N2_EP1, N2_EP1A, N1_EP2, N1_EP2A); List<CoordinationProtos.NodeEndpoint> endpoints = SoftAffinityFragmentParallelizer.INSTANCE .findEndpoints( activeEndpoints, endpointAffinityMap, 1, newParameters(3, 5, 10, 0.3D, false)); assertNotNull(endpoints); assertEquals(1, endpoints.size()); assertEquals(N2_EP1, endpoints.get(0)); N2_EP1A = new EndpointAffinity(N2_EP1, 0.3); endpointAffinityMap = ImmutableMap.of(N1_EP1, N1_EP1A, N2_EP1, N2_EP1A, N1_EP2, N1_EP2A); endpoints = SoftAffinityFragmentParallelizer.INSTANCE .findEndpoints( activeEndpoints, endpointAffinityMap, 1, newParameters(3, 5, 10, 0.3D, false)); assertNotNull(endpoints); assertEquals(1, endpoints.size()); assertEquals(N2_EP1, endpoints.get(0)); }
@VisibleForTesting List<NodeEndpoint> findEndpoints(final Collection<NodeEndpoint> activeEndpoints, final Map<NodeEndpoint, EndpointAffinity> endpointAffinityMap, final int width, final ParallelizationParameters parameters) throws PhysicalOperatorSetupException { List<EndpointAffinity> sortedAffinityList; Set<NodeEndpoint> endpointsWithAffinity; if (endpointAffinityMap.isEmpty()) { endpointsWithAffinity = ImmutableSet.of(); sortedAffinityList = ImmutableList.of(); } else { final Set<NodeEndpoint> activeEndpointsSet = ImmutableSet.copyOf(activeEndpoints); sortedAffinityList = endpointAffinityMap.values() .stream() .filter((endpointAffinity) -> activeEndpointsSet.contains(endpointAffinity.getEndpoint())) .sorted(Comparator.comparing(EndpointAffinity::getAffinity).reversed()) .collect(ImmutableList.toImmutableList()); endpointsWithAffinity = sortedAffinityList.stream() .map(EndpointAffinity::getEndpoint) .collect(ImmutableSet.toImmutableSet()); } final List<NodeEndpoint> endpoints = Lists.newArrayList(); if (!sortedAffinityList.isEmpty()) { int numRequiredNodes = 0; for (EndpointAffinity ep : sortedAffinityList) { if (ep.isAssignmentRequired()) { numRequiredNodes++; } else { break; } } if (width < numRequiredNodes) { throw new PhysicalOperatorSetupException("Can not parallelize the fragment as the parallelization width (" + width + ") is " + "less than the number of mandatory nodes (" + numRequiredNodes + " nodes with +INFINITE affinity)."); } int affinedSlots = Math.max(1, (int) (parameters.getAffinityFactor() * width / activeEndpoints.size())) * sortedAffinityList.size(); affinedSlots = Math.max(affinedSlots, numRequiredNodes); affinedSlots = Math.min(affinedSlots, width); Iterator<EndpointAffinity> affinedEPItr = Iterators.cycle(sortedAffinityList); while (endpoints.size() < affinedSlots) { EndpointAffinity ea = affinedEPItr.next(); endpoints.add(ea.getEndpoint()); } } if (endpoints.size() < width) { List<NodeEndpoint> endpointsWithNoAffinity; if (endpointsWithAffinity.isEmpty()) { endpointsWithNoAffinity = Lists.newArrayList(activeEndpoints); } else { endpointsWithNoAffinity = Lists.newArrayList(Sets.difference(ImmutableSet.copyOf(activeEndpoints), endpointsWithAffinity)); } Collections.shuffle(endpointsWithNoAffinity, ThreadLocalRandom.current()); Iterator<NodeEndpoint> otherEPItr = Iterators.cycle(endpointsWithNoAffinity.size() > 0 ? endpointsWithNoAffinity : endpointsWithAffinity); while (endpoints.size() < width) { endpoints.add(otherEPItr.next()); } } return endpoints; }
SoftAffinityFragmentParallelizer implements FragmentParallelizer { @VisibleForTesting List<NodeEndpoint> findEndpoints(final Collection<NodeEndpoint> activeEndpoints, final Map<NodeEndpoint, EndpointAffinity> endpointAffinityMap, final int width, final ParallelizationParameters parameters) throws PhysicalOperatorSetupException { List<EndpointAffinity> sortedAffinityList; Set<NodeEndpoint> endpointsWithAffinity; if (endpointAffinityMap.isEmpty()) { endpointsWithAffinity = ImmutableSet.of(); sortedAffinityList = ImmutableList.of(); } else { final Set<NodeEndpoint> activeEndpointsSet = ImmutableSet.copyOf(activeEndpoints); sortedAffinityList = endpointAffinityMap.values() .stream() .filter((endpointAffinity) -> activeEndpointsSet.contains(endpointAffinity.getEndpoint())) .sorted(Comparator.comparing(EndpointAffinity::getAffinity).reversed()) .collect(ImmutableList.toImmutableList()); endpointsWithAffinity = sortedAffinityList.stream() .map(EndpointAffinity::getEndpoint) .collect(ImmutableSet.toImmutableSet()); } final List<NodeEndpoint> endpoints = Lists.newArrayList(); if (!sortedAffinityList.isEmpty()) { int numRequiredNodes = 0; for (EndpointAffinity ep : sortedAffinityList) { if (ep.isAssignmentRequired()) { numRequiredNodes++; } else { break; } } if (width < numRequiredNodes) { throw new PhysicalOperatorSetupException("Can not parallelize the fragment as the parallelization width (" + width + ") is " + "less than the number of mandatory nodes (" + numRequiredNodes + " nodes with +INFINITE affinity)."); } int affinedSlots = Math.max(1, (int) (parameters.getAffinityFactor() * width / activeEndpoints.size())) * sortedAffinityList.size(); affinedSlots = Math.max(affinedSlots, numRequiredNodes); affinedSlots = Math.min(affinedSlots, width); Iterator<EndpointAffinity> affinedEPItr = Iterators.cycle(sortedAffinityList); while (endpoints.size() < affinedSlots) { EndpointAffinity ea = affinedEPItr.next(); endpoints.add(ea.getEndpoint()); } } if (endpoints.size() < width) { List<NodeEndpoint> endpointsWithNoAffinity; if (endpointsWithAffinity.isEmpty()) { endpointsWithNoAffinity = Lists.newArrayList(activeEndpoints); } else { endpointsWithNoAffinity = Lists.newArrayList(Sets.difference(ImmutableSet.copyOf(activeEndpoints), endpointsWithAffinity)); } Collections.shuffle(endpointsWithNoAffinity, ThreadLocalRandom.current()); Iterator<NodeEndpoint> otherEPItr = Iterators.cycle(endpointsWithNoAffinity.size() > 0 ? endpointsWithNoAffinity : endpointsWithAffinity); while (endpoints.size() < width) { endpoints.add(otherEPItr.next()); } } return endpoints; } }
SoftAffinityFragmentParallelizer implements FragmentParallelizer { @VisibleForTesting List<NodeEndpoint> findEndpoints(final Collection<NodeEndpoint> activeEndpoints, final Map<NodeEndpoint, EndpointAffinity> endpointAffinityMap, final int width, final ParallelizationParameters parameters) throws PhysicalOperatorSetupException { List<EndpointAffinity> sortedAffinityList; Set<NodeEndpoint> endpointsWithAffinity; if (endpointAffinityMap.isEmpty()) { endpointsWithAffinity = ImmutableSet.of(); sortedAffinityList = ImmutableList.of(); } else { final Set<NodeEndpoint> activeEndpointsSet = ImmutableSet.copyOf(activeEndpoints); sortedAffinityList = endpointAffinityMap.values() .stream() .filter((endpointAffinity) -> activeEndpointsSet.contains(endpointAffinity.getEndpoint())) .sorted(Comparator.comparing(EndpointAffinity::getAffinity).reversed()) .collect(ImmutableList.toImmutableList()); endpointsWithAffinity = sortedAffinityList.stream() .map(EndpointAffinity::getEndpoint) .collect(ImmutableSet.toImmutableSet()); } final List<NodeEndpoint> endpoints = Lists.newArrayList(); if (!sortedAffinityList.isEmpty()) { int numRequiredNodes = 0; for (EndpointAffinity ep : sortedAffinityList) { if (ep.isAssignmentRequired()) { numRequiredNodes++; } else { break; } } if (width < numRequiredNodes) { throw new PhysicalOperatorSetupException("Can not parallelize the fragment as the parallelization width (" + width + ") is " + "less than the number of mandatory nodes (" + numRequiredNodes + " nodes with +INFINITE affinity)."); } int affinedSlots = Math.max(1, (int) (parameters.getAffinityFactor() * width / activeEndpoints.size())) * sortedAffinityList.size(); affinedSlots = Math.max(affinedSlots, numRequiredNodes); affinedSlots = Math.min(affinedSlots, width); Iterator<EndpointAffinity> affinedEPItr = Iterators.cycle(sortedAffinityList); while (endpoints.size() < affinedSlots) { EndpointAffinity ea = affinedEPItr.next(); endpoints.add(ea.getEndpoint()); } } if (endpoints.size() < width) { List<NodeEndpoint> endpointsWithNoAffinity; if (endpointsWithAffinity.isEmpty()) { endpointsWithNoAffinity = Lists.newArrayList(activeEndpoints); } else { endpointsWithNoAffinity = Lists.newArrayList(Sets.difference(ImmutableSet.copyOf(activeEndpoints), endpointsWithAffinity)); } Collections.shuffle(endpointsWithNoAffinity, ThreadLocalRandom.current()); Iterator<NodeEndpoint> otherEPItr = Iterators.cycle(endpointsWithNoAffinity.size() > 0 ? endpointsWithNoAffinity : endpointsWithAffinity); while (endpoints.size() < width) { endpoints.add(otherEPItr.next()); } } return endpoints; } }
SoftAffinityFragmentParallelizer implements FragmentParallelizer { @VisibleForTesting List<NodeEndpoint> findEndpoints(final Collection<NodeEndpoint> activeEndpoints, final Map<NodeEndpoint, EndpointAffinity> endpointAffinityMap, final int width, final ParallelizationParameters parameters) throws PhysicalOperatorSetupException { List<EndpointAffinity> sortedAffinityList; Set<NodeEndpoint> endpointsWithAffinity; if (endpointAffinityMap.isEmpty()) { endpointsWithAffinity = ImmutableSet.of(); sortedAffinityList = ImmutableList.of(); } else { final Set<NodeEndpoint> activeEndpointsSet = ImmutableSet.copyOf(activeEndpoints); sortedAffinityList = endpointAffinityMap.values() .stream() .filter((endpointAffinity) -> activeEndpointsSet.contains(endpointAffinity.getEndpoint())) .sorted(Comparator.comparing(EndpointAffinity::getAffinity).reversed()) .collect(ImmutableList.toImmutableList()); endpointsWithAffinity = sortedAffinityList.stream() .map(EndpointAffinity::getEndpoint) .collect(ImmutableSet.toImmutableSet()); } final List<NodeEndpoint> endpoints = Lists.newArrayList(); if (!sortedAffinityList.isEmpty()) { int numRequiredNodes = 0; for (EndpointAffinity ep : sortedAffinityList) { if (ep.isAssignmentRequired()) { numRequiredNodes++; } else { break; } } if (width < numRequiredNodes) { throw new PhysicalOperatorSetupException("Can not parallelize the fragment as the parallelization width (" + width + ") is " + "less than the number of mandatory nodes (" + numRequiredNodes + " nodes with +INFINITE affinity)."); } int affinedSlots = Math.max(1, (int) (parameters.getAffinityFactor() * width / activeEndpoints.size())) * sortedAffinityList.size(); affinedSlots = Math.max(affinedSlots, numRequiredNodes); affinedSlots = Math.min(affinedSlots, width); Iterator<EndpointAffinity> affinedEPItr = Iterators.cycle(sortedAffinityList); while (endpoints.size() < affinedSlots) { EndpointAffinity ea = affinedEPItr.next(); endpoints.add(ea.getEndpoint()); } } if (endpoints.size() < width) { List<NodeEndpoint> endpointsWithNoAffinity; if (endpointsWithAffinity.isEmpty()) { endpointsWithNoAffinity = Lists.newArrayList(activeEndpoints); } else { endpointsWithNoAffinity = Lists.newArrayList(Sets.difference(ImmutableSet.copyOf(activeEndpoints), endpointsWithAffinity)); } Collections.shuffle(endpointsWithNoAffinity, ThreadLocalRandom.current()); Iterator<NodeEndpoint> otherEPItr = Iterators.cycle(endpointsWithNoAffinity.size() > 0 ? endpointsWithNoAffinity : endpointsWithAffinity); while (endpoints.size() < width) { endpoints.add(otherEPItr.next()); } } return endpoints; } @Override void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints); @Override int getIdealFragmentWidth(final Wrapper fragment, final ParallelizationParameters parameters); }
SoftAffinityFragmentParallelizer implements FragmentParallelizer { @VisibleForTesting List<NodeEndpoint> findEndpoints(final Collection<NodeEndpoint> activeEndpoints, final Map<NodeEndpoint, EndpointAffinity> endpointAffinityMap, final int width, final ParallelizationParameters parameters) throws PhysicalOperatorSetupException { List<EndpointAffinity> sortedAffinityList; Set<NodeEndpoint> endpointsWithAffinity; if (endpointAffinityMap.isEmpty()) { endpointsWithAffinity = ImmutableSet.of(); sortedAffinityList = ImmutableList.of(); } else { final Set<NodeEndpoint> activeEndpointsSet = ImmutableSet.copyOf(activeEndpoints); sortedAffinityList = endpointAffinityMap.values() .stream() .filter((endpointAffinity) -> activeEndpointsSet.contains(endpointAffinity.getEndpoint())) .sorted(Comparator.comparing(EndpointAffinity::getAffinity).reversed()) .collect(ImmutableList.toImmutableList()); endpointsWithAffinity = sortedAffinityList.stream() .map(EndpointAffinity::getEndpoint) .collect(ImmutableSet.toImmutableSet()); } final List<NodeEndpoint> endpoints = Lists.newArrayList(); if (!sortedAffinityList.isEmpty()) { int numRequiredNodes = 0; for (EndpointAffinity ep : sortedAffinityList) { if (ep.isAssignmentRequired()) { numRequiredNodes++; } else { break; } } if (width < numRequiredNodes) { throw new PhysicalOperatorSetupException("Can not parallelize the fragment as the parallelization width (" + width + ") is " + "less than the number of mandatory nodes (" + numRequiredNodes + " nodes with +INFINITE affinity)."); } int affinedSlots = Math.max(1, (int) (parameters.getAffinityFactor() * width / activeEndpoints.size())) * sortedAffinityList.size(); affinedSlots = Math.max(affinedSlots, numRequiredNodes); affinedSlots = Math.min(affinedSlots, width); Iterator<EndpointAffinity> affinedEPItr = Iterators.cycle(sortedAffinityList); while (endpoints.size() < affinedSlots) { EndpointAffinity ea = affinedEPItr.next(); endpoints.add(ea.getEndpoint()); } } if (endpoints.size() < width) { List<NodeEndpoint> endpointsWithNoAffinity; if (endpointsWithAffinity.isEmpty()) { endpointsWithNoAffinity = Lists.newArrayList(activeEndpoints); } else { endpointsWithNoAffinity = Lists.newArrayList(Sets.difference(ImmutableSet.copyOf(activeEndpoints), endpointsWithAffinity)); } Collections.shuffle(endpointsWithNoAffinity, ThreadLocalRandom.current()); Iterator<NodeEndpoint> otherEPItr = Iterators.cycle(endpointsWithNoAffinity.size() > 0 ? endpointsWithNoAffinity : endpointsWithAffinity); while (endpoints.size() < width) { endpoints.add(otherEPItr.next()); } } return endpoints; } @Override void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints); @Override int getIdealFragmentWidth(final Wrapper fragment, final ParallelizationParameters parameters); static final SoftAffinityFragmentParallelizer INSTANCE; }
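findEndpoints above fills an affinity-weighted share of the width first and then round-robins the remaining slots with Guava's Iterators.cycle. A small sketch of that two-phase fill with hypothetical node names and an assumed affinity factor (not the Dremio parameters):

import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterators;

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

public class SlotFillSketch {
  public static void main(String[] args) {
    // Hypothetical inputs: nodes that have data affinity and nodes that do not.
    List<String> affined = ImmutableList.of("n2", "n3");
    List<String> others = ImmutableList.of("n4");
    int width = 5;

    // Mirrors findEndpoints above: compute the affined share, clamp it to the width,
    // then round-robin the leftover slots over the remaining nodes.
    double affinityFactor = 1.5;
    int activeCount = affined.size() + others.size();
    int affinedSlots = Math.max(1, (int) (affinityFactor * width / activeCount)) * affined.size();
    affinedSlots = Math.min(affinedSlots, width);

    List<String> assignment = new ArrayList<>();
    Iterator<String> affinedCycle = Iterators.cycle(affined);
    while (assignment.size() < affinedSlots) {
      assignment.add(affinedCycle.next());
    }
    Iterator<String> otherCycle = Iterators.cycle(others.isEmpty() ? affined : others);
    while (assignment.size() < width) {
      assignment.add(otherCycle.next());
    }

    System.out.println(assignment); // [n2, n3, n2, n3, n4]
  }
}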
@Test public void indexEndpointSingle() { EndpointsIndex.Builder indexBuilder = new EndpointsIndex.Builder(); NodeEndpoint ep = NodeEndpoint.newBuilder() .setAddress("localhost") .setFabricPort(1700) .build(); MinorFragmentEndpoint expected = new MinorFragmentEndpoint(16, ep); MinorFragmentIndexEndpoint indexEndpoint = indexBuilder.addFragmentEndpoint(16, ep); EndpointsIndex index = new EndpointsIndex(indexBuilder.getAllEndpoints()); MinorFragmentEndpoint out = index.getFragmentEndpoint(indexEndpoint); assertEquals(expected, out); }
public MinorFragmentEndpoint getFragmentEndpoint(MinorFragmentIndexEndpoint ep) { return fragmentsEndpointMap.computeIfAbsent(ep, k -> new MinorFragmentEndpoint(k.getMinorFragmentId(), endpoints.get(k.getEndpointIndex()))); }
EndpointsIndex { public MinorFragmentEndpoint getFragmentEndpoint(MinorFragmentIndexEndpoint ep) { return fragmentsEndpointMap.computeIfAbsent(ep, k -> new MinorFragmentEndpoint(k.getMinorFragmentId(), endpoints.get(k.getEndpointIndex()))); } }
EndpointsIndex { public MinorFragmentEndpoint getFragmentEndpoint(MinorFragmentIndexEndpoint ep) { return fragmentsEndpointMap.computeIfAbsent(ep, k -> new MinorFragmentEndpoint(k.getMinorFragmentId(), endpoints.get(k.getEndpointIndex()))); } EndpointsIndex(List<NodeEndpoint> endpoints); EndpointsIndex(); }
EndpointsIndex { public MinorFragmentEndpoint getFragmentEndpoint(MinorFragmentIndexEndpoint ep) { return fragmentsEndpointMap.computeIfAbsent(ep, k -> new MinorFragmentEndpoint(k.getMinorFragmentId(), endpoints.get(k.getEndpointIndex()))); } EndpointsIndex(List<NodeEndpoint> endpoints); EndpointsIndex(); NodeEndpoint getNodeEndpoint(int idx); MinorFragmentEndpoint getFragmentEndpoint(MinorFragmentIndexEndpoint ep); List<MinorFragmentEndpoint> getFragmentEndpoints(List<MinorFragmentIndexEndpoint> eps); }
EndpointsIndex { public MinorFragmentEndpoint getFragmentEndpoint(MinorFragmentIndexEndpoint ep) { return fragmentsEndpointMap.computeIfAbsent(ep, k -> new MinorFragmentEndpoint(k.getMinorFragmentId(), endpoints.get(k.getEndpointIndex()))); } EndpointsIndex(List<NodeEndpoint> endpoints); EndpointsIndex(); NodeEndpoint getNodeEndpoint(int idx); MinorFragmentEndpoint getFragmentEndpoint(MinorFragmentIndexEndpoint ep); List<MinorFragmentEndpoint> getFragmentEndpoints(List<MinorFragmentIndexEndpoint> eps); }
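getFragmentEndpoint above memoizes the resolved endpoint per index key with Map.computeIfAbsent. A standalone sketch of that memoization pattern with a hypothetical Key class standing in for MinorFragmentIndexEndpoint:

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class MemoSketch {
  // Small value key that references an entry in a shared endpoint list.
  static final class Key {
    final int minorFragmentId;
    final int endpointIndex;
    Key(int minorFragmentId, int endpointIndex) { this.minorFragmentId = minorFragmentId; this.endpointIndex = endpointIndex; }
    @Override public boolean equals(Object o) {
      return o instanceof Key && ((Key) o).minorFragmentId == minorFragmentId && ((Key) o).endpointIndex == endpointIndex;
    }
    @Override public int hashCode() { return 31 * minorFragmentId + endpointIndex; }
  }

  public static void main(String[] args) {
    List<String> endpoints = Arrays.asList("localhost:1700");
    Map<Key, String> cache = new HashMap<>();

    // Same pattern as getFragmentEndpoint above: build the resolved endpoint once per key
    // and reuse it on subsequent lookups.
    String first = cache.computeIfAbsent(new Key(16, 0), key -> key.minorFragmentId + "@" + endpoints.get(key.endpointIndex));
    String second = cache.computeIfAbsent(new Key(16, 0), key -> { throw new AssertionError("should be cached"); });

    System.out.println(first + " cached=" + (first == second)); // 16@localhost:1700 cached=true
  }
}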
@Test public void simpleCase1() throws Exception { final Wrapper wrapper = newWrapper(200, 1, 20, Collections.singletonList(new EndpointAffinity(N1_EP1, 1.0, true, 50))); INSTANCE.parallelizeFragment(wrapper, newParameters(SLICE_TARGET_DEFAULT, 5, 20), null); assertEquals(1, wrapper.getWidth()); final List<NodeEndpoint> assignedEps = wrapper.getAssignedEndpoints(); assertEquals(1, assignedEps.size()); assertEquals(N1_EP1, assignedEps.get(0)); }
@Override public void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints) throws PhysicalOperatorSetupException { final Stats stats = fragmentWrapper.getStats(); final ParallelizationInfo pInfo = stats.getParallelizationInfo(); int totalMaxWidth = 0; final Map<NodeEndpoint, EndpointAffinity> endpointPool = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : pInfo.getEndpointAffinityMap().entrySet()) { if (entry.getValue().isAssignmentRequired()) { endpointPool.put(entry.getKey(), entry.getValue()); totalMaxWidth += Math.min(parameters.getMaxWidthPerNode(), entry.getValue().getMaxWidth()); if (totalMaxWidth < 0) { totalMaxWidth = Integer.MAX_VALUE; } } } int width = (int) Math.ceil(stats.getMaxCost() / parameters.getSliceTarget()); width = Math.max(endpointPool.size(), width); width = Math.max(1, Math.min(width, pInfo.getMaxWidth())); checkOrThrow(endpointPool.size() <= width, logger, "Number of mandatory endpoints ({}) that require an assignment is more than the allowed fragment max " + "width ({}).", endpointPool.size(), pInfo.getMaxWidth()); width = Math.max(1, Math.min(width, endpointPool.size()*parameters.getMaxWidthPerNode())); width = Math.min(totalMaxWidth, width); final Map<NodeEndpoint, Integer> endpoints = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : endpointPool.entrySet()) { endpoints.put(entry.getKey(), 1); } int totalAssigned = endpoints.size(); double totalAffinity = 1.0; for(EndpointAffinity epAff : endpointPool.values()) { totalAffinity += epAff.getAffinity(); } int remainingSlots = width - endpoints.size(); while (remainingSlots > 0) { for(EndpointAffinity epAf : endpointPool.values()) { final int moreAllocation = (int) Math.ceil( (epAf.getAffinity() / totalAffinity) * remainingSlots); int currentAssignments = endpoints.get(epAf.getEndpoint()); for(int i=0; i < moreAllocation && totalAssigned < width && currentAssignments < parameters.getMaxWidthPerNode() && currentAssignments < epAf.getMaxWidth(); i++) { totalAssigned++; currentAssignments++; } endpoints.put(epAf.getEndpoint(), currentAssignments); } final int previousRemainingSlots = remainingSlots; remainingSlots = width - totalAssigned; if (previousRemainingSlots == remainingSlots) { logger.error("Can't parallelize fragment: " + "Every mandatory node has exhausted the maximum width per node limit." + EOL + "Endpoint pool: {}" + EOL + "Assignment so far: {}" + EOL + "Width: {}", endpointPool, endpoints, width); throw new PhysicalOperatorSetupException("Can not parallelize fragment."); } } final List<NodeEndpoint> assignedEndpoints = Lists.newArrayList(); for(Entry<NodeEndpoint, Integer> entry : endpoints.entrySet()) { for(int i=0; i < entry.getValue(); i++) { assignedEndpoints.add(entry.getKey()); } } fragmentWrapper.setWidth(width); fragmentWrapper.assignEndpoints(parameters, assignedEndpoints); }
HardAffinityFragmentParallelizer implements FragmentParallelizer { @Override public void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints) throws PhysicalOperatorSetupException { final Stats stats = fragmentWrapper.getStats(); final ParallelizationInfo pInfo = stats.getParallelizationInfo(); int totalMaxWidth = 0; final Map<NodeEndpoint, EndpointAffinity> endpointPool = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : pInfo.getEndpointAffinityMap().entrySet()) { if (entry.getValue().isAssignmentRequired()) { endpointPool.put(entry.getKey(), entry.getValue()); totalMaxWidth += Math.min(parameters.getMaxWidthPerNode(), entry.getValue().getMaxWidth()); if (totalMaxWidth < 0) { totalMaxWidth = Integer.MAX_VALUE; } } } int width = (int) Math.ceil(stats.getMaxCost() / parameters.getSliceTarget()); width = Math.max(endpointPool.size(), width); width = Math.max(1, Math.min(width, pInfo.getMaxWidth())); checkOrThrow(endpointPool.size() <= width, logger, "Number of mandatory endpoints ({}) that require an assignment is more than the allowed fragment max " + "width ({}).", endpointPool.size(), pInfo.getMaxWidth()); width = Math.max(1, Math.min(width, endpointPool.size()*parameters.getMaxWidthPerNode())); width = Math.min(totalMaxWidth, width); final Map<NodeEndpoint, Integer> endpoints = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : endpointPool.entrySet()) { endpoints.put(entry.getKey(), 1); } int totalAssigned = endpoints.size(); double totalAffinity = 1.0; for(EndpointAffinity epAff : endpointPool.values()) { totalAffinity += epAff.getAffinity(); } int remainingSlots = width - endpoints.size(); while (remainingSlots > 0) { for(EndpointAffinity epAf : endpointPool.values()) { final int moreAllocation = (int) Math.ceil( (epAf.getAffinity() / totalAffinity) * remainingSlots); int currentAssignments = endpoints.get(epAf.getEndpoint()); for(int i=0; i < moreAllocation && totalAssigned < width && currentAssignments < parameters.getMaxWidthPerNode() && currentAssignments < epAf.getMaxWidth(); i++) { totalAssigned++; currentAssignments++; } endpoints.put(epAf.getEndpoint(), currentAssignments); } final int previousRemainingSlots = remainingSlots; remainingSlots = width - totalAssigned; if (previousRemainingSlots == remainingSlots) { logger.error("Can't parallelize fragment: " + "Every mandatory node has exhausted the maximum width per node limit." + EOL + "Endpoint pool: {}" + EOL + "Assignment so far: {}" + EOL + "Width: {}", endpointPool, endpoints, width); throw new PhysicalOperatorSetupException("Can not parallelize fragment."); } } final List<NodeEndpoint> assignedEndpoints = Lists.newArrayList(); for(Entry<NodeEndpoint, Integer> entry : endpoints.entrySet()) { for(int i=0; i < entry.getValue(); i++) { assignedEndpoints.add(entry.getKey()); } } fragmentWrapper.setWidth(width); fragmentWrapper.assignEndpoints(parameters, assignedEndpoints); } }
HardAffinityFragmentParallelizer implements FragmentParallelizer { @Override public void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints) throws PhysicalOperatorSetupException { final Stats stats = fragmentWrapper.getStats(); final ParallelizationInfo pInfo = stats.getParallelizationInfo(); int totalMaxWidth = 0; final Map<NodeEndpoint, EndpointAffinity> endpointPool = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : pInfo.getEndpointAffinityMap().entrySet()) { if (entry.getValue().isAssignmentRequired()) { endpointPool.put(entry.getKey(), entry.getValue()); totalMaxWidth += Math.min(parameters.getMaxWidthPerNode(), entry.getValue().getMaxWidth()); if (totalMaxWidth < 0) { totalMaxWidth = Integer.MAX_VALUE; } } } int width = (int) Math.ceil(stats.getMaxCost() / parameters.getSliceTarget()); width = Math.max(endpointPool.size(), width); width = Math.max(1, Math.min(width, pInfo.getMaxWidth())); checkOrThrow(endpointPool.size() <= width, logger, "Number of mandatory endpoints ({}) that require an assignment is more than the allowed fragment max " + "width ({}).", endpointPool.size(), pInfo.getMaxWidth()); width = Math.max(1, Math.min(width, endpointPool.size()*parameters.getMaxWidthPerNode())); width = Math.min(totalMaxWidth, width); final Map<NodeEndpoint, Integer> endpoints = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : endpointPool.entrySet()) { endpoints.put(entry.getKey(), 1); } int totalAssigned = endpoints.size(); double totalAffinity = 1.0; for(EndpointAffinity epAff : endpointPool.values()) { totalAffinity += epAff.getAffinity(); } int remainingSlots = width - endpoints.size(); while (remainingSlots > 0) { for(EndpointAffinity epAf : endpointPool.values()) { final int moreAllocation = (int) Math.ceil( (epAf.getAffinity() / totalAffinity) * remainingSlots); int currentAssignments = endpoints.get(epAf.getEndpoint()); for(int i=0; i < moreAllocation && totalAssigned < width && currentAssignments < parameters.getMaxWidthPerNode() && currentAssignments < epAf.getMaxWidth(); i++) { totalAssigned++; currentAssignments++; } endpoints.put(epAf.getEndpoint(), currentAssignments); } final int previousRemainingSlots = remainingSlots; remainingSlots = width - totalAssigned; if (previousRemainingSlots == remainingSlots) { logger.error("Can't parallelize fragment: " + "Every mandatory node has exhausted the maximum width per node limit." + EOL + "Endpoint pool: {}" + EOL + "Assignment so far: {}" + EOL + "Width: {}", endpointPool, endpoints, width); throw new PhysicalOperatorSetupException("Can not parallelize fragment."); } } final List<NodeEndpoint> assignedEndpoints = Lists.newArrayList(); for(Entry<NodeEndpoint, Integer> entry : endpoints.entrySet()) { for(int i=0; i < entry.getValue(); i++) { assignedEndpoints.add(entry.getKey()); } } fragmentWrapper.setWidth(width); fragmentWrapper.assignEndpoints(parameters, assignedEndpoints); } private HardAffinityFragmentParallelizer(); }
HardAffinityFragmentParallelizer implements FragmentParallelizer { @Override public void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints) throws PhysicalOperatorSetupException { final Stats stats = fragmentWrapper.getStats(); final ParallelizationInfo pInfo = stats.getParallelizationInfo(); int totalMaxWidth = 0; final Map<NodeEndpoint, EndpointAffinity> endpointPool = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : pInfo.getEndpointAffinityMap().entrySet()) { if (entry.getValue().isAssignmentRequired()) { endpointPool.put(entry.getKey(), entry.getValue()); totalMaxWidth += Math.min(parameters.getMaxWidthPerNode(), entry.getValue().getMaxWidth()); if (totalMaxWidth < 0) { totalMaxWidth = Integer.MAX_VALUE; } } } int width = (int) Math.ceil(stats.getMaxCost() / parameters.getSliceTarget()); width = Math.max(endpointPool.size(), width); width = Math.max(1, Math.min(width, pInfo.getMaxWidth())); checkOrThrow(endpointPool.size() <= width, logger, "Number of mandatory endpoints ({}) that require an assignment is more than the allowed fragment max " + "width ({}).", endpointPool.size(), pInfo.getMaxWidth()); width = Math.max(1, Math.min(width, endpointPool.size()*parameters.getMaxWidthPerNode())); width = Math.min(totalMaxWidth, width); final Map<NodeEndpoint, Integer> endpoints = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : endpointPool.entrySet()) { endpoints.put(entry.getKey(), 1); } int totalAssigned = endpoints.size(); double totalAffinity = 1.0; for(EndpointAffinity epAff : endpointPool.values()) { totalAffinity += epAff.getAffinity(); } int remainingSlots = width - endpoints.size(); while (remainingSlots > 0) { for(EndpointAffinity epAf : endpointPool.values()) { final int moreAllocation = (int) Math.ceil( (epAf.getAffinity() / totalAffinity) * remainingSlots); int currentAssignments = endpoints.get(epAf.getEndpoint()); for(int i=0; i < moreAllocation && totalAssigned < width && currentAssignments < parameters.getMaxWidthPerNode() && currentAssignments < epAf.getMaxWidth(); i++) { totalAssigned++; currentAssignments++; } endpoints.put(epAf.getEndpoint(), currentAssignments); } final int previousRemainingSlots = remainingSlots; remainingSlots = width - totalAssigned; if (previousRemainingSlots == remainingSlots) { logger.error("Can't parallelize fragment: " + "Every mandatory node has exhausted the maximum width per node limit." + EOL + "Endpoint pool: {}" + EOL + "Assignment so far: {}" + EOL + "Width: {}", endpointPool, endpoints, width); throw new PhysicalOperatorSetupException("Can not parallelize fragment."); } } final List<NodeEndpoint> assignedEndpoints = Lists.newArrayList(); for(Entry<NodeEndpoint, Integer> entry : endpoints.entrySet()) { for(int i=0; i < entry.getValue(); i++) { assignedEndpoints.add(entry.getKey()); } } fragmentWrapper.setWidth(width); fragmentWrapper.assignEndpoints(parameters, assignedEndpoints); } private HardAffinityFragmentParallelizer(); @Override void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints); @Override int getIdealFragmentWidth(final Wrapper fragment, final ParallelizationParameters parameters); }
HardAffinityFragmentParallelizer implements FragmentParallelizer { @Override public void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints) throws PhysicalOperatorSetupException { final Stats stats = fragmentWrapper.getStats(); final ParallelizationInfo pInfo = stats.getParallelizationInfo(); int totalMaxWidth = 0; final Map<NodeEndpoint, EndpointAffinity> endpointPool = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : pInfo.getEndpointAffinityMap().entrySet()) { if (entry.getValue().isAssignmentRequired()) { endpointPool.put(entry.getKey(), entry.getValue()); totalMaxWidth += Math.min(parameters.getMaxWidthPerNode(), entry.getValue().getMaxWidth()); if (totalMaxWidth < 0) { totalMaxWidth = Integer.MAX_VALUE; } } } int width = (int) Math.ceil(stats.getMaxCost() / parameters.getSliceTarget()); width = Math.max(endpointPool.size(), width); width = Math.max(1, Math.min(width, pInfo.getMaxWidth())); checkOrThrow(endpointPool.size() <= width, logger, "Number of mandatory endpoints ({}) that require an assignment is more than the allowed fragment max " + "width ({}).", endpointPool.size(), pInfo.getMaxWidth()); width = Math.max(1, Math.min(width, endpointPool.size()*parameters.getMaxWidthPerNode())); width = Math.min(totalMaxWidth, width); final Map<NodeEndpoint, Integer> endpoints = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : endpointPool.entrySet()) { endpoints.put(entry.getKey(), 1); } int totalAssigned = endpoints.size(); double totalAffinity = 1.0; for(EndpointAffinity epAff : endpointPool.values()) { totalAffinity += epAff.getAffinity(); } int remainingSlots = width - endpoints.size(); while (remainingSlots > 0) { for(EndpointAffinity epAf : endpointPool.values()) { final int moreAllocation = (int) Math.ceil( (epAf.getAffinity() / totalAffinity) * remainingSlots); int currentAssignments = endpoints.get(epAf.getEndpoint()); for(int i=0; i < moreAllocation && totalAssigned < width && currentAssignments < parameters.getMaxWidthPerNode() && currentAssignments < epAf.getMaxWidth(); i++) { totalAssigned++; currentAssignments++; } endpoints.put(epAf.getEndpoint(), currentAssignments); } final int previousRemainingSlots = remainingSlots; remainingSlots = width - totalAssigned; if (previousRemainingSlots == remainingSlots) { logger.error("Can't parallelize fragment: " + "Every mandatory node has exhausted the maximum width per node limit." + EOL + "Endpoint pool: {}" + EOL + "Assignment so far: {}" + EOL + "Width: {}", endpointPool, endpoints, width); throw new PhysicalOperatorSetupException("Can not parallelize fragment."); } } final List<NodeEndpoint> assignedEndpoints = Lists.newArrayList(); for(Entry<NodeEndpoint, Integer> entry : endpoints.entrySet()) { for(int i=0; i < entry.getValue(); i++) { assignedEndpoints.add(entry.getKey()); } } fragmentWrapper.setWidth(width); fragmentWrapper.assignEndpoints(parameters, assignedEndpoints); } private HardAffinityFragmentParallelizer(); @Override void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints); @Override int getIdealFragmentWidth(final Wrapper fragment, final ParallelizationParameters parameters); static final HardAffinityFragmentParallelizer INSTANCE; }
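The parallelizeFragment variants above all share one allocation strategy: every mandatory endpoint gets one slot up front, then the remaining slots are handed out in proportion to each endpoint's affinity, capped by the per-node width limit. The following is a minimal standalone sketch of just that loop under simplified assumptions (String endpoints and a plain allocate method instead of the Dremio Wrapper/EndpointAffinity classes).

import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;

public class AffinityAllocationSketch {

  // Distributes `width` slots over the given endpoint affinities:
  // one mandatory slot per endpoint first, then the remainder
  // proportionally to affinity, capped by maxWidthPerNode.
  static Map<String, Integer> allocate(Map<String, Double> affinities, int width, int maxWidthPerNode) {
    Map<String, Integer> assigned = new LinkedHashMap<>();
    for (String endpoint : affinities.keySet()) {
      assigned.put(endpoint, 1);
    }
    int totalAssigned = assigned.size();

    double totalAffinity = 1.0; // same smoothing constant as the code above
    for (double affinity : affinities.values()) {
      totalAffinity += affinity;
    }

    int remaining = width - totalAssigned;
    while (remaining > 0) {
      for (Map.Entry<String, Double> entry : affinities.entrySet()) {
        int extra = (int) Math.ceil((entry.getValue() / totalAffinity) * remaining);
        int current = assigned.get(entry.getKey());
        for (int i = 0; i < extra && totalAssigned < width && current < maxWidthPerNode; i++) {
          totalAssigned++;
          current++;
        }
        assigned.put(entry.getKey(), current);
      }
      int next = width - totalAssigned;
      if (next == remaining) {
        // Nobody can take more work: the real code raises PhysicalOperatorSetupException here.
        throw new IllegalStateException("Cannot parallelize fragment");
      }
      remaining = next;
    }
    return assigned;
  }

  public static void main(String[] args) {
    Map<String, Double> affinities = new HashMap<>();
    affinities.put("node1", 1.0);
    affinities.put("node2", 3.0);
    // 6 slots, at most 5 per node: node2 receives the larger share.
    System.out.println(allocate(affinities, 6, 5));
  }
}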
@Test
public void matchHardAffinity() throws Exception {
  final Wrapper wrapper = newSplitWrapper(200, 1, 20,
      Collections.singletonList(new EndpointAffinity(N1_EP1, 1.0, true, 20)),
      new ExecutionNodeMap(ImmutableList.of(N1_EP1)));
  INSTANCE.parallelizeFragment(wrapper, newParameters(SLICE_TARGET_DEFAULT, 5, 20), null);

  // The hard-affinity endpoint is present in the execution node map, so assignment lands on it.
  assertEquals(1, wrapper.getWidth());

  final List<NodeEndpoint> assignedEps = wrapper.getAssignedEndpoints();
  assertEquals(1, assignedEps.size());
  assertEquals(N1_EP1, assignedEps.get(0));
}
@Override public void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints) throws PhysicalOperatorSetupException { final Stats stats = fragmentWrapper.getStats(); final ParallelizationInfo pInfo = stats.getParallelizationInfo(); int totalMaxWidth = 0; final Map<NodeEndpoint, EndpointAffinity> endpointPool = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : pInfo.getEndpointAffinityMap().entrySet()) { if (entry.getValue().isAssignmentRequired()) { endpointPool.put(entry.getKey(), entry.getValue()); totalMaxWidth += Math.min(parameters.getMaxWidthPerNode(), entry.getValue().getMaxWidth()); if (totalMaxWidth < 0) { totalMaxWidth = Integer.MAX_VALUE; } } } int width = (int) Math.ceil(stats.getMaxCost() / parameters.getSliceTarget()); width = Math.max(endpointPool.size(), width); width = Math.max(1, Math.min(width, pInfo.getMaxWidth())); checkOrThrow(endpointPool.size() <= width, logger, "Number of mandatory endpoints ({}) that require an assignment is more than the allowed fragment max " + "width ({}).", endpointPool.size(), pInfo.getMaxWidth()); width = Math.max(1, Math.min(width, endpointPool.size()*parameters.getMaxWidthPerNode())); width = Math.min(totalMaxWidth, width); final Map<NodeEndpoint, Integer> endpoints = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : endpointPool.entrySet()) { endpoints.put(entry.getKey(), 1); } int totalAssigned = endpoints.size(); double totalAffinity = 1.0; for(EndpointAffinity epAff : endpointPool.values()) { totalAffinity += epAff.getAffinity(); } int remainingSlots = width - endpoints.size(); while (remainingSlots > 0) { for(EndpointAffinity epAf : endpointPool.values()) { final int moreAllocation = (int) Math.ceil( (epAf.getAffinity() / totalAffinity) * remainingSlots); int currentAssignments = endpoints.get(epAf.getEndpoint()); for(int i=0; i < moreAllocation && totalAssigned < width && currentAssignments < parameters.getMaxWidthPerNode() && currentAssignments < epAf.getMaxWidth(); i++) { totalAssigned++; currentAssignments++; } endpoints.put(epAf.getEndpoint(), currentAssignments); } final int previousRemainingSlots = remainingSlots; remainingSlots = width - totalAssigned; if (previousRemainingSlots == remainingSlots) { logger.error("Can't parallelize fragment: " + "Every mandatory node has exhausted the maximum width per node limit." + EOL + "Endpoint pool: {}" + EOL + "Assignment so far: {}" + EOL + "Width: {}", endpointPool, endpoints, width); throw new PhysicalOperatorSetupException("Can not parallelize fragment."); } } final List<NodeEndpoint> assignedEndpoints = Lists.newArrayList(); for(Entry<NodeEndpoint, Integer> entry : endpoints.entrySet()) { for(int i=0; i < entry.getValue(); i++) { assignedEndpoints.add(entry.getKey()); } } fragmentWrapper.setWidth(width); fragmentWrapper.assignEndpoints(parameters, assignedEndpoints); }
HardAffinityFragmentParallelizer implements FragmentParallelizer { @Override public void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints) throws PhysicalOperatorSetupException { final Stats stats = fragmentWrapper.getStats(); final ParallelizationInfo pInfo = stats.getParallelizationInfo(); int totalMaxWidth = 0; final Map<NodeEndpoint, EndpointAffinity> endpointPool = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : pInfo.getEndpointAffinityMap().entrySet()) { if (entry.getValue().isAssignmentRequired()) { endpointPool.put(entry.getKey(), entry.getValue()); totalMaxWidth += Math.min(parameters.getMaxWidthPerNode(), entry.getValue().getMaxWidth()); if (totalMaxWidth < 0) { totalMaxWidth = Integer.MAX_VALUE; } } } int width = (int) Math.ceil(stats.getMaxCost() / parameters.getSliceTarget()); width = Math.max(endpointPool.size(), width); width = Math.max(1, Math.min(width, pInfo.getMaxWidth())); checkOrThrow(endpointPool.size() <= width, logger, "Number of mandatory endpoints ({}) that require an assignment is more than the allowed fragment max " + "width ({}).", endpointPool.size(), pInfo.getMaxWidth()); width = Math.max(1, Math.min(width, endpointPool.size()*parameters.getMaxWidthPerNode())); width = Math.min(totalMaxWidth, width); final Map<NodeEndpoint, Integer> endpoints = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : endpointPool.entrySet()) { endpoints.put(entry.getKey(), 1); } int totalAssigned = endpoints.size(); double totalAffinity = 1.0; for(EndpointAffinity epAff : endpointPool.values()) { totalAffinity += epAff.getAffinity(); } int remainingSlots = width - endpoints.size(); while (remainingSlots > 0) { for(EndpointAffinity epAf : endpointPool.values()) { final int moreAllocation = (int) Math.ceil( (epAf.getAffinity() / totalAffinity) * remainingSlots); int currentAssignments = endpoints.get(epAf.getEndpoint()); for(int i=0; i < moreAllocation && totalAssigned < width && currentAssignments < parameters.getMaxWidthPerNode() && currentAssignments < epAf.getMaxWidth(); i++) { totalAssigned++; currentAssignments++; } endpoints.put(epAf.getEndpoint(), currentAssignments); } final int previousRemainingSlots = remainingSlots; remainingSlots = width - totalAssigned; if (previousRemainingSlots == remainingSlots) { logger.error("Can't parallelize fragment: " + "Every mandatory node has exhausted the maximum width per node limit." + EOL + "Endpoint pool: {}" + EOL + "Assignment so far: {}" + EOL + "Width: {}", endpointPool, endpoints, width); throw new PhysicalOperatorSetupException("Can not parallelize fragment."); } } final List<NodeEndpoint> assignedEndpoints = Lists.newArrayList(); for(Entry<NodeEndpoint, Integer> entry : endpoints.entrySet()) { for(int i=0; i < entry.getValue(); i++) { assignedEndpoints.add(entry.getKey()); } } fragmentWrapper.setWidth(width); fragmentWrapper.assignEndpoints(parameters, assignedEndpoints); } }
HardAffinityFragmentParallelizer implements FragmentParallelizer { @Override public void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints) throws PhysicalOperatorSetupException { final Stats stats = fragmentWrapper.getStats(); final ParallelizationInfo pInfo = stats.getParallelizationInfo(); int totalMaxWidth = 0; final Map<NodeEndpoint, EndpointAffinity> endpointPool = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : pInfo.getEndpointAffinityMap().entrySet()) { if (entry.getValue().isAssignmentRequired()) { endpointPool.put(entry.getKey(), entry.getValue()); totalMaxWidth += Math.min(parameters.getMaxWidthPerNode(), entry.getValue().getMaxWidth()); if (totalMaxWidth < 0) { totalMaxWidth = Integer.MAX_VALUE; } } } int width = (int) Math.ceil(stats.getMaxCost() / parameters.getSliceTarget()); width = Math.max(endpointPool.size(), width); width = Math.max(1, Math.min(width, pInfo.getMaxWidth())); checkOrThrow(endpointPool.size() <= width, logger, "Number of mandatory endpoints ({}) that require an assignment is more than the allowed fragment max " + "width ({}).", endpointPool.size(), pInfo.getMaxWidth()); width = Math.max(1, Math.min(width, endpointPool.size()*parameters.getMaxWidthPerNode())); width = Math.min(totalMaxWidth, width); final Map<NodeEndpoint, Integer> endpoints = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : endpointPool.entrySet()) { endpoints.put(entry.getKey(), 1); } int totalAssigned = endpoints.size(); double totalAffinity = 1.0; for(EndpointAffinity epAff : endpointPool.values()) { totalAffinity += epAff.getAffinity(); } int remainingSlots = width - endpoints.size(); while (remainingSlots > 0) { for(EndpointAffinity epAf : endpointPool.values()) { final int moreAllocation = (int) Math.ceil( (epAf.getAffinity() / totalAffinity) * remainingSlots); int currentAssignments = endpoints.get(epAf.getEndpoint()); for(int i=0; i < moreAllocation && totalAssigned < width && currentAssignments < parameters.getMaxWidthPerNode() && currentAssignments < epAf.getMaxWidth(); i++) { totalAssigned++; currentAssignments++; } endpoints.put(epAf.getEndpoint(), currentAssignments); } final int previousRemainingSlots = remainingSlots; remainingSlots = width - totalAssigned; if (previousRemainingSlots == remainingSlots) { logger.error("Can't parallelize fragment: " + "Every mandatory node has exhausted the maximum width per node limit." + EOL + "Endpoint pool: {}" + EOL + "Assignment so far: {}" + EOL + "Width: {}", endpointPool, endpoints, width); throw new PhysicalOperatorSetupException("Can not parallelize fragment."); } } final List<NodeEndpoint> assignedEndpoints = Lists.newArrayList(); for(Entry<NodeEndpoint, Integer> entry : endpoints.entrySet()) { for(int i=0; i < entry.getValue(); i++) { assignedEndpoints.add(entry.getKey()); } } fragmentWrapper.setWidth(width); fragmentWrapper.assignEndpoints(parameters, assignedEndpoints); } private HardAffinityFragmentParallelizer(); }
HardAffinityFragmentParallelizer implements FragmentParallelizer { @Override public void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints) throws PhysicalOperatorSetupException { final Stats stats = fragmentWrapper.getStats(); final ParallelizationInfo pInfo = stats.getParallelizationInfo(); int totalMaxWidth = 0; final Map<NodeEndpoint, EndpointAffinity> endpointPool = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : pInfo.getEndpointAffinityMap().entrySet()) { if (entry.getValue().isAssignmentRequired()) { endpointPool.put(entry.getKey(), entry.getValue()); totalMaxWidth += Math.min(parameters.getMaxWidthPerNode(), entry.getValue().getMaxWidth()); if (totalMaxWidth < 0) { totalMaxWidth = Integer.MAX_VALUE; } } } int width = (int) Math.ceil(stats.getMaxCost() / parameters.getSliceTarget()); width = Math.max(endpointPool.size(), width); width = Math.max(1, Math.min(width, pInfo.getMaxWidth())); checkOrThrow(endpointPool.size() <= width, logger, "Number of mandatory endpoints ({}) that require an assignment is more than the allowed fragment max " + "width ({}).", endpointPool.size(), pInfo.getMaxWidth()); width = Math.max(1, Math.min(width, endpointPool.size()*parameters.getMaxWidthPerNode())); width = Math.min(totalMaxWidth, width); final Map<NodeEndpoint, Integer> endpoints = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : endpointPool.entrySet()) { endpoints.put(entry.getKey(), 1); } int totalAssigned = endpoints.size(); double totalAffinity = 1.0; for(EndpointAffinity epAff : endpointPool.values()) { totalAffinity += epAff.getAffinity(); } int remainingSlots = width - endpoints.size(); while (remainingSlots > 0) { for(EndpointAffinity epAf : endpointPool.values()) { final int moreAllocation = (int) Math.ceil( (epAf.getAffinity() / totalAffinity) * remainingSlots); int currentAssignments = endpoints.get(epAf.getEndpoint()); for(int i=0; i < moreAllocation && totalAssigned < width && currentAssignments < parameters.getMaxWidthPerNode() && currentAssignments < epAf.getMaxWidth(); i++) { totalAssigned++; currentAssignments++; } endpoints.put(epAf.getEndpoint(), currentAssignments); } final int previousRemainingSlots = remainingSlots; remainingSlots = width - totalAssigned; if (previousRemainingSlots == remainingSlots) { logger.error("Can't parallelize fragment: " + "Every mandatory node has exhausted the maximum width per node limit." + EOL + "Endpoint pool: {}" + EOL + "Assignment so far: {}" + EOL + "Width: {}", endpointPool, endpoints, width); throw new PhysicalOperatorSetupException("Can not parallelize fragment."); } } final List<NodeEndpoint> assignedEndpoints = Lists.newArrayList(); for(Entry<NodeEndpoint, Integer> entry : endpoints.entrySet()) { for(int i=0; i < entry.getValue(); i++) { assignedEndpoints.add(entry.getKey()); } } fragmentWrapper.setWidth(width); fragmentWrapper.assignEndpoints(parameters, assignedEndpoints); } private HardAffinityFragmentParallelizer(); @Override void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints); @Override int getIdealFragmentWidth(final Wrapper fragment, final ParallelizationParameters parameters); }
HardAffinityFragmentParallelizer implements FragmentParallelizer { @Override public void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints) throws PhysicalOperatorSetupException { final Stats stats = fragmentWrapper.getStats(); final ParallelizationInfo pInfo = stats.getParallelizationInfo(); int totalMaxWidth = 0; final Map<NodeEndpoint, EndpointAffinity> endpointPool = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : pInfo.getEndpointAffinityMap().entrySet()) { if (entry.getValue().isAssignmentRequired()) { endpointPool.put(entry.getKey(), entry.getValue()); totalMaxWidth += Math.min(parameters.getMaxWidthPerNode(), entry.getValue().getMaxWidth()); if (totalMaxWidth < 0) { totalMaxWidth = Integer.MAX_VALUE; } } } int width = (int) Math.ceil(stats.getMaxCost() / parameters.getSliceTarget()); width = Math.max(endpointPool.size(), width); width = Math.max(1, Math.min(width, pInfo.getMaxWidth())); checkOrThrow(endpointPool.size() <= width, logger, "Number of mandatory endpoints ({}) that require an assignment is more than the allowed fragment max " + "width ({}).", endpointPool.size(), pInfo.getMaxWidth()); width = Math.max(1, Math.min(width, endpointPool.size()*parameters.getMaxWidthPerNode())); width = Math.min(totalMaxWidth, width); final Map<NodeEndpoint, Integer> endpoints = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : endpointPool.entrySet()) { endpoints.put(entry.getKey(), 1); } int totalAssigned = endpoints.size(); double totalAffinity = 1.0; for(EndpointAffinity epAff : endpointPool.values()) { totalAffinity += epAff.getAffinity(); } int remainingSlots = width - endpoints.size(); while (remainingSlots > 0) { for(EndpointAffinity epAf : endpointPool.values()) { final int moreAllocation = (int) Math.ceil( (epAf.getAffinity() / totalAffinity) * remainingSlots); int currentAssignments = endpoints.get(epAf.getEndpoint()); for(int i=0; i < moreAllocation && totalAssigned < width && currentAssignments < parameters.getMaxWidthPerNode() && currentAssignments < epAf.getMaxWidth(); i++) { totalAssigned++; currentAssignments++; } endpoints.put(epAf.getEndpoint(), currentAssignments); } final int previousRemainingSlots = remainingSlots; remainingSlots = width - totalAssigned; if (previousRemainingSlots == remainingSlots) { logger.error("Can't parallelize fragment: " + "Every mandatory node has exhausted the maximum width per node limit." + EOL + "Endpoint pool: {}" + EOL + "Assignment so far: {}" + EOL + "Width: {}", endpointPool, endpoints, width); throw new PhysicalOperatorSetupException("Can not parallelize fragment."); } } final List<NodeEndpoint> assignedEndpoints = Lists.newArrayList(); for(Entry<NodeEndpoint, Integer> entry : endpoints.entrySet()) { for(int i=0; i < entry.getValue(); i++) { assignedEndpoints.add(entry.getKey()); } } fragmentWrapper.setWidth(width); fragmentWrapper.assignEndpoints(parameters, assignedEndpoints); } private HardAffinityFragmentParallelizer(); @Override void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints); @Override int getIdealFragmentWidth(final Wrapper fragment, final ParallelizationParameters parameters); static final HardAffinityFragmentParallelizer INSTANCE; }
@Test
public void testMutatorApplyDropPreview() {
  boolean preview = true;
  TransformResult result2 = mutator(preview).apply("foo", "foo2", newValue, true);

  // Dropping the source column: "foo2" is reported as added, nothing is modified in place, and "foo" is removed.
  assertEquals(newHashSet("foo2"), result2.getAddedColumns());
  assertEquals(newHashSet(), result2.getModifiedColumns());
  assertEquals(newHashSet("foo"), result2.getRemovedColumns());
  assertColIs(value, result2, "foo");
  assertColIs(newValue, result2, "foo2");
}
public TransformResult apply(String oldCol, String newCol, ExpressionBase newExp, boolean dropSourceColumn) { return apply(oldCol, newCol, newExp.wrap(), dropSourceColumn); }
DatasetStateMutator { public TransformResult apply(String oldCol, String newCol, ExpressionBase newExp, boolean dropSourceColumn) { return apply(oldCol, newCol, newExp.wrap(), dropSourceColumn); } }
DatasetStateMutator { public TransformResult apply(String oldCol, String newCol, ExpressionBase newExp, boolean dropSourceColumn) { return apply(oldCol, newCol, newExp.wrap(), dropSourceColumn); } DatasetStateMutator(String username, VirtualDatasetState virtualDatasetState, boolean preview); }
DatasetStateMutator { public TransformResult apply(String oldCol, String newCol, ExpressionBase newExp, boolean dropSourceColumn) { return apply(oldCol, newCol, newExp.wrap(), dropSourceColumn); } DatasetStateMutator(String username, VirtualDatasetState virtualDatasetState, boolean preview); void setSql(QueryMetadata metadata); void addColumn(int index, Column column); void addColumn(Column column); void moveColumn(int index, int dest); int columnCount(); void addJoin(Join join); void updateColumnTables(); String uniqueColumnName(String column); String getDatasetAlias(); void groupedBy(List<Column> newColumns, List<Column> groupBys); void addFilter(Filter filter); void setOrdersList(List<Order> columnsList); TransformResult result(); int indexOfCol(String colName); void nest(); TransformResult rename(String oldCol, String newCol); TransformResult apply(String oldCol, String newCol, ExpressionBase newExp, boolean dropSourceColumn); TransformResult apply(String oldCol, String newCol, Expression newExpWrapped, boolean dropSourceColumn); Expression findColValueForModification(String colName); Expression findColValue(String colName); boolean isGrouped(); void dropColumn(String droppedColumnName); }
DatasetStateMutator { public TransformResult apply(String oldCol, String newCol, ExpressionBase newExp, boolean dropSourceColumn) { return apply(oldCol, newCol, newExp.wrap(), dropSourceColumn); } DatasetStateMutator(String username, VirtualDatasetState virtualDatasetState, boolean preview); void setSql(QueryMetadata metadata); void addColumn(int index, Column column); void addColumn(Column column); void moveColumn(int index, int dest); int columnCount(); void addJoin(Join join); void updateColumnTables(); String uniqueColumnName(String column); String getDatasetAlias(); void groupedBy(List<Column> newColumns, List<Column> groupBys); void addFilter(Filter filter); void setOrdersList(List<Order> columnsList); TransformResult result(); int indexOfCol(String colName); void nest(); TransformResult rename(String oldCol, String newCol); TransformResult apply(String oldCol, String newCol, ExpressionBase newExp, boolean dropSourceColumn); TransformResult apply(String oldCol, String newCol, Expression newExpWrapped, boolean dropSourceColumn); Expression findColValueForModification(String colName); Expression findColValue(String colName); boolean isGrouped(); void dropColumn(String droppedColumnName); }
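The DatasetStateMutator.apply overload above simply wraps the ExpressionBase before delegating, and the preview test at the start of this row pins down the expected column bookkeeping when dropSourceColumn is true. The sketch below mirrors only what that test asserts (new column added, source column removed); TransformResultSketch and its fields are hypothetical stand-ins, not the real TransformResult API.

import java.util.HashSet;
import java.util.Set;

public class TransformResultSketch {

  final Set<String> added = new HashSet<>();
  final Set<String> modified = new HashSet<>();
  final Set<String> removed = new HashSet<>();

  // Mirrors only what the preview test asserts: the new column is added,
  // and the source column is removed when dropSourceColumn is true.
  static TransformResultSketch apply(String oldCol, String newCol, boolean dropSourceColumn) {
    TransformResultSketch result = new TransformResultSketch();
    result.added.add(newCol);
    if (dropSourceColumn) {
      result.removed.add(oldCol);
    }
    return result;
  }

  public static void main(String[] args) {
    TransformResultSketch r = apply("foo", "foo2", true);
    // Expected: added=[foo2] modified=[] removed=[foo]
    System.out.println("added=" + r.added + " modified=" + r.modified + " removed=" + r.removed);
  }
}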
@Test
public void noMatchHardAffinity() throws Exception {
  try {
    final Wrapper wrapper = newSplitWrapper(200, 1, 20,
        Collections.singletonList(new EndpointAffinity(N1_EP1, 1.0, true, 20)),
        new ExecutionNodeMap(ImmutableList.of(N2_EP1)));
    INSTANCE.parallelizeFragment(wrapper, newParameters(SLICE_TARGET_DEFAULT, 5, 20), ImmutableList.of(N2_EP1));
    fail("Should throw exception as affinity endpoint does not match active endpoint");
  } catch (UserException uex) {
    assertTrue(uex.getMessage().startsWith("No executors are available for data with hard affinity."));
  }
}
@Override public void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints) throws PhysicalOperatorSetupException { final Stats stats = fragmentWrapper.getStats(); final ParallelizationInfo pInfo = stats.getParallelizationInfo(); int totalMaxWidth = 0; final Map<NodeEndpoint, EndpointAffinity> endpointPool = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : pInfo.getEndpointAffinityMap().entrySet()) { if (entry.getValue().isAssignmentRequired()) { endpointPool.put(entry.getKey(), entry.getValue()); totalMaxWidth += Math.min(parameters.getMaxWidthPerNode(), entry.getValue().getMaxWidth()); if (totalMaxWidth < 0) { totalMaxWidth = Integer.MAX_VALUE; } } } int width = (int) Math.ceil(stats.getMaxCost() / parameters.getSliceTarget()); width = Math.max(endpointPool.size(), width); width = Math.max(1, Math.min(width, pInfo.getMaxWidth())); checkOrThrow(endpointPool.size() <= width, logger, "Number of mandatory endpoints ({}) that require an assignment is more than the allowed fragment max " + "width ({}).", endpointPool.size(), pInfo.getMaxWidth()); width = Math.max(1, Math.min(width, endpointPool.size()*parameters.getMaxWidthPerNode())); width = Math.min(totalMaxWidth, width); final Map<NodeEndpoint, Integer> endpoints = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : endpointPool.entrySet()) { endpoints.put(entry.getKey(), 1); } int totalAssigned = endpoints.size(); double totalAffinity = 1.0; for(EndpointAffinity epAff : endpointPool.values()) { totalAffinity += epAff.getAffinity(); } int remainingSlots = width - endpoints.size(); while (remainingSlots > 0) { for(EndpointAffinity epAf : endpointPool.values()) { final int moreAllocation = (int) Math.ceil( (epAf.getAffinity() / totalAffinity) * remainingSlots); int currentAssignments = endpoints.get(epAf.getEndpoint()); for(int i=0; i < moreAllocation && totalAssigned < width && currentAssignments < parameters.getMaxWidthPerNode() && currentAssignments < epAf.getMaxWidth(); i++) { totalAssigned++; currentAssignments++; } endpoints.put(epAf.getEndpoint(), currentAssignments); } final int previousRemainingSlots = remainingSlots; remainingSlots = width - totalAssigned; if (previousRemainingSlots == remainingSlots) { logger.error("Can't parallelize fragment: " + "Every mandatory node has exhausted the maximum width per node limit." + EOL + "Endpoint pool: {}" + EOL + "Assignment so far: {}" + EOL + "Width: {}", endpointPool, endpoints, width); throw new PhysicalOperatorSetupException("Can not parallelize fragment."); } } final List<NodeEndpoint> assignedEndpoints = Lists.newArrayList(); for(Entry<NodeEndpoint, Integer> entry : endpoints.entrySet()) { for(int i=0; i < entry.getValue(); i++) { assignedEndpoints.add(entry.getKey()); } } fragmentWrapper.setWidth(width); fragmentWrapper.assignEndpoints(parameters, assignedEndpoints); }
HardAffinityFragmentParallelizer implements FragmentParallelizer { @Override public void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints) throws PhysicalOperatorSetupException { final Stats stats = fragmentWrapper.getStats(); final ParallelizationInfo pInfo = stats.getParallelizationInfo(); int totalMaxWidth = 0; final Map<NodeEndpoint, EndpointAffinity> endpointPool = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : pInfo.getEndpointAffinityMap().entrySet()) { if (entry.getValue().isAssignmentRequired()) { endpointPool.put(entry.getKey(), entry.getValue()); totalMaxWidth += Math.min(parameters.getMaxWidthPerNode(), entry.getValue().getMaxWidth()); if (totalMaxWidth < 0) { totalMaxWidth = Integer.MAX_VALUE; } } } int width = (int) Math.ceil(stats.getMaxCost() / parameters.getSliceTarget()); width = Math.max(endpointPool.size(), width); width = Math.max(1, Math.min(width, pInfo.getMaxWidth())); checkOrThrow(endpointPool.size() <= width, logger, "Number of mandatory endpoints ({}) that require an assignment is more than the allowed fragment max " + "width ({}).", endpointPool.size(), pInfo.getMaxWidth()); width = Math.max(1, Math.min(width, endpointPool.size()*parameters.getMaxWidthPerNode())); width = Math.min(totalMaxWidth, width); final Map<NodeEndpoint, Integer> endpoints = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : endpointPool.entrySet()) { endpoints.put(entry.getKey(), 1); } int totalAssigned = endpoints.size(); double totalAffinity = 1.0; for(EndpointAffinity epAff : endpointPool.values()) { totalAffinity += epAff.getAffinity(); } int remainingSlots = width - endpoints.size(); while (remainingSlots > 0) { for(EndpointAffinity epAf : endpointPool.values()) { final int moreAllocation = (int) Math.ceil( (epAf.getAffinity() / totalAffinity) * remainingSlots); int currentAssignments = endpoints.get(epAf.getEndpoint()); for(int i=0; i < moreAllocation && totalAssigned < width && currentAssignments < parameters.getMaxWidthPerNode() && currentAssignments < epAf.getMaxWidth(); i++) { totalAssigned++; currentAssignments++; } endpoints.put(epAf.getEndpoint(), currentAssignments); } final int previousRemainingSlots = remainingSlots; remainingSlots = width - totalAssigned; if (previousRemainingSlots == remainingSlots) { logger.error("Can't parallelize fragment: " + "Every mandatory node has exhausted the maximum width per node limit." + EOL + "Endpoint pool: {}" + EOL + "Assignment so far: {}" + EOL + "Width: {}", endpointPool, endpoints, width); throw new PhysicalOperatorSetupException("Can not parallelize fragment."); } } final List<NodeEndpoint> assignedEndpoints = Lists.newArrayList(); for(Entry<NodeEndpoint, Integer> entry : endpoints.entrySet()) { for(int i=0; i < entry.getValue(); i++) { assignedEndpoints.add(entry.getKey()); } } fragmentWrapper.setWidth(width); fragmentWrapper.assignEndpoints(parameters, assignedEndpoints); } }
HardAffinityFragmentParallelizer implements FragmentParallelizer { @Override public void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints) throws PhysicalOperatorSetupException { final Stats stats = fragmentWrapper.getStats(); final ParallelizationInfo pInfo = stats.getParallelizationInfo(); int totalMaxWidth = 0; final Map<NodeEndpoint, EndpointAffinity> endpointPool = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : pInfo.getEndpointAffinityMap().entrySet()) { if (entry.getValue().isAssignmentRequired()) { endpointPool.put(entry.getKey(), entry.getValue()); totalMaxWidth += Math.min(parameters.getMaxWidthPerNode(), entry.getValue().getMaxWidth()); if (totalMaxWidth < 0) { totalMaxWidth = Integer.MAX_VALUE; } } } int width = (int) Math.ceil(stats.getMaxCost() / parameters.getSliceTarget()); width = Math.max(endpointPool.size(), width); width = Math.max(1, Math.min(width, pInfo.getMaxWidth())); checkOrThrow(endpointPool.size() <= width, logger, "Number of mandatory endpoints ({}) that require an assignment is more than the allowed fragment max " + "width ({}).", endpointPool.size(), pInfo.getMaxWidth()); width = Math.max(1, Math.min(width, endpointPool.size()*parameters.getMaxWidthPerNode())); width = Math.min(totalMaxWidth, width); final Map<NodeEndpoint, Integer> endpoints = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : endpointPool.entrySet()) { endpoints.put(entry.getKey(), 1); } int totalAssigned = endpoints.size(); double totalAffinity = 1.0; for(EndpointAffinity epAff : endpointPool.values()) { totalAffinity += epAff.getAffinity(); } int remainingSlots = width - endpoints.size(); while (remainingSlots > 0) { for(EndpointAffinity epAf : endpointPool.values()) { final int moreAllocation = (int) Math.ceil( (epAf.getAffinity() / totalAffinity) * remainingSlots); int currentAssignments = endpoints.get(epAf.getEndpoint()); for(int i=0; i < moreAllocation && totalAssigned < width && currentAssignments < parameters.getMaxWidthPerNode() && currentAssignments < epAf.getMaxWidth(); i++) { totalAssigned++; currentAssignments++; } endpoints.put(epAf.getEndpoint(), currentAssignments); } final int previousRemainingSlots = remainingSlots; remainingSlots = width - totalAssigned; if (previousRemainingSlots == remainingSlots) { logger.error("Can't parallelize fragment: " + "Every mandatory node has exhausted the maximum width per node limit." + EOL + "Endpoint pool: {}" + EOL + "Assignment so far: {}" + EOL + "Width: {}", endpointPool, endpoints, width); throw new PhysicalOperatorSetupException("Can not parallelize fragment."); } } final List<NodeEndpoint> assignedEndpoints = Lists.newArrayList(); for(Entry<NodeEndpoint, Integer> entry : endpoints.entrySet()) { for(int i=0; i < entry.getValue(); i++) { assignedEndpoints.add(entry.getKey()); } } fragmentWrapper.setWidth(width); fragmentWrapper.assignEndpoints(parameters, assignedEndpoints); } private HardAffinityFragmentParallelizer(); }
HardAffinityFragmentParallelizer implements FragmentParallelizer { @Override public void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints) throws PhysicalOperatorSetupException { final Stats stats = fragmentWrapper.getStats(); final ParallelizationInfo pInfo = stats.getParallelizationInfo(); int totalMaxWidth = 0; final Map<NodeEndpoint, EndpointAffinity> endpointPool = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : pInfo.getEndpointAffinityMap().entrySet()) { if (entry.getValue().isAssignmentRequired()) { endpointPool.put(entry.getKey(), entry.getValue()); totalMaxWidth += Math.min(parameters.getMaxWidthPerNode(), entry.getValue().getMaxWidth()); if (totalMaxWidth < 0) { totalMaxWidth = Integer.MAX_VALUE; } } } int width = (int) Math.ceil(stats.getMaxCost() / parameters.getSliceTarget()); width = Math.max(endpointPool.size(), width); width = Math.max(1, Math.min(width, pInfo.getMaxWidth())); checkOrThrow(endpointPool.size() <= width, logger, "Number of mandatory endpoints ({}) that require an assignment is more than the allowed fragment max " + "width ({}).", endpointPool.size(), pInfo.getMaxWidth()); width = Math.max(1, Math.min(width, endpointPool.size()*parameters.getMaxWidthPerNode())); width = Math.min(totalMaxWidth, width); final Map<NodeEndpoint, Integer> endpoints = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : endpointPool.entrySet()) { endpoints.put(entry.getKey(), 1); } int totalAssigned = endpoints.size(); double totalAffinity = 1.0; for(EndpointAffinity epAff : endpointPool.values()) { totalAffinity += epAff.getAffinity(); } int remainingSlots = width - endpoints.size(); while (remainingSlots > 0) { for(EndpointAffinity epAf : endpointPool.values()) { final int moreAllocation = (int) Math.ceil( (epAf.getAffinity() / totalAffinity) * remainingSlots); int currentAssignments = endpoints.get(epAf.getEndpoint()); for(int i=0; i < moreAllocation && totalAssigned < width && currentAssignments < parameters.getMaxWidthPerNode() && currentAssignments < epAf.getMaxWidth(); i++) { totalAssigned++; currentAssignments++; } endpoints.put(epAf.getEndpoint(), currentAssignments); } final int previousRemainingSlots = remainingSlots; remainingSlots = width - totalAssigned; if (previousRemainingSlots == remainingSlots) { logger.error("Can't parallelize fragment: " + "Every mandatory node has exhausted the maximum width per node limit." + EOL + "Endpoint pool: {}" + EOL + "Assignment so far: {}" + EOL + "Width: {}", endpointPool, endpoints, width); throw new PhysicalOperatorSetupException("Can not parallelize fragment."); } } final List<NodeEndpoint> assignedEndpoints = Lists.newArrayList(); for(Entry<NodeEndpoint, Integer> entry : endpoints.entrySet()) { for(int i=0; i < entry.getValue(); i++) { assignedEndpoints.add(entry.getKey()); } } fragmentWrapper.setWidth(width); fragmentWrapper.assignEndpoints(parameters, assignedEndpoints); } private HardAffinityFragmentParallelizer(); @Override void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints); @Override int getIdealFragmentWidth(final Wrapper fragment, final ParallelizationParameters parameters); }
HardAffinityFragmentParallelizer implements FragmentParallelizer { @Override public void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints) throws PhysicalOperatorSetupException { final Stats stats = fragmentWrapper.getStats(); final ParallelizationInfo pInfo = stats.getParallelizationInfo(); int totalMaxWidth = 0; final Map<NodeEndpoint, EndpointAffinity> endpointPool = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : pInfo.getEndpointAffinityMap().entrySet()) { if (entry.getValue().isAssignmentRequired()) { endpointPool.put(entry.getKey(), entry.getValue()); totalMaxWidth += Math.min(parameters.getMaxWidthPerNode(), entry.getValue().getMaxWidth()); if (totalMaxWidth < 0) { totalMaxWidth = Integer.MAX_VALUE; } } } int width = (int) Math.ceil(stats.getMaxCost() / parameters.getSliceTarget()); width = Math.max(endpointPool.size(), width); width = Math.max(1, Math.min(width, pInfo.getMaxWidth())); checkOrThrow(endpointPool.size() <= width, logger, "Number of mandatory endpoints ({}) that require an assignment is more than the allowed fragment max " + "width ({}).", endpointPool.size(), pInfo.getMaxWidth()); width = Math.max(1, Math.min(width, endpointPool.size()*parameters.getMaxWidthPerNode())); width = Math.min(totalMaxWidth, width); final Map<NodeEndpoint, Integer> endpoints = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : endpointPool.entrySet()) { endpoints.put(entry.getKey(), 1); } int totalAssigned = endpoints.size(); double totalAffinity = 1.0; for(EndpointAffinity epAff : endpointPool.values()) { totalAffinity += epAff.getAffinity(); } int remainingSlots = width - endpoints.size(); while (remainingSlots > 0) { for(EndpointAffinity epAf : endpointPool.values()) { final int moreAllocation = (int) Math.ceil( (epAf.getAffinity() / totalAffinity) * remainingSlots); int currentAssignments = endpoints.get(epAf.getEndpoint()); for(int i=0; i < moreAllocation && totalAssigned < width && currentAssignments < parameters.getMaxWidthPerNode() && currentAssignments < epAf.getMaxWidth(); i++) { totalAssigned++; currentAssignments++; } endpoints.put(epAf.getEndpoint(), currentAssignments); } final int previousRemainingSlots = remainingSlots; remainingSlots = width - totalAssigned; if (previousRemainingSlots == remainingSlots) { logger.error("Can't parallelize fragment: " + "Every mandatory node has exhausted the maximum width per node limit." + EOL + "Endpoint pool: {}" + EOL + "Assignment so far: {}" + EOL + "Width: {}", endpointPool, endpoints, width); throw new PhysicalOperatorSetupException("Can not parallelize fragment."); } } final List<NodeEndpoint> assignedEndpoints = Lists.newArrayList(); for(Entry<NodeEndpoint, Integer> entry : endpoints.entrySet()) { for(int i=0; i < entry.getValue(); i++) { assignedEndpoints.add(entry.getKey()); } } fragmentWrapper.setWidth(width); fragmentWrapper.assignEndpoints(parameters, assignedEndpoints); } private HardAffinityFragmentParallelizer(); @Override void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints); @Override int getIdealFragmentWidth(final Wrapper fragment, final ParallelizationParameters parameters); static final HardAffinityFragmentParallelizer INSTANCE; }
@Test
public void simpleCase2() throws Exception {
  final Wrapper wrapper = newWrapper(200, 1, 20,
      Collections.singletonList(new EndpointAffinity(N1_EP1, 1.0, true, 50)));
  INSTANCE.parallelizeFragment(wrapper, newParameters(1, 5, 20), null);

  // With a slice target of 1, the width is capped at maxWidthPerNode (5), all on the single mandatory endpoint.
  assertEquals(5, wrapper.getWidth());

  final List<NodeEndpoint> assignedEps = wrapper.getAssignedEndpoints();
  assertEquals(5, assignedEps.size());
  for (NodeEndpoint ep : assignedEps) {
    assertEquals(N1_EP1, ep);
  }
}
@Override public void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints) throws PhysicalOperatorSetupException { final Stats stats = fragmentWrapper.getStats(); final ParallelizationInfo pInfo = stats.getParallelizationInfo(); int totalMaxWidth = 0; final Map<NodeEndpoint, EndpointAffinity> endpointPool = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : pInfo.getEndpointAffinityMap().entrySet()) { if (entry.getValue().isAssignmentRequired()) { endpointPool.put(entry.getKey(), entry.getValue()); totalMaxWidth += Math.min(parameters.getMaxWidthPerNode(), entry.getValue().getMaxWidth()); if (totalMaxWidth < 0) { totalMaxWidth = Integer.MAX_VALUE; } } } int width = (int) Math.ceil(stats.getMaxCost() / parameters.getSliceTarget()); width = Math.max(endpointPool.size(), width); width = Math.max(1, Math.min(width, pInfo.getMaxWidth())); checkOrThrow(endpointPool.size() <= width, logger, "Number of mandatory endpoints ({}) that require an assignment is more than the allowed fragment max " + "width ({}).", endpointPool.size(), pInfo.getMaxWidth()); width = Math.max(1, Math.min(width, endpointPool.size()*parameters.getMaxWidthPerNode())); width = Math.min(totalMaxWidth, width); final Map<NodeEndpoint, Integer> endpoints = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : endpointPool.entrySet()) { endpoints.put(entry.getKey(), 1); } int totalAssigned = endpoints.size(); double totalAffinity = 1.0; for(EndpointAffinity epAff : endpointPool.values()) { totalAffinity += epAff.getAffinity(); } int remainingSlots = width - endpoints.size(); while (remainingSlots > 0) { for(EndpointAffinity epAf : endpointPool.values()) { final int moreAllocation = (int) Math.ceil( (epAf.getAffinity() / totalAffinity) * remainingSlots); int currentAssignments = endpoints.get(epAf.getEndpoint()); for(int i=0; i < moreAllocation && totalAssigned < width && currentAssignments < parameters.getMaxWidthPerNode() && currentAssignments < epAf.getMaxWidth(); i++) { totalAssigned++; currentAssignments++; } endpoints.put(epAf.getEndpoint(), currentAssignments); } final int previousRemainingSlots = remainingSlots; remainingSlots = width - totalAssigned; if (previousRemainingSlots == remainingSlots) { logger.error("Can't parallelize fragment: " + "Every mandatory node has exhausted the maximum width per node limit." + EOL + "Endpoint pool: {}" + EOL + "Assignment so far: {}" + EOL + "Width: {}", endpointPool, endpoints, width); throw new PhysicalOperatorSetupException("Can not parallelize fragment."); } } final List<NodeEndpoint> assignedEndpoints = Lists.newArrayList(); for(Entry<NodeEndpoint, Integer> entry : endpoints.entrySet()) { for(int i=0; i < entry.getValue(); i++) { assignedEndpoints.add(entry.getKey()); } } fragmentWrapper.setWidth(width); fragmentWrapper.assignEndpoints(parameters, assignedEndpoints); }
HardAffinityFragmentParallelizer implements FragmentParallelizer { @Override public void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints) throws PhysicalOperatorSetupException { final Stats stats = fragmentWrapper.getStats(); final ParallelizationInfo pInfo = stats.getParallelizationInfo(); int totalMaxWidth = 0; final Map<NodeEndpoint, EndpointAffinity> endpointPool = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : pInfo.getEndpointAffinityMap().entrySet()) { if (entry.getValue().isAssignmentRequired()) { endpointPool.put(entry.getKey(), entry.getValue()); totalMaxWidth += Math.min(parameters.getMaxWidthPerNode(), entry.getValue().getMaxWidth()); if (totalMaxWidth < 0) { totalMaxWidth = Integer.MAX_VALUE; } } } int width = (int) Math.ceil(stats.getMaxCost() / parameters.getSliceTarget()); width = Math.max(endpointPool.size(), width); width = Math.max(1, Math.min(width, pInfo.getMaxWidth())); checkOrThrow(endpointPool.size() <= width, logger, "Number of mandatory endpoints ({}) that require an assignment is more than the allowed fragment max " + "width ({}).", endpointPool.size(), pInfo.getMaxWidth()); width = Math.max(1, Math.min(width, endpointPool.size()*parameters.getMaxWidthPerNode())); width = Math.min(totalMaxWidth, width); final Map<NodeEndpoint, Integer> endpoints = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : endpointPool.entrySet()) { endpoints.put(entry.getKey(), 1); } int totalAssigned = endpoints.size(); double totalAffinity = 1.0; for(EndpointAffinity epAff : endpointPool.values()) { totalAffinity += epAff.getAffinity(); } int remainingSlots = width - endpoints.size(); while (remainingSlots > 0) { for(EndpointAffinity epAf : endpointPool.values()) { final int moreAllocation = (int) Math.ceil( (epAf.getAffinity() / totalAffinity) * remainingSlots); int currentAssignments = endpoints.get(epAf.getEndpoint()); for(int i=0; i < moreAllocation && totalAssigned < width && currentAssignments < parameters.getMaxWidthPerNode() && currentAssignments < epAf.getMaxWidth(); i++) { totalAssigned++; currentAssignments++; } endpoints.put(epAf.getEndpoint(), currentAssignments); } final int previousRemainingSlots = remainingSlots; remainingSlots = width - totalAssigned; if (previousRemainingSlots == remainingSlots) { logger.error("Can't parallelize fragment: " + "Every mandatory node has exhausted the maximum width per node limit." + EOL + "Endpoint pool: {}" + EOL + "Assignment so far: {}" + EOL + "Width: {}", endpointPool, endpoints, width); throw new PhysicalOperatorSetupException("Can not parallelize fragment."); } } final List<NodeEndpoint> assignedEndpoints = Lists.newArrayList(); for(Entry<NodeEndpoint, Integer> entry : endpoints.entrySet()) { for(int i=0; i < entry.getValue(); i++) { assignedEndpoints.add(entry.getKey()); } } fragmentWrapper.setWidth(width); fragmentWrapper.assignEndpoints(parameters, assignedEndpoints); } }
HardAffinityFragmentParallelizer implements FragmentParallelizer { @Override public void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints) throws PhysicalOperatorSetupException { final Stats stats = fragmentWrapper.getStats(); final ParallelizationInfo pInfo = stats.getParallelizationInfo(); int totalMaxWidth = 0; final Map<NodeEndpoint, EndpointAffinity> endpointPool = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : pInfo.getEndpointAffinityMap().entrySet()) { if (entry.getValue().isAssignmentRequired()) { endpointPool.put(entry.getKey(), entry.getValue()); totalMaxWidth += Math.min(parameters.getMaxWidthPerNode(), entry.getValue().getMaxWidth()); if (totalMaxWidth < 0) { totalMaxWidth = Integer.MAX_VALUE; } } } int width = (int) Math.ceil(stats.getMaxCost() / parameters.getSliceTarget()); width = Math.max(endpointPool.size(), width); width = Math.max(1, Math.min(width, pInfo.getMaxWidth())); checkOrThrow(endpointPool.size() <= width, logger, "Number of mandatory endpoints ({}) that require an assignment is more than the allowed fragment max " + "width ({}).", endpointPool.size(), pInfo.getMaxWidth()); width = Math.max(1, Math.min(width, endpointPool.size()*parameters.getMaxWidthPerNode())); width = Math.min(totalMaxWidth, width); final Map<NodeEndpoint, Integer> endpoints = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : endpointPool.entrySet()) { endpoints.put(entry.getKey(), 1); } int totalAssigned = endpoints.size(); double totalAffinity = 1.0; for(EndpointAffinity epAff : endpointPool.values()) { totalAffinity += epAff.getAffinity(); } int remainingSlots = width - endpoints.size(); while (remainingSlots > 0) { for(EndpointAffinity epAf : endpointPool.values()) { final int moreAllocation = (int) Math.ceil( (epAf.getAffinity() / totalAffinity) * remainingSlots); int currentAssignments = endpoints.get(epAf.getEndpoint()); for(int i=0; i < moreAllocation && totalAssigned < width && currentAssignments < parameters.getMaxWidthPerNode() && currentAssignments < epAf.getMaxWidth(); i++) { totalAssigned++; currentAssignments++; } endpoints.put(epAf.getEndpoint(), currentAssignments); } final int previousRemainingSlots = remainingSlots; remainingSlots = width - totalAssigned; if (previousRemainingSlots == remainingSlots) { logger.error("Can't parallelize fragment: " + "Every mandatory node has exhausted the maximum width per node limit." + EOL + "Endpoint pool: {}" + EOL + "Assignment so far: {}" + EOL + "Width: {}", endpointPool, endpoints, width); throw new PhysicalOperatorSetupException("Can not parallelize fragment."); } } final List<NodeEndpoint> assignedEndpoints = Lists.newArrayList(); for(Entry<NodeEndpoint, Integer> entry : endpoints.entrySet()) { for(int i=0; i < entry.getValue(); i++) { assignedEndpoints.add(entry.getKey()); } } fragmentWrapper.setWidth(width); fragmentWrapper.assignEndpoints(parameters, assignedEndpoints); } private HardAffinityFragmentParallelizer(); }
HardAffinityFragmentParallelizer implements FragmentParallelizer { @Override public void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints) throws PhysicalOperatorSetupException { final Stats stats = fragmentWrapper.getStats(); final ParallelizationInfo pInfo = stats.getParallelizationInfo(); int totalMaxWidth = 0; final Map<NodeEndpoint, EndpointAffinity> endpointPool = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : pInfo.getEndpointAffinityMap().entrySet()) { if (entry.getValue().isAssignmentRequired()) { endpointPool.put(entry.getKey(), entry.getValue()); totalMaxWidth += Math.min(parameters.getMaxWidthPerNode(), entry.getValue().getMaxWidth()); if (totalMaxWidth < 0) { totalMaxWidth = Integer.MAX_VALUE; } } } int width = (int) Math.ceil(stats.getMaxCost() / parameters.getSliceTarget()); width = Math.max(endpointPool.size(), width); width = Math.max(1, Math.min(width, pInfo.getMaxWidth())); checkOrThrow(endpointPool.size() <= width, logger, "Number of mandatory endpoints ({}) that require an assignment is more than the allowed fragment max " + "width ({}).", endpointPool.size(), pInfo.getMaxWidth()); width = Math.max(1, Math.min(width, endpointPool.size()*parameters.getMaxWidthPerNode())); width = Math.min(totalMaxWidth, width); final Map<NodeEndpoint, Integer> endpoints = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : endpointPool.entrySet()) { endpoints.put(entry.getKey(), 1); } int totalAssigned = endpoints.size(); double totalAffinity = 1.0; for(EndpointAffinity epAff : endpointPool.values()) { totalAffinity += epAff.getAffinity(); } int remainingSlots = width - endpoints.size(); while (remainingSlots > 0) { for(EndpointAffinity epAf : endpointPool.values()) { final int moreAllocation = (int) Math.ceil( (epAf.getAffinity() / totalAffinity) * remainingSlots); int currentAssignments = endpoints.get(epAf.getEndpoint()); for(int i=0; i < moreAllocation && totalAssigned < width && currentAssignments < parameters.getMaxWidthPerNode() && currentAssignments < epAf.getMaxWidth(); i++) { totalAssigned++; currentAssignments++; } endpoints.put(epAf.getEndpoint(), currentAssignments); } final int previousRemainingSlots = remainingSlots; remainingSlots = width - totalAssigned; if (previousRemainingSlots == remainingSlots) { logger.error("Can't parallelize fragment: " + "Every mandatory node has exhausted the maximum width per node limit." + EOL + "Endpoint pool: {}" + EOL + "Assignment so far: {}" + EOL + "Width: {}", endpointPool, endpoints, width); throw new PhysicalOperatorSetupException("Can not parallelize fragment."); } } final List<NodeEndpoint> assignedEndpoints = Lists.newArrayList(); for(Entry<NodeEndpoint, Integer> entry : endpoints.entrySet()) { for(int i=0; i < entry.getValue(); i++) { assignedEndpoints.add(entry.getKey()); } } fragmentWrapper.setWidth(width); fragmentWrapper.assignEndpoints(parameters, assignedEndpoints); } private HardAffinityFragmentParallelizer(); @Override void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints); @Override int getIdealFragmentWidth(final Wrapper fragment, final ParallelizationParameters parameters); }
HardAffinityFragmentParallelizer implements FragmentParallelizer { @Override public void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints) throws PhysicalOperatorSetupException { final Stats stats = fragmentWrapper.getStats(); final ParallelizationInfo pInfo = stats.getParallelizationInfo(); int totalMaxWidth = 0; final Map<NodeEndpoint, EndpointAffinity> endpointPool = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : pInfo.getEndpointAffinityMap().entrySet()) { if (entry.getValue().isAssignmentRequired()) { endpointPool.put(entry.getKey(), entry.getValue()); totalMaxWidth += Math.min(parameters.getMaxWidthPerNode(), entry.getValue().getMaxWidth()); if (totalMaxWidth < 0) { totalMaxWidth = Integer.MAX_VALUE; } } } int width = (int) Math.ceil(stats.getMaxCost() / parameters.getSliceTarget()); width = Math.max(endpointPool.size(), width); width = Math.max(1, Math.min(width, pInfo.getMaxWidth())); checkOrThrow(endpointPool.size() <= width, logger, "Number of mandatory endpoints ({}) that require an assignment is more than the allowed fragment max " + "width ({}).", endpointPool.size(), pInfo.getMaxWidth()); width = Math.max(1, Math.min(width, endpointPool.size()*parameters.getMaxWidthPerNode())); width = Math.min(totalMaxWidth, width); final Map<NodeEndpoint, Integer> endpoints = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : endpointPool.entrySet()) { endpoints.put(entry.getKey(), 1); } int totalAssigned = endpoints.size(); double totalAffinity = 1.0; for(EndpointAffinity epAff : endpointPool.values()) { totalAffinity += epAff.getAffinity(); } int remainingSlots = width - endpoints.size(); while (remainingSlots > 0) { for(EndpointAffinity epAf : endpointPool.values()) { final int moreAllocation = (int) Math.ceil( (epAf.getAffinity() / totalAffinity) * remainingSlots); int currentAssignments = endpoints.get(epAf.getEndpoint()); for(int i=0; i < moreAllocation && totalAssigned < width && currentAssignments < parameters.getMaxWidthPerNode() && currentAssignments < epAf.getMaxWidth(); i++) { totalAssigned++; currentAssignments++; } endpoints.put(epAf.getEndpoint(), currentAssignments); } final int previousRemainingSlots = remainingSlots; remainingSlots = width - totalAssigned; if (previousRemainingSlots == remainingSlots) { logger.error("Can't parallelize fragment: " + "Every mandatory node has exhausted the maximum width per node limit." + EOL + "Endpoint pool: {}" + EOL + "Assignment so far: {}" + EOL + "Width: {}", endpointPool, endpoints, width); throw new PhysicalOperatorSetupException("Can not parallelize fragment."); } } final List<NodeEndpoint> assignedEndpoints = Lists.newArrayList(); for(Entry<NodeEndpoint, Integer> entry : endpoints.entrySet()) { for(int i=0; i < entry.getValue(); i++) { assignedEndpoints.add(entry.getKey()); } } fragmentWrapper.setWidth(width); fragmentWrapper.assignEndpoints(parameters, assignedEndpoints); } private HardAffinityFragmentParallelizer(); @Override void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints); @Override int getIdealFragmentWidth(final Wrapper fragment, final ParallelizationParameters parameters); static final HardAffinityFragmentParallelizer INSTANCE; }
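The variants above all repeat the same parallelizeFragment body, so a compact, dependency-free sketch of its width-clamping sequence may help when reading the test rows that follow. The class, method, and parameter names below are invented for illustration; only the order of the clamps mirrors the code above, and the numbers in main are assumed example values, not data from this document.

// Minimal sketch of the width-clamping sequence in parallelizeFragment (illustrative names).
public final class WidthClampSketch {
  static int clampWidth(double maxCost, long sliceTarget, int mandatoryEndpoints,
                        int fragmentMaxWidth, int maxWidthPerNode, int totalMaxWidth) {
    int width = (int) Math.ceil(maxCost / sliceTarget);       // cost-based width
    width = Math.max(mandatoryEndpoints, width);              // at least one slot per mandatory endpoint
    width = Math.max(1, Math.min(width, fragmentMaxWidth));   // respect the fragment's max width
    width = Math.max(1, Math.min(width, mandatoryEndpoints * maxWidthPerNode));
    return Math.min(totalMaxWidth, width);                    // never exceed the pooled per-endpoint caps
  }

  public static void main(String[] args) {
    // Assumed example: cost 200, slice target 1, 5 mandatory endpoints, fragment max width 20,
    // 5 slots per node, pooled cap 25 -> clamped to 20.
    System.out.println(clampWidth(200, 1, 5, 20, 5, 25)); // prints 20
  }
}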
@Test public void multiNodeCluster1() throws Exception { final Wrapper wrapper = newWrapper(200, 1, 20, ImmutableList.of( new EndpointAffinity(N1_EP1, 0.15, true, 50), new EndpointAffinity(N1_EP2, 0.15, true, 50), new EndpointAffinity(N2_EP1, 0.10, true, 50), new EndpointAffinity(N3_EP2, 0.20, true, 50), new EndpointAffinity(N4_EP2, 0.20, true, 50) )); INSTANCE.parallelizeFragment(wrapper, newParameters(SLICE_TARGET_DEFAULT, 5, 20), null); assertEquals(5, wrapper.getWidth()); final List<NodeEndpoint> assignedEps = wrapper.getAssignedEndpoints(); assertEquals(5, assignedEps.size()); assertTrue(assignedEps.contains(N1_EP1)); assertTrue(assignedEps.contains(N1_EP2)); assertTrue(assignedEps.contains(N2_EP1)); assertTrue(assignedEps.contains(N3_EP2)); assertTrue(assignedEps.contains(N4_EP2)); }
@Override public void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints) throws PhysicalOperatorSetupException { final Stats stats = fragmentWrapper.getStats(); final ParallelizationInfo pInfo = stats.getParallelizationInfo(); int totalMaxWidth = 0; final Map<NodeEndpoint, EndpointAffinity> endpointPool = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : pInfo.getEndpointAffinityMap().entrySet()) { if (entry.getValue().isAssignmentRequired()) { endpointPool.put(entry.getKey(), entry.getValue()); totalMaxWidth += Math.min(parameters.getMaxWidthPerNode(), entry.getValue().getMaxWidth()); if (totalMaxWidth < 0) { totalMaxWidth = Integer.MAX_VALUE; } } } int width = (int) Math.ceil(stats.getMaxCost() / parameters.getSliceTarget()); width = Math.max(endpointPool.size(), width); width = Math.max(1, Math.min(width, pInfo.getMaxWidth())); checkOrThrow(endpointPool.size() <= width, logger, "Number of mandatory endpoints ({}) that require an assignment is more than the allowed fragment max " + "width ({}).", endpointPool.size(), pInfo.getMaxWidth()); width = Math.max(1, Math.min(width, endpointPool.size()*parameters.getMaxWidthPerNode())); width = Math.min(totalMaxWidth, width); final Map<NodeEndpoint, Integer> endpoints = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : endpointPool.entrySet()) { endpoints.put(entry.getKey(), 1); } int totalAssigned = endpoints.size(); double totalAffinity = 1.0; for(EndpointAffinity epAff : endpointPool.values()) { totalAffinity += epAff.getAffinity(); } int remainingSlots = width - endpoints.size(); while (remainingSlots > 0) { for(EndpointAffinity epAf : endpointPool.values()) { final int moreAllocation = (int) Math.ceil( (epAf.getAffinity() / totalAffinity) * remainingSlots); int currentAssignments = endpoints.get(epAf.getEndpoint()); for(int i=0; i < moreAllocation && totalAssigned < width && currentAssignments < parameters.getMaxWidthPerNode() && currentAssignments < epAf.getMaxWidth(); i++) { totalAssigned++; currentAssignments++; } endpoints.put(epAf.getEndpoint(), currentAssignments); } final int previousRemainingSlots = remainingSlots; remainingSlots = width - totalAssigned; if (previousRemainingSlots == remainingSlots) { logger.error("Can't parallelize fragment: " + "Every mandatory node has exhausted the maximum width per node limit." + EOL + "Endpoint pool: {}" + EOL + "Assignment so far: {}" + EOL + "Width: {}", endpointPool, endpoints, width); throw new PhysicalOperatorSetupException("Can not parallelize fragment."); } } final List<NodeEndpoint> assignedEndpoints = Lists.newArrayList(); for(Entry<NodeEndpoint, Integer> entry : endpoints.entrySet()) { for(int i=0; i < entry.getValue(); i++) { assignedEndpoints.add(entry.getKey()); } } fragmentWrapper.setWidth(width); fragmentWrapper.assignEndpoints(parameters, assignedEndpoints); }
HardAffinityFragmentParallelizer implements FragmentParallelizer { @Override public void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints) throws PhysicalOperatorSetupException { final Stats stats = fragmentWrapper.getStats(); final ParallelizationInfo pInfo = stats.getParallelizationInfo(); int totalMaxWidth = 0; final Map<NodeEndpoint, EndpointAffinity> endpointPool = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : pInfo.getEndpointAffinityMap().entrySet()) { if (entry.getValue().isAssignmentRequired()) { endpointPool.put(entry.getKey(), entry.getValue()); totalMaxWidth += Math.min(parameters.getMaxWidthPerNode(), entry.getValue().getMaxWidth()); if (totalMaxWidth < 0) { totalMaxWidth = Integer.MAX_VALUE; } } } int width = (int) Math.ceil(stats.getMaxCost() / parameters.getSliceTarget()); width = Math.max(endpointPool.size(), width); width = Math.max(1, Math.min(width, pInfo.getMaxWidth())); checkOrThrow(endpointPool.size() <= width, logger, "Number of mandatory endpoints ({}) that require an assignment is more than the allowed fragment max " + "width ({}).", endpointPool.size(), pInfo.getMaxWidth()); width = Math.max(1, Math.min(width, endpointPool.size()*parameters.getMaxWidthPerNode())); width = Math.min(totalMaxWidth, width); final Map<NodeEndpoint, Integer> endpoints = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : endpointPool.entrySet()) { endpoints.put(entry.getKey(), 1); } int totalAssigned = endpoints.size(); double totalAffinity = 1.0; for(EndpointAffinity epAff : endpointPool.values()) { totalAffinity += epAff.getAffinity(); } int remainingSlots = width - endpoints.size(); while (remainingSlots > 0) { for(EndpointAffinity epAf : endpointPool.values()) { final int moreAllocation = (int) Math.ceil( (epAf.getAffinity() / totalAffinity) * remainingSlots); int currentAssignments = endpoints.get(epAf.getEndpoint()); for(int i=0; i < moreAllocation && totalAssigned < width && currentAssignments < parameters.getMaxWidthPerNode() && currentAssignments < epAf.getMaxWidth(); i++) { totalAssigned++; currentAssignments++; } endpoints.put(epAf.getEndpoint(), currentAssignments); } final int previousRemainingSlots = remainingSlots; remainingSlots = width - totalAssigned; if (previousRemainingSlots == remainingSlots) { logger.error("Can't parallelize fragment: " + "Every mandatory node has exhausted the maximum width per node limit." + EOL + "Endpoint pool: {}" + EOL + "Assignment so far: {}" + EOL + "Width: {}", endpointPool, endpoints, width); throw new PhysicalOperatorSetupException("Can not parallelize fragment."); } } final List<NodeEndpoint> assignedEndpoints = Lists.newArrayList(); for(Entry<NodeEndpoint, Integer> entry : endpoints.entrySet()) { for(int i=0; i < entry.getValue(); i++) { assignedEndpoints.add(entry.getKey()); } } fragmentWrapper.setWidth(width); fragmentWrapper.assignEndpoints(parameters, assignedEndpoints); } }
HardAffinityFragmentParallelizer implements FragmentParallelizer { @Override public void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints) throws PhysicalOperatorSetupException { final Stats stats = fragmentWrapper.getStats(); final ParallelizationInfo pInfo = stats.getParallelizationInfo(); int totalMaxWidth = 0; final Map<NodeEndpoint, EndpointAffinity> endpointPool = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : pInfo.getEndpointAffinityMap().entrySet()) { if (entry.getValue().isAssignmentRequired()) { endpointPool.put(entry.getKey(), entry.getValue()); totalMaxWidth += Math.min(parameters.getMaxWidthPerNode(), entry.getValue().getMaxWidth()); if (totalMaxWidth < 0) { totalMaxWidth = Integer.MAX_VALUE; } } } int width = (int) Math.ceil(stats.getMaxCost() / parameters.getSliceTarget()); width = Math.max(endpointPool.size(), width); width = Math.max(1, Math.min(width, pInfo.getMaxWidth())); checkOrThrow(endpointPool.size() <= width, logger, "Number of mandatory endpoints ({}) that require an assignment is more than the allowed fragment max " + "width ({}).", endpointPool.size(), pInfo.getMaxWidth()); width = Math.max(1, Math.min(width, endpointPool.size()*parameters.getMaxWidthPerNode())); width = Math.min(totalMaxWidth, width); final Map<NodeEndpoint, Integer> endpoints = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : endpointPool.entrySet()) { endpoints.put(entry.getKey(), 1); } int totalAssigned = endpoints.size(); double totalAffinity = 1.0; for(EndpointAffinity epAff : endpointPool.values()) { totalAffinity += epAff.getAffinity(); } int remainingSlots = width - endpoints.size(); while (remainingSlots > 0) { for(EndpointAffinity epAf : endpointPool.values()) { final int moreAllocation = (int) Math.ceil( (epAf.getAffinity() / totalAffinity) * remainingSlots); int currentAssignments = endpoints.get(epAf.getEndpoint()); for(int i=0; i < moreAllocation && totalAssigned < width && currentAssignments < parameters.getMaxWidthPerNode() && currentAssignments < epAf.getMaxWidth(); i++) { totalAssigned++; currentAssignments++; } endpoints.put(epAf.getEndpoint(), currentAssignments); } final int previousRemainingSlots = remainingSlots; remainingSlots = width - totalAssigned; if (previousRemainingSlots == remainingSlots) { logger.error("Can't parallelize fragment: " + "Every mandatory node has exhausted the maximum width per node limit." + EOL + "Endpoint pool: {}" + EOL + "Assignment so far: {}" + EOL + "Width: {}", endpointPool, endpoints, width); throw new PhysicalOperatorSetupException("Can not parallelize fragment."); } } final List<NodeEndpoint> assignedEndpoints = Lists.newArrayList(); for(Entry<NodeEndpoint, Integer> entry : endpoints.entrySet()) { for(int i=0; i < entry.getValue(); i++) { assignedEndpoints.add(entry.getKey()); } } fragmentWrapper.setWidth(width); fragmentWrapper.assignEndpoints(parameters, assignedEndpoints); } private HardAffinityFragmentParallelizer(); }
HardAffinityFragmentParallelizer implements FragmentParallelizer { @Override public void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints) throws PhysicalOperatorSetupException { final Stats stats = fragmentWrapper.getStats(); final ParallelizationInfo pInfo = stats.getParallelizationInfo(); int totalMaxWidth = 0; final Map<NodeEndpoint, EndpointAffinity> endpointPool = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : pInfo.getEndpointAffinityMap().entrySet()) { if (entry.getValue().isAssignmentRequired()) { endpointPool.put(entry.getKey(), entry.getValue()); totalMaxWidth += Math.min(parameters.getMaxWidthPerNode(), entry.getValue().getMaxWidth()); if (totalMaxWidth < 0) { totalMaxWidth = Integer.MAX_VALUE; } } } int width = (int) Math.ceil(stats.getMaxCost() / parameters.getSliceTarget()); width = Math.max(endpointPool.size(), width); width = Math.max(1, Math.min(width, pInfo.getMaxWidth())); checkOrThrow(endpointPool.size() <= width, logger, "Number of mandatory endpoints ({}) that require an assignment is more than the allowed fragment max " + "width ({}).", endpointPool.size(), pInfo.getMaxWidth()); width = Math.max(1, Math.min(width, endpointPool.size()*parameters.getMaxWidthPerNode())); width = Math.min(totalMaxWidth, width); final Map<NodeEndpoint, Integer> endpoints = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : endpointPool.entrySet()) { endpoints.put(entry.getKey(), 1); } int totalAssigned = endpoints.size(); double totalAffinity = 1.0; for(EndpointAffinity epAff : endpointPool.values()) { totalAffinity += epAff.getAffinity(); } int remainingSlots = width - endpoints.size(); while (remainingSlots > 0) { for(EndpointAffinity epAf : endpointPool.values()) { final int moreAllocation = (int) Math.ceil( (epAf.getAffinity() / totalAffinity) * remainingSlots); int currentAssignments = endpoints.get(epAf.getEndpoint()); for(int i=0; i < moreAllocation && totalAssigned < width && currentAssignments < parameters.getMaxWidthPerNode() && currentAssignments < epAf.getMaxWidth(); i++) { totalAssigned++; currentAssignments++; } endpoints.put(epAf.getEndpoint(), currentAssignments); } final int previousRemainingSlots = remainingSlots; remainingSlots = width - totalAssigned; if (previousRemainingSlots == remainingSlots) { logger.error("Can't parallelize fragment: " + "Every mandatory node has exhausted the maximum width per node limit." + EOL + "Endpoint pool: {}" + EOL + "Assignment so far: {}" + EOL + "Width: {}", endpointPool, endpoints, width); throw new PhysicalOperatorSetupException("Can not parallelize fragment."); } } final List<NodeEndpoint> assignedEndpoints = Lists.newArrayList(); for(Entry<NodeEndpoint, Integer> entry : endpoints.entrySet()) { for(int i=0; i < entry.getValue(); i++) { assignedEndpoints.add(entry.getKey()); } } fragmentWrapper.setWidth(width); fragmentWrapper.assignEndpoints(parameters, assignedEndpoints); } private HardAffinityFragmentParallelizer(); @Override void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints); @Override int getIdealFragmentWidth(final Wrapper fragment, final ParallelizationParameters parameters); }
HardAffinityFragmentParallelizer implements FragmentParallelizer { @Override public void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints) throws PhysicalOperatorSetupException { final Stats stats = fragmentWrapper.getStats(); final ParallelizationInfo pInfo = stats.getParallelizationInfo(); int totalMaxWidth = 0; final Map<NodeEndpoint, EndpointAffinity> endpointPool = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : pInfo.getEndpointAffinityMap().entrySet()) { if (entry.getValue().isAssignmentRequired()) { endpointPool.put(entry.getKey(), entry.getValue()); totalMaxWidth += Math.min(parameters.getMaxWidthPerNode(), entry.getValue().getMaxWidth()); if (totalMaxWidth < 0) { totalMaxWidth = Integer.MAX_VALUE; } } } int width = (int) Math.ceil(stats.getMaxCost() / parameters.getSliceTarget()); width = Math.max(endpointPool.size(), width); width = Math.max(1, Math.min(width, pInfo.getMaxWidth())); checkOrThrow(endpointPool.size() <= width, logger, "Number of mandatory endpoints ({}) that require an assignment is more than the allowed fragment max " + "width ({}).", endpointPool.size(), pInfo.getMaxWidth()); width = Math.max(1, Math.min(width, endpointPool.size()*parameters.getMaxWidthPerNode())); width = Math.min(totalMaxWidth, width); final Map<NodeEndpoint, Integer> endpoints = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : endpointPool.entrySet()) { endpoints.put(entry.getKey(), 1); } int totalAssigned = endpoints.size(); double totalAffinity = 1.0; for(EndpointAffinity epAff : endpointPool.values()) { totalAffinity += epAff.getAffinity(); } int remainingSlots = width - endpoints.size(); while (remainingSlots > 0) { for(EndpointAffinity epAf : endpointPool.values()) { final int moreAllocation = (int) Math.ceil( (epAf.getAffinity() / totalAffinity) * remainingSlots); int currentAssignments = endpoints.get(epAf.getEndpoint()); for(int i=0; i < moreAllocation && totalAssigned < width && currentAssignments < parameters.getMaxWidthPerNode() && currentAssignments < epAf.getMaxWidth(); i++) { totalAssigned++; currentAssignments++; } endpoints.put(epAf.getEndpoint(), currentAssignments); } final int previousRemainingSlots = remainingSlots; remainingSlots = width - totalAssigned; if (previousRemainingSlots == remainingSlots) { logger.error("Can't parallelize fragment: " + "Every mandatory node has exhausted the maximum width per node limit." + EOL + "Endpoint pool: {}" + EOL + "Assignment so far: {}" + EOL + "Width: {}", endpointPool, endpoints, width); throw new PhysicalOperatorSetupException("Can not parallelize fragment."); } } final List<NodeEndpoint> assignedEndpoints = Lists.newArrayList(); for(Entry<NodeEndpoint, Integer> entry : endpoints.entrySet()) { for(int i=0; i < entry.getValue(); i++) { assignedEndpoints.add(entry.getKey()); } } fragmentWrapper.setWidth(width); fragmentWrapper.assignEndpoints(parameters, assignedEndpoints); } private HardAffinityFragmentParallelizer(); @Override void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints); @Override int getIdealFragmentWidth(final Wrapper fragment, final ParallelizationParameters parameters); static final HardAffinityFragmentParallelizer INSTANCE; }
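For the multiNodeCluster1 row above, the expected width of 5 falls out of the clamping order directly. A minimal walk-through, assuming SLICE_TARGET_DEFAULT is a large value such as 100_000 (the constant's actual value is not shown in this document; any sufficiently large target gives the same result):

// Illustrative arithmetic only; class name and the slice-target value are assumptions.
public final class MultiNodeCluster1Arithmetic {
  public static void main(String[] args) {
    int width = (int) Math.ceil(200d / 100_000);   // cost 200, large slice target -> 1
    width = Math.max(5, width);                    // 5 mandatory endpoints -> 5
    width = Math.max(1, Math.min(width, 20));      // fragment max width 20 -> still 5
    width = Math.max(1, Math.min(width, 5 * 5));   // 5 endpoints * maxWidthPerNode 5 -> 5
    width = Math.min(5 * Math.min(5, 50), width);  // pooled cap 25 -> final width 5
    System.out.println(width);                     // 5: one slot per mandatory endpoint
  }
}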
@Test public void multiNodeCluster2() throws Exception { final Wrapper wrapper = newWrapper(200, 1, 20, ImmutableList.of( new EndpointAffinity(N1_EP2, 0.15, true, 50), new EndpointAffinity(N2_EP2, 0.15, true, 50), new EndpointAffinity(N3_EP1, 0.10, true, 50), new EndpointAffinity(N4_EP2, 0.20, true, 50), new EndpointAffinity(N1_EP1, 0.20, true, 50) )); INSTANCE.parallelizeFragment(wrapper, newParameters(1, 5, 20), null); assertEquals(20, wrapper.getWidth()); final List<NodeEndpoint> assignedEps = wrapper.getAssignedEndpoints(); assertEquals(20, assignedEps.size()); final HashMultiset<NodeEndpoint> counts = HashMultiset.create(); for(final NodeEndpoint ep : assignedEps) { counts.add(ep); } assertTrue(counts.count(N1_EP2) <= 5); assertTrue(counts.count(N2_EP2) <= 5); assertTrue(counts.count(N3_EP1) <= 5); assertTrue(counts.count(N4_EP2) <= 5); assertTrue(counts.count(N1_EP1) <= 5); }
@Override public void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints) throws PhysicalOperatorSetupException { final Stats stats = fragmentWrapper.getStats(); final ParallelizationInfo pInfo = stats.getParallelizationInfo(); int totalMaxWidth = 0; final Map<NodeEndpoint, EndpointAffinity> endpointPool = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : pInfo.getEndpointAffinityMap().entrySet()) { if (entry.getValue().isAssignmentRequired()) { endpointPool.put(entry.getKey(), entry.getValue()); totalMaxWidth += Math.min(parameters.getMaxWidthPerNode(), entry.getValue().getMaxWidth()); if (totalMaxWidth < 0) { totalMaxWidth = Integer.MAX_VALUE; } } } int width = (int) Math.ceil(stats.getMaxCost() / parameters.getSliceTarget()); width = Math.max(endpointPool.size(), width); width = Math.max(1, Math.min(width, pInfo.getMaxWidth())); checkOrThrow(endpointPool.size() <= width, logger, "Number of mandatory endpoints ({}) that require an assignment is more than the allowed fragment max " + "width ({}).", endpointPool.size(), pInfo.getMaxWidth()); width = Math.max(1, Math.min(width, endpointPool.size()*parameters.getMaxWidthPerNode())); width = Math.min(totalMaxWidth, width); final Map<NodeEndpoint, Integer> endpoints = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : endpointPool.entrySet()) { endpoints.put(entry.getKey(), 1); } int totalAssigned = endpoints.size(); double totalAffinity = 1.0; for(EndpointAffinity epAff : endpointPool.values()) { totalAffinity += epAff.getAffinity(); } int remainingSlots = width - endpoints.size(); while (remainingSlots > 0) { for(EndpointAffinity epAf : endpointPool.values()) { final int moreAllocation = (int) Math.ceil( (epAf.getAffinity() / totalAffinity) * remainingSlots); int currentAssignments = endpoints.get(epAf.getEndpoint()); for(int i=0; i < moreAllocation && totalAssigned < width && currentAssignments < parameters.getMaxWidthPerNode() && currentAssignments < epAf.getMaxWidth(); i++) { totalAssigned++; currentAssignments++; } endpoints.put(epAf.getEndpoint(), currentAssignments); } final int previousRemainingSlots = remainingSlots; remainingSlots = width - totalAssigned; if (previousRemainingSlots == remainingSlots) { logger.error("Can't parallelize fragment: " + "Every mandatory node has exhausted the maximum width per node limit." + EOL + "Endpoint pool: {}" + EOL + "Assignment so far: {}" + EOL + "Width: {}", endpointPool, endpoints, width); throw new PhysicalOperatorSetupException("Can not parallelize fragment."); } } final List<NodeEndpoint> assignedEndpoints = Lists.newArrayList(); for(Entry<NodeEndpoint, Integer> entry : endpoints.entrySet()) { for(int i=0; i < entry.getValue(); i++) { assignedEndpoints.add(entry.getKey()); } } fragmentWrapper.setWidth(width); fragmentWrapper.assignEndpoints(parameters, assignedEndpoints); }
HardAffinityFragmentParallelizer implements FragmentParallelizer { @Override public void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints) throws PhysicalOperatorSetupException { final Stats stats = fragmentWrapper.getStats(); final ParallelizationInfo pInfo = stats.getParallelizationInfo(); int totalMaxWidth = 0; final Map<NodeEndpoint, EndpointAffinity> endpointPool = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : pInfo.getEndpointAffinityMap().entrySet()) { if (entry.getValue().isAssignmentRequired()) { endpointPool.put(entry.getKey(), entry.getValue()); totalMaxWidth += Math.min(parameters.getMaxWidthPerNode(), entry.getValue().getMaxWidth()); if (totalMaxWidth < 0) { totalMaxWidth = Integer.MAX_VALUE; } } } int width = (int) Math.ceil(stats.getMaxCost() / parameters.getSliceTarget()); width = Math.max(endpointPool.size(), width); width = Math.max(1, Math.min(width, pInfo.getMaxWidth())); checkOrThrow(endpointPool.size() <= width, logger, "Number of mandatory endpoints ({}) that require an assignment is more than the allowed fragment max " + "width ({}).", endpointPool.size(), pInfo.getMaxWidth()); width = Math.max(1, Math.min(width, endpointPool.size()*parameters.getMaxWidthPerNode())); width = Math.min(totalMaxWidth, width); final Map<NodeEndpoint, Integer> endpoints = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : endpointPool.entrySet()) { endpoints.put(entry.getKey(), 1); } int totalAssigned = endpoints.size(); double totalAffinity = 1.0; for(EndpointAffinity epAff : endpointPool.values()) { totalAffinity += epAff.getAffinity(); } int remainingSlots = width - endpoints.size(); while (remainingSlots > 0) { for(EndpointAffinity epAf : endpointPool.values()) { final int moreAllocation = (int) Math.ceil( (epAf.getAffinity() / totalAffinity) * remainingSlots); int currentAssignments = endpoints.get(epAf.getEndpoint()); for(int i=0; i < moreAllocation && totalAssigned < width && currentAssignments < parameters.getMaxWidthPerNode() && currentAssignments < epAf.getMaxWidth(); i++) { totalAssigned++; currentAssignments++; } endpoints.put(epAf.getEndpoint(), currentAssignments); } final int previousRemainingSlots = remainingSlots; remainingSlots = width - totalAssigned; if (previousRemainingSlots == remainingSlots) { logger.error("Can't parallelize fragment: " + "Every mandatory node has exhausted the maximum width per node limit." + EOL + "Endpoint pool: {}" + EOL + "Assignment so far: {}" + EOL + "Width: {}", endpointPool, endpoints, width); throw new PhysicalOperatorSetupException("Can not parallelize fragment."); } } final List<NodeEndpoint> assignedEndpoints = Lists.newArrayList(); for(Entry<NodeEndpoint, Integer> entry : endpoints.entrySet()) { for(int i=0; i < entry.getValue(); i++) { assignedEndpoints.add(entry.getKey()); } } fragmentWrapper.setWidth(width); fragmentWrapper.assignEndpoints(parameters, assignedEndpoints); } }
HardAffinityFragmentParallelizer implements FragmentParallelizer { @Override public void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints) throws PhysicalOperatorSetupException { final Stats stats = fragmentWrapper.getStats(); final ParallelizationInfo pInfo = stats.getParallelizationInfo(); int totalMaxWidth = 0; final Map<NodeEndpoint, EndpointAffinity> endpointPool = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : pInfo.getEndpointAffinityMap().entrySet()) { if (entry.getValue().isAssignmentRequired()) { endpointPool.put(entry.getKey(), entry.getValue()); totalMaxWidth += Math.min(parameters.getMaxWidthPerNode(), entry.getValue().getMaxWidth()); if (totalMaxWidth < 0) { totalMaxWidth = Integer.MAX_VALUE; } } } int width = (int) Math.ceil(stats.getMaxCost() / parameters.getSliceTarget()); width = Math.max(endpointPool.size(), width); width = Math.max(1, Math.min(width, pInfo.getMaxWidth())); checkOrThrow(endpointPool.size() <= width, logger, "Number of mandatory endpoints ({}) that require an assignment is more than the allowed fragment max " + "width ({}).", endpointPool.size(), pInfo.getMaxWidth()); width = Math.max(1, Math.min(width, endpointPool.size()*parameters.getMaxWidthPerNode())); width = Math.min(totalMaxWidth, width); final Map<NodeEndpoint, Integer> endpoints = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : endpointPool.entrySet()) { endpoints.put(entry.getKey(), 1); } int totalAssigned = endpoints.size(); double totalAffinity = 1.0; for(EndpointAffinity epAff : endpointPool.values()) { totalAffinity += epAff.getAffinity(); } int remainingSlots = width - endpoints.size(); while (remainingSlots > 0) { for(EndpointAffinity epAf : endpointPool.values()) { final int moreAllocation = (int) Math.ceil( (epAf.getAffinity() / totalAffinity) * remainingSlots); int currentAssignments = endpoints.get(epAf.getEndpoint()); for(int i=0; i < moreAllocation && totalAssigned < width && currentAssignments < parameters.getMaxWidthPerNode() && currentAssignments < epAf.getMaxWidth(); i++) { totalAssigned++; currentAssignments++; } endpoints.put(epAf.getEndpoint(), currentAssignments); } final int previousRemainingSlots = remainingSlots; remainingSlots = width - totalAssigned; if (previousRemainingSlots == remainingSlots) { logger.error("Can't parallelize fragment: " + "Every mandatory node has exhausted the maximum width per node limit." + EOL + "Endpoint pool: {}" + EOL + "Assignment so far: {}" + EOL + "Width: {}", endpointPool, endpoints, width); throw new PhysicalOperatorSetupException("Can not parallelize fragment."); } } final List<NodeEndpoint> assignedEndpoints = Lists.newArrayList(); for(Entry<NodeEndpoint, Integer> entry : endpoints.entrySet()) { for(int i=0; i < entry.getValue(); i++) { assignedEndpoints.add(entry.getKey()); } } fragmentWrapper.setWidth(width); fragmentWrapper.assignEndpoints(parameters, assignedEndpoints); } private HardAffinityFragmentParallelizer(); }
HardAffinityFragmentParallelizer implements FragmentParallelizer { @Override public void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints) throws PhysicalOperatorSetupException { final Stats stats = fragmentWrapper.getStats(); final ParallelizationInfo pInfo = stats.getParallelizationInfo(); int totalMaxWidth = 0; final Map<NodeEndpoint, EndpointAffinity> endpointPool = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : pInfo.getEndpointAffinityMap().entrySet()) { if (entry.getValue().isAssignmentRequired()) { endpointPool.put(entry.getKey(), entry.getValue()); totalMaxWidth += Math.min(parameters.getMaxWidthPerNode(), entry.getValue().getMaxWidth()); if (totalMaxWidth < 0) { totalMaxWidth = Integer.MAX_VALUE; } } } int width = (int) Math.ceil(stats.getMaxCost() / parameters.getSliceTarget()); width = Math.max(endpointPool.size(), width); width = Math.max(1, Math.min(width, pInfo.getMaxWidth())); checkOrThrow(endpointPool.size() <= width, logger, "Number of mandatory endpoints ({}) that require an assignment is more than the allowed fragment max " + "width ({}).", endpointPool.size(), pInfo.getMaxWidth()); width = Math.max(1, Math.min(width, endpointPool.size()*parameters.getMaxWidthPerNode())); width = Math.min(totalMaxWidth, width); final Map<NodeEndpoint, Integer> endpoints = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : endpointPool.entrySet()) { endpoints.put(entry.getKey(), 1); } int totalAssigned = endpoints.size(); double totalAffinity = 1.0; for(EndpointAffinity epAff : endpointPool.values()) { totalAffinity += epAff.getAffinity(); } int remainingSlots = width - endpoints.size(); while (remainingSlots > 0) { for(EndpointAffinity epAf : endpointPool.values()) { final int moreAllocation = (int) Math.ceil( (epAf.getAffinity() / totalAffinity) * remainingSlots); int currentAssignments = endpoints.get(epAf.getEndpoint()); for(int i=0; i < moreAllocation && totalAssigned < width && currentAssignments < parameters.getMaxWidthPerNode() && currentAssignments < epAf.getMaxWidth(); i++) { totalAssigned++; currentAssignments++; } endpoints.put(epAf.getEndpoint(), currentAssignments); } final int previousRemainingSlots = remainingSlots; remainingSlots = width - totalAssigned; if (previousRemainingSlots == remainingSlots) { logger.error("Can't parallelize fragment: " + "Every mandatory node has exhausted the maximum width per node limit." + EOL + "Endpoint pool: {}" + EOL + "Assignment so far: {}" + EOL + "Width: {}", endpointPool, endpoints, width); throw new PhysicalOperatorSetupException("Can not parallelize fragment."); } } final List<NodeEndpoint> assignedEndpoints = Lists.newArrayList(); for(Entry<NodeEndpoint, Integer> entry : endpoints.entrySet()) { for(int i=0; i < entry.getValue(); i++) { assignedEndpoints.add(entry.getKey()); } } fragmentWrapper.setWidth(width); fragmentWrapper.assignEndpoints(parameters, assignedEndpoints); } private HardAffinityFragmentParallelizer(); @Override void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints); @Override int getIdealFragmentWidth(final Wrapper fragment, final ParallelizationParameters parameters); }
HardAffinityFragmentParallelizer implements FragmentParallelizer { @Override public void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints) throws PhysicalOperatorSetupException { final Stats stats = fragmentWrapper.getStats(); final ParallelizationInfo pInfo = stats.getParallelizationInfo(); int totalMaxWidth = 0; final Map<NodeEndpoint, EndpointAffinity> endpointPool = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : pInfo.getEndpointAffinityMap().entrySet()) { if (entry.getValue().isAssignmentRequired()) { endpointPool.put(entry.getKey(), entry.getValue()); totalMaxWidth += Math.min(parameters.getMaxWidthPerNode(), entry.getValue().getMaxWidth()); if (totalMaxWidth < 0) { totalMaxWidth = Integer.MAX_VALUE; } } } int width = (int) Math.ceil(stats.getMaxCost() / parameters.getSliceTarget()); width = Math.max(endpointPool.size(), width); width = Math.max(1, Math.min(width, pInfo.getMaxWidth())); checkOrThrow(endpointPool.size() <= width, logger, "Number of mandatory endpoints ({}) that require an assignment is more than the allowed fragment max " + "width ({}).", endpointPool.size(), pInfo.getMaxWidth()); width = Math.max(1, Math.min(width, endpointPool.size()*parameters.getMaxWidthPerNode())); width = Math.min(totalMaxWidth, width); final Map<NodeEndpoint, Integer> endpoints = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : endpointPool.entrySet()) { endpoints.put(entry.getKey(), 1); } int totalAssigned = endpoints.size(); double totalAffinity = 1.0; for(EndpointAffinity epAff : endpointPool.values()) { totalAffinity += epAff.getAffinity(); } int remainingSlots = width - endpoints.size(); while (remainingSlots > 0) { for(EndpointAffinity epAf : endpointPool.values()) { final int moreAllocation = (int) Math.ceil( (epAf.getAffinity() / totalAffinity) * remainingSlots); int currentAssignments = endpoints.get(epAf.getEndpoint()); for(int i=0; i < moreAllocation && totalAssigned < width && currentAssignments < parameters.getMaxWidthPerNode() && currentAssignments < epAf.getMaxWidth(); i++) { totalAssigned++; currentAssignments++; } endpoints.put(epAf.getEndpoint(), currentAssignments); } final int previousRemainingSlots = remainingSlots; remainingSlots = width - totalAssigned; if (previousRemainingSlots == remainingSlots) { logger.error("Can't parallelize fragment: " + "Every mandatory node has exhausted the maximum width per node limit." + EOL + "Endpoint pool: {}" + EOL + "Assignment so far: {}" + EOL + "Width: {}", endpointPool, endpoints, width); throw new PhysicalOperatorSetupException("Can not parallelize fragment."); } } final List<NodeEndpoint> assignedEndpoints = Lists.newArrayList(); for(Entry<NodeEndpoint, Integer> entry : endpoints.entrySet()) { for(int i=0; i < entry.getValue(); i++) { assignedEndpoints.add(entry.getKey()); } } fragmentWrapper.setWidth(width); fragmentWrapper.assignEndpoints(parameters, assignedEndpoints); } private HardAffinityFragmentParallelizer(); @Override void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints); @Override int getIdealFragmentWidth(final Wrapper fragment, final ParallelizationParameters parameters); static final HardAffinityFragmentParallelizer INSTANCE; }
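The multiNodeCluster2 row exercises the affinity-proportional slot allocation rather than the width clamps. The following self-contained sketch replays that loop with the test's affinities and a width of 20; it drops the per-endpoint getMaxWidth() check (50 in the test, never binding here) and the no-progress guard for brevity, and the class and variable names are illustrative only.

import java.util.LinkedHashMap;
import java.util.Map;

// Sketch of the affinity-proportional slot allocation performed after the width is fixed.
public final class SlotAllocationSketch {
  public static void main(String[] args) {
    Map<String, Double> affinity = new LinkedHashMap<>();
    affinity.put("N1_EP2", 0.15); affinity.put("N2_EP2", 0.15); affinity.put("N3_EP1", 0.10);
    affinity.put("N4_EP2", 0.20); affinity.put("N1_EP1", 0.20);

    int width = 20, maxWidthPerNode = 5;
    Map<String, Integer> slots = new LinkedHashMap<>();
    affinity.keySet().forEach(ep -> slots.put(ep, 1));          // every mandatory endpoint gets one slot first
    int assigned = slots.size();
    double totalAffinity = 1.0 + affinity.values().stream().mapToDouble(Double::doubleValue).sum();

    int remaining = width - assigned;
    while (remaining > 0) {
      for (Map.Entry<String, Double> e : affinity.entrySet()) {
        int more = (int) Math.ceil((e.getValue() / totalAffinity) * remaining);
        int current = slots.get(e.getKey());
        while (more-- > 0 && assigned < width && current < maxWidthPerNode) {
          assigned++; current++;
        }
        slots.put(e.getKey(), current);
      }
      remaining = width - assigned;   // the real code also detects "no progress" here and throws
    }
    System.out.println(slots);        // each endpoint ends up with at most 5 slots, 20 in total
  }
}

Reserving one guaranteed slot per mandatory endpoint before distributing the remainder by affinity is what the counts assertions in multiNodeCluster2 rely on: no endpoint can exceed the per-node limit, and none can be skipped.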
@Test public void multiNodeClusterNonNormalizedAffinities() throws Exception { final Wrapper wrapper = newWrapper(2000, 1, 250, ImmutableList.of( new EndpointAffinity(N1_EP2, 15, true, 50), new EndpointAffinity(N2_EP2, 15, true, 50), new EndpointAffinity(N3_EP1, 10, true, 50), new EndpointAffinity(N4_EP2, 20, true, 50), new EndpointAffinity(N1_EP1, 20, true, 50) )); INSTANCE.parallelizeFragment(wrapper, newParameters(100, 20, 80), null); assertEquals(20, wrapper.getWidth()); final List<NodeEndpoint> assignedEps = wrapper.getAssignedEndpoints(); assertEquals(20, assignedEps.size()); final HashMultiset<NodeEndpoint> counts = HashMultiset.create(); for(final NodeEndpoint ep : assignedEps) { counts.add(ep); } assertThat(counts.count(N1_EP2), CoreMatchers.allOf(greaterThan(1), lessThanOrEqualTo(5))); assertThat(counts.count(N2_EP2), CoreMatchers.allOf(greaterThan(1), lessThanOrEqualTo(5))); assertThat(counts.count(N3_EP1), CoreMatchers.allOf(greaterThan(1), lessThanOrEqualTo(5))); assertThat(counts.count(N4_EP2), CoreMatchers.allOf(greaterThan(1), lessThanOrEqualTo(5))); assertThat(counts.count(N1_EP1), CoreMatchers.allOf(greaterThan(1), lessThanOrEqualTo(5))); }
@Override public void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints) throws PhysicalOperatorSetupException { final Stats stats = fragmentWrapper.getStats(); final ParallelizationInfo pInfo = stats.getParallelizationInfo(); int totalMaxWidth = 0; final Map<NodeEndpoint, EndpointAffinity> endpointPool = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : pInfo.getEndpointAffinityMap().entrySet()) { if (entry.getValue().isAssignmentRequired()) { endpointPool.put(entry.getKey(), entry.getValue()); totalMaxWidth += Math.min(parameters.getMaxWidthPerNode(), entry.getValue().getMaxWidth()); if (totalMaxWidth < 0) { totalMaxWidth = Integer.MAX_VALUE; } } } int width = (int) Math.ceil(stats.getMaxCost() / parameters.getSliceTarget()); width = Math.max(endpointPool.size(), width); width = Math.max(1, Math.min(width, pInfo.getMaxWidth())); checkOrThrow(endpointPool.size() <= width, logger, "Number of mandatory endpoints ({}) that require an assignment is more than the allowed fragment max " + "width ({}).", endpointPool.size(), pInfo.getMaxWidth()); width = Math.max(1, Math.min(width, endpointPool.size()*parameters.getMaxWidthPerNode())); width = Math.min(totalMaxWidth, width); final Map<NodeEndpoint, Integer> endpoints = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : endpointPool.entrySet()) { endpoints.put(entry.getKey(), 1); } int totalAssigned = endpoints.size(); double totalAffinity = 1.0; for(EndpointAffinity epAff : endpointPool.values()) { totalAffinity += epAff.getAffinity(); } int remainingSlots = width - endpoints.size(); while (remainingSlots > 0) { for(EndpointAffinity epAf : endpointPool.values()) { final int moreAllocation = (int) Math.ceil( (epAf.getAffinity() / totalAffinity) * remainingSlots); int currentAssignments = endpoints.get(epAf.getEndpoint()); for(int i=0; i < moreAllocation && totalAssigned < width && currentAssignments < parameters.getMaxWidthPerNode() && currentAssignments < epAf.getMaxWidth(); i++) { totalAssigned++; currentAssignments++; } endpoints.put(epAf.getEndpoint(), currentAssignments); } final int previousRemainingSlots = remainingSlots; remainingSlots = width - totalAssigned; if (previousRemainingSlots == remainingSlots) { logger.error("Can't parallelize fragment: " + "Every mandatory node has exhausted the maximum width per node limit." + EOL + "Endpoint pool: {}" + EOL + "Assignment so far: {}" + EOL + "Width: {}", endpointPool, endpoints, width); throw new PhysicalOperatorSetupException("Can not parallelize fragment."); } } final List<NodeEndpoint> assignedEndpoints = Lists.newArrayList(); for(Entry<NodeEndpoint, Integer> entry : endpoints.entrySet()) { for(int i=0; i < entry.getValue(); i++) { assignedEndpoints.add(entry.getKey()); } } fragmentWrapper.setWidth(width); fragmentWrapper.assignEndpoints(parameters, assignedEndpoints); }
HardAffinityFragmentParallelizer implements FragmentParallelizer { @Override public void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints) throws PhysicalOperatorSetupException { final Stats stats = fragmentWrapper.getStats(); final ParallelizationInfo pInfo = stats.getParallelizationInfo(); int totalMaxWidth = 0; final Map<NodeEndpoint, EndpointAffinity> endpointPool = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : pInfo.getEndpointAffinityMap().entrySet()) { if (entry.getValue().isAssignmentRequired()) { endpointPool.put(entry.getKey(), entry.getValue()); totalMaxWidth += Math.min(parameters.getMaxWidthPerNode(), entry.getValue().getMaxWidth()); if (totalMaxWidth < 0) { totalMaxWidth = Integer.MAX_VALUE; } } } int width = (int) Math.ceil(stats.getMaxCost() / parameters.getSliceTarget()); width = Math.max(endpointPool.size(), width); width = Math.max(1, Math.min(width, pInfo.getMaxWidth())); checkOrThrow(endpointPool.size() <= width, logger, "Number of mandatory endpoints ({}) that require an assignment is more than the allowed fragment max " + "width ({}).", endpointPool.size(), pInfo.getMaxWidth()); width = Math.max(1, Math.min(width, endpointPool.size()*parameters.getMaxWidthPerNode())); width = Math.min(totalMaxWidth, width); final Map<NodeEndpoint, Integer> endpoints = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : endpointPool.entrySet()) { endpoints.put(entry.getKey(), 1); } int totalAssigned = endpoints.size(); double totalAffinity = 1.0; for(EndpointAffinity epAff : endpointPool.values()) { totalAffinity += epAff.getAffinity(); } int remainingSlots = width - endpoints.size(); while (remainingSlots > 0) { for(EndpointAffinity epAf : endpointPool.values()) { final int moreAllocation = (int) Math.ceil( (epAf.getAffinity() / totalAffinity) * remainingSlots); int currentAssignments = endpoints.get(epAf.getEndpoint()); for(int i=0; i < moreAllocation && totalAssigned < width && currentAssignments < parameters.getMaxWidthPerNode() && currentAssignments < epAf.getMaxWidth(); i++) { totalAssigned++; currentAssignments++; } endpoints.put(epAf.getEndpoint(), currentAssignments); } final int previousRemainingSlots = remainingSlots; remainingSlots = width - totalAssigned; if (previousRemainingSlots == remainingSlots) { logger.error("Can't parallelize fragment: " + "Every mandatory node has exhausted the maximum width per node limit." + EOL + "Endpoint pool: {}" + EOL + "Assignment so far: {}" + EOL + "Width: {}", endpointPool, endpoints, width); throw new PhysicalOperatorSetupException("Can not parallelize fragment."); } } final List<NodeEndpoint> assignedEndpoints = Lists.newArrayList(); for(Entry<NodeEndpoint, Integer> entry : endpoints.entrySet()) { for(int i=0; i < entry.getValue(); i++) { assignedEndpoints.add(entry.getKey()); } } fragmentWrapper.setWidth(width); fragmentWrapper.assignEndpoints(parameters, assignedEndpoints); } }
HardAffinityFragmentParallelizer implements FragmentParallelizer { @Override public void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints) throws PhysicalOperatorSetupException { final Stats stats = fragmentWrapper.getStats(); final ParallelizationInfo pInfo = stats.getParallelizationInfo(); int totalMaxWidth = 0; final Map<NodeEndpoint, EndpointAffinity> endpointPool = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : pInfo.getEndpointAffinityMap().entrySet()) { if (entry.getValue().isAssignmentRequired()) { endpointPool.put(entry.getKey(), entry.getValue()); totalMaxWidth += Math.min(parameters.getMaxWidthPerNode(), entry.getValue().getMaxWidth()); if (totalMaxWidth < 0) { totalMaxWidth = Integer.MAX_VALUE; } } } int width = (int) Math.ceil(stats.getMaxCost() / parameters.getSliceTarget()); width = Math.max(endpointPool.size(), width); width = Math.max(1, Math.min(width, pInfo.getMaxWidth())); checkOrThrow(endpointPool.size() <= width, logger, "Number of mandatory endpoints ({}) that require an assignment is more than the allowed fragment max " + "width ({}).", endpointPool.size(), pInfo.getMaxWidth()); width = Math.max(1, Math.min(width, endpointPool.size()*parameters.getMaxWidthPerNode())); width = Math.min(totalMaxWidth, width); final Map<NodeEndpoint, Integer> endpoints = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : endpointPool.entrySet()) { endpoints.put(entry.getKey(), 1); } int totalAssigned = endpoints.size(); double totalAffinity = 1.0; for(EndpointAffinity epAff : endpointPool.values()) { totalAffinity += epAff.getAffinity(); } int remainingSlots = width - endpoints.size(); while (remainingSlots > 0) { for(EndpointAffinity epAf : endpointPool.values()) { final int moreAllocation = (int) Math.ceil( (epAf.getAffinity() / totalAffinity) * remainingSlots); int currentAssignments = endpoints.get(epAf.getEndpoint()); for(int i=0; i < moreAllocation && totalAssigned < width && currentAssignments < parameters.getMaxWidthPerNode() && currentAssignments < epAf.getMaxWidth(); i++) { totalAssigned++; currentAssignments++; } endpoints.put(epAf.getEndpoint(), currentAssignments); } final int previousRemainingSlots = remainingSlots; remainingSlots = width - totalAssigned; if (previousRemainingSlots == remainingSlots) { logger.error("Can't parallelize fragment: " + "Every mandatory node has exhausted the maximum width per node limit." + EOL + "Endpoint pool: {}" + EOL + "Assignment so far: {}" + EOL + "Width: {}", endpointPool, endpoints, width); throw new PhysicalOperatorSetupException("Can not parallelize fragment."); } } final List<NodeEndpoint> assignedEndpoints = Lists.newArrayList(); for(Entry<NodeEndpoint, Integer> entry : endpoints.entrySet()) { for(int i=0; i < entry.getValue(); i++) { assignedEndpoints.add(entry.getKey()); } } fragmentWrapper.setWidth(width); fragmentWrapper.assignEndpoints(parameters, assignedEndpoints); } private HardAffinityFragmentParallelizer(); }
HardAffinityFragmentParallelizer implements FragmentParallelizer { @Override public void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints) throws PhysicalOperatorSetupException { final Stats stats = fragmentWrapper.getStats(); final ParallelizationInfo pInfo = stats.getParallelizationInfo(); int totalMaxWidth = 0; final Map<NodeEndpoint, EndpointAffinity> endpointPool = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : pInfo.getEndpointAffinityMap().entrySet()) { if (entry.getValue().isAssignmentRequired()) { endpointPool.put(entry.getKey(), entry.getValue()); totalMaxWidth += Math.min(parameters.getMaxWidthPerNode(), entry.getValue().getMaxWidth()); if (totalMaxWidth < 0) { totalMaxWidth = Integer.MAX_VALUE; } } } int width = (int) Math.ceil(stats.getMaxCost() / parameters.getSliceTarget()); width = Math.max(endpointPool.size(), width); width = Math.max(1, Math.min(width, pInfo.getMaxWidth())); checkOrThrow(endpointPool.size() <= width, logger, "Number of mandatory endpoints ({}) that require an assignment is more than the allowed fragment max " + "width ({}).", endpointPool.size(), pInfo.getMaxWidth()); width = Math.max(1, Math.min(width, endpointPool.size()*parameters.getMaxWidthPerNode())); width = Math.min(totalMaxWidth, width); final Map<NodeEndpoint, Integer> endpoints = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : endpointPool.entrySet()) { endpoints.put(entry.getKey(), 1); } int totalAssigned = endpoints.size(); double totalAffinity = 1.0; for(EndpointAffinity epAff : endpointPool.values()) { totalAffinity += epAff.getAffinity(); } int remainingSlots = width - endpoints.size(); while (remainingSlots > 0) { for(EndpointAffinity epAf : endpointPool.values()) { final int moreAllocation = (int) Math.ceil( (epAf.getAffinity() / totalAffinity) * remainingSlots); int currentAssignments = endpoints.get(epAf.getEndpoint()); for(int i=0; i < moreAllocation && totalAssigned < width && currentAssignments < parameters.getMaxWidthPerNode() && currentAssignments < epAf.getMaxWidth(); i++) { totalAssigned++; currentAssignments++; } endpoints.put(epAf.getEndpoint(), currentAssignments); } final int previousRemainingSlots = remainingSlots; remainingSlots = width - totalAssigned; if (previousRemainingSlots == remainingSlots) { logger.error("Can't parallelize fragment: " + "Every mandatory node has exhausted the maximum width per node limit." + EOL + "Endpoint pool: {}" + EOL + "Assignment so far: {}" + EOL + "Width: {}", endpointPool, endpoints, width); throw new PhysicalOperatorSetupException("Can not parallelize fragment."); } } final List<NodeEndpoint> assignedEndpoints = Lists.newArrayList(); for(Entry<NodeEndpoint, Integer> entry : endpoints.entrySet()) { for(int i=0; i < entry.getValue(); i++) { assignedEndpoints.add(entry.getKey()); } } fragmentWrapper.setWidth(width); fragmentWrapper.assignEndpoints(parameters, assignedEndpoints); } private HardAffinityFragmentParallelizer(); @Override void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints); @Override int getIdealFragmentWidth(final Wrapper fragment, final ParallelizationParameters parameters); }
HardAffinityFragmentParallelizer implements FragmentParallelizer { @Override public void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints) throws PhysicalOperatorSetupException { final Stats stats = fragmentWrapper.getStats(); final ParallelizationInfo pInfo = stats.getParallelizationInfo(); int totalMaxWidth = 0; final Map<NodeEndpoint, EndpointAffinity> endpointPool = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : pInfo.getEndpointAffinityMap().entrySet()) { if (entry.getValue().isAssignmentRequired()) { endpointPool.put(entry.getKey(), entry.getValue()); totalMaxWidth += Math.min(parameters.getMaxWidthPerNode(), entry.getValue().getMaxWidth()); if (totalMaxWidth < 0) { totalMaxWidth = Integer.MAX_VALUE; } } } int width = (int) Math.ceil(stats.getMaxCost() / parameters.getSliceTarget()); width = Math.max(endpointPool.size(), width); width = Math.max(1, Math.min(width, pInfo.getMaxWidth())); checkOrThrow(endpointPool.size() <= width, logger, "Number of mandatory endpoints ({}) that require an assignment is more than the allowed fragment max " + "width ({}).", endpointPool.size(), pInfo.getMaxWidth()); width = Math.max(1, Math.min(width, endpointPool.size()*parameters.getMaxWidthPerNode())); width = Math.min(totalMaxWidth, width); final Map<NodeEndpoint, Integer> endpoints = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : endpointPool.entrySet()) { endpoints.put(entry.getKey(), 1); } int totalAssigned = endpoints.size(); double totalAffinity = 1.0; for(EndpointAffinity epAff : endpointPool.values()) { totalAffinity += epAff.getAffinity(); } int remainingSlots = width - endpoints.size(); while (remainingSlots > 0) { for(EndpointAffinity epAf : endpointPool.values()) { final int moreAllocation = (int) Math.ceil( (epAf.getAffinity() / totalAffinity) * remainingSlots); int currentAssignments = endpoints.get(epAf.getEndpoint()); for(int i=0; i < moreAllocation && totalAssigned < width && currentAssignments < parameters.getMaxWidthPerNode() && currentAssignments < epAf.getMaxWidth(); i++) { totalAssigned++; currentAssignments++; } endpoints.put(epAf.getEndpoint(), currentAssignments); } final int previousRemainingSlots = remainingSlots; remainingSlots = width - totalAssigned; if (previousRemainingSlots == remainingSlots) { logger.error("Can't parallelize fragment: " + "Every mandatory node has exhausted the maximum width per node limit." + EOL + "Endpoint pool: {}" + EOL + "Assignment so far: {}" + EOL + "Width: {}", endpointPool, endpoints, width); throw new PhysicalOperatorSetupException("Can not parallelize fragment."); } } final List<NodeEndpoint> assignedEndpoints = Lists.newArrayList(); for(Entry<NodeEndpoint, Integer> entry : endpoints.entrySet()) { for(int i=0; i < entry.getValue(); i++) { assignedEndpoints.add(entry.getKey()); } } fragmentWrapper.setWidth(width); fragmentWrapper.assignEndpoints(parameters, assignedEndpoints); } private HardAffinityFragmentParallelizer(); @Override void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints); @Override int getIdealFragmentWidth(final Wrapper fragment, final ParallelizationParameters parameters); static final HardAffinityFragmentParallelizer INSTANCE; }
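The non-normalized-affinity row relies on the allocator dividing each weight by the running total plus one, so the weights never need to sum to 1. A rough, illustrative walk-through follows; it assumes the first argument of newParameters is the slice target (which matches the expected width of 20), and the class name is invented.

public final class NonNormalizedAffinityArithmetic {
  public static void main(String[] args) {
    double totalAffinity = 1.0 + 15 + 15 + 10 + 20 + 20;      // 81.0 - no normalization required
    int width = Math.max(5, (int) Math.ceil(2000d / 100));    // 20, well under the fragment max of 250
    int remaining = width - 5;                                // 15 slots left after 1 per endpoint
    int extraForAffinity20 = (int) Math.ceil((20 / totalAffinity) * remaining); // ceil(3.70...) = 4
    int extraForAffinity10 = (int) Math.ceil((10 / totalAffinity) * remaining); // ceil(1.85...) = 2
    System.out.println(width + " " + extraForAffinity20 + " " + extraForAffinity10);
    // Each endpoint ends up with between 2 and 5 slots, which is exactly what the
    // greaterThan(1) / lessThanOrEqualTo(5) assertions in the test check.
  }
}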
@Test public void multiNodeClusterNegative2() throws Exception { final Wrapper wrapper = newWrapper(200, 1, 3, ImmutableList.of( new EndpointAffinity(N1_EP2, 0.15, true, 50), new EndpointAffinity(N2_EP2, 0.15, true, 50), new EndpointAffinity(N3_EP1, 0.10, true, 50), new EndpointAffinity(N4_EP2, 0.20, true, 50), new EndpointAffinity(N1_EP1, 0.20, true, 50) )); try { INSTANCE.parallelizeFragment(wrapper, newParameters(1, 2, 2), null); fail("Expected an exception, because max fragment width (3) is less than the number of mandatory nodes (5)"); } catch (Exception e) { } }
@Override public void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints) throws PhysicalOperatorSetupException { final Stats stats = fragmentWrapper.getStats(); final ParallelizationInfo pInfo = stats.getParallelizationInfo(); int totalMaxWidth = 0; final Map<NodeEndpoint, EndpointAffinity> endpointPool = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : pInfo.getEndpointAffinityMap().entrySet()) { if (entry.getValue().isAssignmentRequired()) { endpointPool.put(entry.getKey(), entry.getValue()); totalMaxWidth += Math.min(parameters.getMaxWidthPerNode(), entry.getValue().getMaxWidth()); if (totalMaxWidth < 0) { totalMaxWidth = Integer.MAX_VALUE; } } } int width = (int) Math.ceil(stats.getMaxCost() / parameters.getSliceTarget()); width = Math.max(endpointPool.size(), width); width = Math.max(1, Math.min(width, pInfo.getMaxWidth())); checkOrThrow(endpointPool.size() <= width, logger, "Number of mandatory endpoints ({}) that require an assignment is more than the allowed fragment max " + "width ({}).", endpointPool.size(), pInfo.getMaxWidth()); width = Math.max(1, Math.min(width, endpointPool.size()*parameters.getMaxWidthPerNode())); width = Math.min(totalMaxWidth, width); final Map<NodeEndpoint, Integer> endpoints = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : endpointPool.entrySet()) { endpoints.put(entry.getKey(), 1); } int totalAssigned = endpoints.size(); double totalAffinity = 1.0; for(EndpointAffinity epAff : endpointPool.values()) { totalAffinity += epAff.getAffinity(); } int remainingSlots = width - endpoints.size(); while (remainingSlots > 0) { for(EndpointAffinity epAf : endpointPool.values()) { final int moreAllocation = (int) Math.ceil( (epAf.getAffinity() / totalAffinity) * remainingSlots); int currentAssignments = endpoints.get(epAf.getEndpoint()); for(int i=0; i < moreAllocation && totalAssigned < width && currentAssignments < parameters.getMaxWidthPerNode() && currentAssignments < epAf.getMaxWidth(); i++) { totalAssigned++; currentAssignments++; } endpoints.put(epAf.getEndpoint(), currentAssignments); } final int previousRemainingSlots = remainingSlots; remainingSlots = width - totalAssigned; if (previousRemainingSlots == remainingSlots) { logger.error("Can't parallelize fragment: " + "Every mandatory node has exhausted the maximum width per node limit." + EOL + "Endpoint pool: {}" + EOL + "Assignment so far: {}" + EOL + "Width: {}", endpointPool, endpoints, width); throw new PhysicalOperatorSetupException("Can not parallelize fragment."); } } final List<NodeEndpoint> assignedEndpoints = Lists.newArrayList(); for(Entry<NodeEndpoint, Integer> entry : endpoints.entrySet()) { for(int i=0; i < entry.getValue(); i++) { assignedEndpoints.add(entry.getKey()); } } fragmentWrapper.setWidth(width); fragmentWrapper.assignEndpoints(parameters, assignedEndpoints); }
HardAffinityFragmentParallelizer implements FragmentParallelizer { @Override public void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints) throws PhysicalOperatorSetupException { final Stats stats = fragmentWrapper.getStats(); final ParallelizationInfo pInfo = stats.getParallelizationInfo(); int totalMaxWidth = 0; final Map<NodeEndpoint, EndpointAffinity> endpointPool = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : pInfo.getEndpointAffinityMap().entrySet()) { if (entry.getValue().isAssignmentRequired()) { endpointPool.put(entry.getKey(), entry.getValue()); totalMaxWidth += Math.min(parameters.getMaxWidthPerNode(), entry.getValue().getMaxWidth()); if (totalMaxWidth < 0) { totalMaxWidth = Integer.MAX_VALUE; } } } int width = (int) Math.ceil(stats.getMaxCost() / parameters.getSliceTarget()); width = Math.max(endpointPool.size(), width); width = Math.max(1, Math.min(width, pInfo.getMaxWidth())); checkOrThrow(endpointPool.size() <= width, logger, "Number of mandatory endpoints ({}) that require an assignment is more than the allowed fragment max " + "width ({}).", endpointPool.size(), pInfo.getMaxWidth()); width = Math.max(1, Math.min(width, endpointPool.size()*parameters.getMaxWidthPerNode())); width = Math.min(totalMaxWidth, width); final Map<NodeEndpoint, Integer> endpoints = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : endpointPool.entrySet()) { endpoints.put(entry.getKey(), 1); } int totalAssigned = endpoints.size(); double totalAffinity = 1.0; for(EndpointAffinity epAff : endpointPool.values()) { totalAffinity += epAff.getAffinity(); } int remainingSlots = width - endpoints.size(); while (remainingSlots > 0) { for(EndpointAffinity epAf : endpointPool.values()) { final int moreAllocation = (int) Math.ceil( (epAf.getAffinity() / totalAffinity) * remainingSlots); int currentAssignments = endpoints.get(epAf.getEndpoint()); for(int i=0; i < moreAllocation && totalAssigned < width && currentAssignments < parameters.getMaxWidthPerNode() && currentAssignments < epAf.getMaxWidth(); i++) { totalAssigned++; currentAssignments++; } endpoints.put(epAf.getEndpoint(), currentAssignments); } final int previousRemainingSlots = remainingSlots; remainingSlots = width - totalAssigned; if (previousRemainingSlots == remainingSlots) { logger.error("Can't parallelize fragment: " + "Every mandatory node has exhausted the maximum width per node limit." + EOL + "Endpoint pool: {}" + EOL + "Assignment so far: {}" + EOL + "Width: {}", endpointPool, endpoints, width); throw new PhysicalOperatorSetupException("Can not parallelize fragment."); } } final List<NodeEndpoint> assignedEndpoints = Lists.newArrayList(); for(Entry<NodeEndpoint, Integer> entry : endpoints.entrySet()) { for(int i=0; i < entry.getValue(); i++) { assignedEndpoints.add(entry.getKey()); } } fragmentWrapper.setWidth(width); fragmentWrapper.assignEndpoints(parameters, assignedEndpoints); } }
HardAffinityFragmentParallelizer implements FragmentParallelizer { @Override public void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints) throws PhysicalOperatorSetupException { final Stats stats = fragmentWrapper.getStats(); final ParallelizationInfo pInfo = stats.getParallelizationInfo(); int totalMaxWidth = 0; final Map<NodeEndpoint, EndpointAffinity> endpointPool = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : pInfo.getEndpointAffinityMap().entrySet()) { if (entry.getValue().isAssignmentRequired()) { endpointPool.put(entry.getKey(), entry.getValue()); totalMaxWidth += Math.min(parameters.getMaxWidthPerNode(), entry.getValue().getMaxWidth()); if (totalMaxWidth < 0) { totalMaxWidth = Integer.MAX_VALUE; } } } int width = (int) Math.ceil(stats.getMaxCost() / parameters.getSliceTarget()); width = Math.max(endpointPool.size(), width); width = Math.max(1, Math.min(width, pInfo.getMaxWidth())); checkOrThrow(endpointPool.size() <= width, logger, "Number of mandatory endpoints ({}) that require an assignment is more than the allowed fragment max " + "width ({}).", endpointPool.size(), pInfo.getMaxWidth()); width = Math.max(1, Math.min(width, endpointPool.size()*parameters.getMaxWidthPerNode())); width = Math.min(totalMaxWidth, width); final Map<NodeEndpoint, Integer> endpoints = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : endpointPool.entrySet()) { endpoints.put(entry.getKey(), 1); } int totalAssigned = endpoints.size(); double totalAffinity = 1.0; for(EndpointAffinity epAff : endpointPool.values()) { totalAffinity += epAff.getAffinity(); } int remainingSlots = width - endpoints.size(); while (remainingSlots > 0) { for(EndpointAffinity epAf : endpointPool.values()) { final int moreAllocation = (int) Math.ceil( (epAf.getAffinity() / totalAffinity) * remainingSlots); int currentAssignments = endpoints.get(epAf.getEndpoint()); for(int i=0; i < moreAllocation && totalAssigned < width && currentAssignments < parameters.getMaxWidthPerNode() && currentAssignments < epAf.getMaxWidth(); i++) { totalAssigned++; currentAssignments++; } endpoints.put(epAf.getEndpoint(), currentAssignments); } final int previousRemainingSlots = remainingSlots; remainingSlots = width - totalAssigned; if (previousRemainingSlots == remainingSlots) { logger.error("Can't parallelize fragment: " + "Every mandatory node has exhausted the maximum width per node limit." + EOL + "Endpoint pool: {}" + EOL + "Assignment so far: {}" + EOL + "Width: {}", endpointPool, endpoints, width); throw new PhysicalOperatorSetupException("Can not parallelize fragment."); } } final List<NodeEndpoint> assignedEndpoints = Lists.newArrayList(); for(Entry<NodeEndpoint, Integer> entry : endpoints.entrySet()) { for(int i=0; i < entry.getValue(); i++) { assignedEndpoints.add(entry.getKey()); } } fragmentWrapper.setWidth(width); fragmentWrapper.assignEndpoints(parameters, assignedEndpoints); } private HardAffinityFragmentParallelizer(); }
HardAffinityFragmentParallelizer implements FragmentParallelizer { @Override public void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints) throws PhysicalOperatorSetupException { final Stats stats = fragmentWrapper.getStats(); final ParallelizationInfo pInfo = stats.getParallelizationInfo(); int totalMaxWidth = 0; final Map<NodeEndpoint, EndpointAffinity> endpointPool = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : pInfo.getEndpointAffinityMap().entrySet()) { if (entry.getValue().isAssignmentRequired()) { endpointPool.put(entry.getKey(), entry.getValue()); totalMaxWidth += Math.min(parameters.getMaxWidthPerNode(), entry.getValue().getMaxWidth()); if (totalMaxWidth < 0) { totalMaxWidth = Integer.MAX_VALUE; } } } int width = (int) Math.ceil(stats.getMaxCost() / parameters.getSliceTarget()); width = Math.max(endpointPool.size(), width); width = Math.max(1, Math.min(width, pInfo.getMaxWidth())); checkOrThrow(endpointPool.size() <= width, logger, "Number of mandatory endpoints ({}) that require an assignment is more than the allowed fragment max " + "width ({}).", endpointPool.size(), pInfo.getMaxWidth()); width = Math.max(1, Math.min(width, endpointPool.size()*parameters.getMaxWidthPerNode())); width = Math.min(totalMaxWidth, width); final Map<NodeEndpoint, Integer> endpoints = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : endpointPool.entrySet()) { endpoints.put(entry.getKey(), 1); } int totalAssigned = endpoints.size(); double totalAffinity = 1.0; for(EndpointAffinity epAff : endpointPool.values()) { totalAffinity += epAff.getAffinity(); } int remainingSlots = width - endpoints.size(); while (remainingSlots > 0) { for(EndpointAffinity epAf : endpointPool.values()) { final int moreAllocation = (int) Math.ceil( (epAf.getAffinity() / totalAffinity) * remainingSlots); int currentAssignments = endpoints.get(epAf.getEndpoint()); for(int i=0; i < moreAllocation && totalAssigned < width && currentAssignments < parameters.getMaxWidthPerNode() && currentAssignments < epAf.getMaxWidth(); i++) { totalAssigned++; currentAssignments++; } endpoints.put(epAf.getEndpoint(), currentAssignments); } final int previousRemainingSlots = remainingSlots; remainingSlots = width - totalAssigned; if (previousRemainingSlots == remainingSlots) { logger.error("Can't parallelize fragment: " + "Every mandatory node has exhausted the maximum width per node limit." + EOL + "Endpoint pool: {}" + EOL + "Assignment so far: {}" + EOL + "Width: {}", endpointPool, endpoints, width); throw new PhysicalOperatorSetupException("Can not parallelize fragment."); } } final List<NodeEndpoint> assignedEndpoints = Lists.newArrayList(); for(Entry<NodeEndpoint, Integer> entry : endpoints.entrySet()) { for(int i=0; i < entry.getValue(); i++) { assignedEndpoints.add(entry.getKey()); } } fragmentWrapper.setWidth(width); fragmentWrapper.assignEndpoints(parameters, assignedEndpoints); } private HardAffinityFragmentParallelizer(); @Override void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints); @Override int getIdealFragmentWidth(final Wrapper fragment, final ParallelizationParameters parameters); }
HardAffinityFragmentParallelizer implements FragmentParallelizer { @Override public void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints) throws PhysicalOperatorSetupException { final Stats stats = fragmentWrapper.getStats(); final ParallelizationInfo pInfo = stats.getParallelizationInfo(); int totalMaxWidth = 0; final Map<NodeEndpoint, EndpointAffinity> endpointPool = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : pInfo.getEndpointAffinityMap().entrySet()) { if (entry.getValue().isAssignmentRequired()) { endpointPool.put(entry.getKey(), entry.getValue()); totalMaxWidth += Math.min(parameters.getMaxWidthPerNode(), entry.getValue().getMaxWidth()); if (totalMaxWidth < 0) { totalMaxWidth = Integer.MAX_VALUE; } } } int width = (int) Math.ceil(stats.getMaxCost() / parameters.getSliceTarget()); width = Math.max(endpointPool.size(), width); width = Math.max(1, Math.min(width, pInfo.getMaxWidth())); checkOrThrow(endpointPool.size() <= width, logger, "Number of mandatory endpoints ({}) that require an assignment is more than the allowed fragment max " + "width ({}).", endpointPool.size(), pInfo.getMaxWidth()); width = Math.max(1, Math.min(width, endpointPool.size()*parameters.getMaxWidthPerNode())); width = Math.min(totalMaxWidth, width); final Map<NodeEndpoint, Integer> endpoints = Maps.newHashMap(); for(Entry<NodeEndpoint, EndpointAffinity> entry : endpointPool.entrySet()) { endpoints.put(entry.getKey(), 1); } int totalAssigned = endpoints.size(); double totalAffinity = 1.0; for(EndpointAffinity epAff : endpointPool.values()) { totalAffinity += epAff.getAffinity(); } int remainingSlots = width - endpoints.size(); while (remainingSlots > 0) { for(EndpointAffinity epAf : endpointPool.values()) { final int moreAllocation = (int) Math.ceil( (epAf.getAffinity() / totalAffinity) * remainingSlots); int currentAssignments = endpoints.get(epAf.getEndpoint()); for(int i=0; i < moreAllocation && totalAssigned < width && currentAssignments < parameters.getMaxWidthPerNode() && currentAssignments < epAf.getMaxWidth(); i++) { totalAssigned++; currentAssignments++; } endpoints.put(epAf.getEndpoint(), currentAssignments); } final int previousRemainingSlots = remainingSlots; remainingSlots = width - totalAssigned; if (previousRemainingSlots == remainingSlots) { logger.error("Can't parallelize fragment: " + "Every mandatory node has exhausted the maximum width per node limit." + EOL + "Endpoint pool: {}" + EOL + "Assignment so far: {}" + EOL + "Width: {}", endpointPool, endpoints, width); throw new PhysicalOperatorSetupException("Can not parallelize fragment."); } } final List<NodeEndpoint> assignedEndpoints = Lists.newArrayList(); for(Entry<NodeEndpoint, Integer> entry : endpoints.entrySet()) { for(int i=0; i < entry.getValue(); i++) { assignedEndpoints.add(entry.getKey()); } } fragmentWrapper.setWidth(width); fragmentWrapper.assignEndpoints(parameters, assignedEndpoints); } private HardAffinityFragmentParallelizer(); @Override void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<NodeEndpoint> activeEndpoints); @Override int getIdealFragmentWidth(final Wrapper fragment, final ParallelizationParameters parameters); static final HardAffinityFragmentParallelizer INSTANCE; }
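Example (editor's note): a minimal, self-contained sketch of the slot-allocation loop exercised by the parallelizer above — every mandatory endpoint gets one slot up front, remaining slots are handed out in proportion to affinity, capped per node, and a pass that assigns nothing signals failure. Names and sample values are hypothetical; this is plain Java, not the Dremio classes, and it omits the per-endpoint maxWidth cap for brevity.

import java.util.LinkedHashMap;
import java.util.Map;

public class AffinityAllocationSketch {
  public static void main(String[] args) {
    // hypothetical affinities per endpoint
    Map<String, Double> affinity = new LinkedHashMap<>();
    affinity.put("n1", 0.20);
    affinity.put("n2", 0.15);
    affinity.put("n3", 0.10);

    int width = 7;      // total minor fragments to place
    int maxPerNode = 3; // per-endpoint cap

    // every mandatory endpoint gets one slot up front
    Map<String, Integer> slots = new LinkedHashMap<>();
    affinity.keySet().forEach(ep -> slots.put(ep, 1));
    int assigned = slots.size();

    double totalAffinity = 1.0 + affinity.values().stream().mapToDouble(Double::doubleValue).sum();

    int remaining = width - assigned;
    while (remaining > 0) {
      for (Map.Entry<String, Double> e : affinity.entrySet()) {
        int more = (int) Math.ceil((e.getValue() / totalAffinity) * remaining);
        int current = slots.get(e.getKey());
        for (int i = 0; i < more && assigned < width && current < maxPerNode; i++) {
          assigned++;
          current++;
        }
        slots.put(e.getKey(), current);
      }
      int before = remaining;
      remaining = width - assigned;
      if (before == remaining) {
        // no progress: every endpoint is at its cap, mirroring the "cannot parallelize" error above
        throw new IllegalStateException("every endpoint is at its per-node cap; cannot place " + remaining + " more");
      }
    }
    System.out.println(slots); // prints {n1=3, n2=2, n3=2} for these sample values
  }
}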
@Test public void multiAttrsInSamePOP() throws Exception { PlanFragmentsIndex.Builder indexBuilder = new PlanFragmentsIndex.Builder(); MinorDataSerDe serDe = new MinorDataSerDe(null, null); MinorDataWriter writer = new MinorDataWriter(null, dummyEndpoint, serDe, indexBuilder); MockStorePOP pop = new MockStorePOP(OpProps.prototype(1), null); List<HBaseSubScanSpec> specList = new ArrayList<>(); for (int i = 0; i < 4; ++i) { HBaseSubScanSpec spec = HBaseSubScanSpec .newBuilder() .setTableName("testTable" + i) .build(); specList.add(spec); writer.writeProtoEntry(pop.getProps(), "testKey" + i, spec); } MinorAttrsMap minorAttrsMap = MinorAttrsMap.create(writer.getAllAttrs()); MinorDataReader reader = new MinorDataReader(null, serDe, null, minorAttrsMap); for (int i = 0; i < 4; ++i) { HBaseSubScanSpec spec = HBaseSubScanSpec.parseFrom(reader.readProtoEntry(pop.getProps(), "testKey" + i)); assertEquals(spec, specList.get(i)); } }
public ByteString readProtoEntry(OpProps props, String key) throws Exception { return attrsMap.getAttrValue(props, key); }
MinorDataReader { public ByteString readProtoEntry(OpProps props, String key) throws Exception { return attrsMap.getAttrValue(props, key); } }
MinorDataReader { public ByteString readProtoEntry(OpProps props, String key) throws Exception { return attrsMap.getAttrValue(props, key); } MinorDataReader( FragmentHandle handle, MinorDataSerDe serDe, PlanFragmentsIndex index, MinorAttrsMap attrsMap); }
MinorDataReader { public ByteString readProtoEntry(OpProps props, String key) throws Exception { return attrsMap.getAttrValue(props, key); } MinorDataReader( FragmentHandle handle, MinorDataSerDe serDe, PlanFragmentsIndex index, MinorAttrsMap attrsMap); FragmentHandle getHandle(); MinorFragmentIndexEndpoint readMinorFragmentIndexEndpoint(OpProps props, String key); List<MinorFragmentIndexEndpoint> readMinorFragmentIndexEndpoints(OpProps props, String key); ByteString readProtoEntry(OpProps props, String key); NormalizedPartitionInfo readSplitPartition(OpProps props, String key); T readJsonEntry(OpProps props, String key, Class<T> clazz); }
MinorDataReader { public ByteString readProtoEntry(OpProps props, String key) throws Exception { return attrsMap.getAttrValue(props, key); } MinorDataReader( FragmentHandle handle, MinorDataSerDe serDe, PlanFragmentsIndex index, MinorAttrsMap attrsMap); FragmentHandle getHandle(); MinorFragmentIndexEndpoint readMinorFragmentIndexEndpoint(OpProps props, String key); List<MinorFragmentIndexEndpoint> readMinorFragmentIndexEndpoints(OpProps props, String key); ByteString readProtoEntry(OpProps props, String key); NormalizedPartitionInfo readSplitPartition(OpProps props, String key); T readJsonEntry(OpProps props, String key, Class<T> clazz); }
@Test public void sameKeyMultiPOPs() throws Exception { PlanFragmentsIndex.Builder indexBuilder = new PlanFragmentsIndex.Builder(); MinorDataSerDe serDe = new MinorDataSerDe(null, null); MinorDataWriter writer = new MinorDataWriter(null, dummyEndpoint, serDe, indexBuilder); MockStorePOP pop1 = new MockStorePOP(OpProps.prototype(1), null); MockStorePOP pop2 = new MockStorePOP(OpProps.prototype(2), null); List<HBaseSubScanSpec> specList = new ArrayList<>(); for (int i = 0; i < 2; ++i) { HBaseSubScanSpec spec = HBaseSubScanSpec .newBuilder() .setTableName("testTable" + i) .build(); specList.add(spec); writer.writeProtoEntry(i == 0 ? pop1.getProps() : pop2.getProps(), "testKey", spec); } MinorAttrsMap minorAttrsMap = MinorAttrsMap.create(writer.getAllAttrs()); MinorDataReader reader = new MinorDataReader(null, serDe, null, minorAttrsMap); HBaseSubScanSpec spec1 = HBaseSubScanSpec.parseFrom(reader.readProtoEntry(pop1.getProps(), "testKey")); assertEquals(spec1, specList.get(0)); HBaseSubScanSpec spec2 = HBaseSubScanSpec.parseFrom(reader.readProtoEntry(pop2.getProps(), "testKey")); assertEquals(spec2, specList.get(1)); }
public ByteString readProtoEntry(OpProps props, String key) throws Exception { return attrsMap.getAttrValue(props, key); }
MinorDataReader { public ByteString readProtoEntry(OpProps props, String key) throws Exception { return attrsMap.getAttrValue(props, key); } }
MinorDataReader { public ByteString readProtoEntry(OpProps props, String key) throws Exception { return attrsMap.getAttrValue(props, key); } MinorDataReader( FragmentHandle handle, MinorDataSerDe serDe, PlanFragmentsIndex index, MinorAttrsMap attrsMap); }
MinorDataReader { public ByteString readProtoEntry(OpProps props, String key) throws Exception { return attrsMap.getAttrValue(props, key); } MinorDataReader( FragmentHandle handle, MinorDataSerDe serDe, PlanFragmentsIndex index, MinorAttrsMap attrsMap); FragmentHandle getHandle(); MinorFragmentIndexEndpoint readMinorFragmentIndexEndpoint(OpProps props, String key); List<MinorFragmentIndexEndpoint> readMinorFragmentIndexEndpoints(OpProps props, String key); ByteString readProtoEntry(OpProps props, String key); NormalizedPartitionInfo readSplitPartition(OpProps props, String key); T readJsonEntry(OpProps props, String key, Class<T> clazz); }
MinorDataReader { public ByteString readProtoEntry(OpProps props, String key) throws Exception { return attrsMap.getAttrValue(props, key); } MinorDataReader( FragmentHandle handle, MinorDataSerDe serDe, PlanFragmentsIndex index, MinorAttrsMap attrsMap); FragmentHandle getHandle(); MinorFragmentIndexEndpoint readMinorFragmentIndexEndpoint(OpProps props, String key); List<MinorFragmentIndexEndpoint> readMinorFragmentIndexEndpoints(OpProps props, String key); ByteString readProtoEntry(OpProps props, String key); NormalizedPartitionInfo readSplitPartition(OpProps props, String key); T readJsonEntry(OpProps props, String key, Class<T> clazz); }
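Example (editor's note): the two MinorDataWriter/MinorDataReader tests above both rely on attributes being scoped by the owning operator as well as by the string key, so identical keys written by different operators do not collide. A standalone sketch of that keying scheme, with hypothetical names and plain byte arrays instead of the Dremio serde and protobuf types:

import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;

public class AttrsMapSketch {
  private final Map<String, byte[]> attrs = new HashMap<>();

  // scope each entry to its operator, analogous to keying by OpProps + key
  private static String compositeKey(int operatorId, String key) {
    return operatorId + "/" + key;
  }

  public void write(int operatorId, String key, byte[] value) {
    attrs.put(compositeKey(operatorId, key), value);
  }

  public byte[] read(int operatorId, String key) {
    return attrs.get(compositeKey(operatorId, key));
  }

  public static void main(String[] args) {
    AttrsMapSketch sketch = new AttrsMapSketch();
    // same key, two different operators -> two independent entries
    sketch.write(1, "testKey", "testTable0".getBytes(StandardCharsets.UTF_8));
    sketch.write(2, "testKey", "testTable1".getBytes(StandardCharsets.UTF_8));
    System.out.println(new String(sketch.read(1, "testKey"), StandardCharsets.UTF_8)); // testTable0
    System.out.println(new String(sketch.read(2, "testKey"), StandardCharsets.UTF_8)); // testTable1
  }
}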
@Test public void testListDiscardsGivenExclusions() { when(excluded.getMaterializationFor(converter)).thenReturn(relOptMat1); when(excluded.getLayoutId()).thenReturn("rid-1"); when(included.getMaterializationFor(converter)).thenReturn(relOptMat2); when(included.getLayoutId()).thenReturn("rid-2"); SubstitutionSettings materializationSettings = new SubstitutionSettings(ImmutableList.of("rid-1")); when(session.getSubstitutionSettings()).thenReturn(materializationSettings); when(provider.get()).thenReturn(ImmutableList.of(excluded, included)); final MaterializationList materializations = new MaterializationList(converter, session, provider); materializations.build(provider); verify(excluded, never()).getMaterializationFor(any(SqlConverter.class)); verify(included, atLeastOnce()).getMaterializationFor(converter); }
@VisibleForTesting protected List<DremioMaterialization> build(final MaterializationDescriptorProvider provider) { final Set<String> exclusions = Sets.newHashSet(session.getSubstitutionSettings().getExclusions()); final Set<String> inclusions = Sets.newHashSet(session.getSubstitutionSettings().getInclusions()); final boolean hasInclusions = !inclusions.isEmpty(); final List<DremioMaterialization> materializations = Lists.newArrayList(); for (final MaterializationDescriptor descriptor : provider.get()) { if( (hasInclusions && !inclusions.contains(descriptor.getLayoutId())) || exclusions.contains(descriptor.getLayoutId()) ) { continue; } try { final DremioMaterialization materialization = descriptor.getMaterializationFor(converter); if (materialization == null) { continue; } mapping.put(TablePath.of(descriptor.getPath()), descriptor); materializations.add(materialization); } catch (Throwable e) { logger.warn("failed to expand materialization {}", descriptor.getMaterializationId(), e); } } return materializations; }
MaterializationList implements MaterializationProvider { @VisibleForTesting protected List<DremioMaterialization> build(final MaterializationDescriptorProvider provider) { final Set<String> exclusions = Sets.newHashSet(session.getSubstitutionSettings().getExclusions()); final Set<String> inclusions = Sets.newHashSet(session.getSubstitutionSettings().getInclusions()); final boolean hasInclusions = !inclusions.isEmpty(); final List<DremioMaterialization> materializations = Lists.newArrayList(); for (final MaterializationDescriptor descriptor : provider.get()) { if( (hasInclusions && !inclusions.contains(descriptor.getLayoutId())) || exclusions.contains(descriptor.getLayoutId()) ) { continue; } try { final DremioMaterialization materialization = descriptor.getMaterializationFor(converter); if (materialization == null) { continue; } mapping.put(TablePath.of(descriptor.getPath()), descriptor); materializations.add(materialization); } catch (Throwable e) { logger.warn("failed to expand materialization {}", descriptor.getMaterializationId(), e); } } return materializations; } }
MaterializationList implements MaterializationProvider { @VisibleForTesting protected List<DremioMaterialization> build(final MaterializationDescriptorProvider provider) { final Set<String> exclusions = Sets.newHashSet(session.getSubstitutionSettings().getExclusions()); final Set<String> inclusions = Sets.newHashSet(session.getSubstitutionSettings().getInclusions()); final boolean hasInclusions = !inclusions.isEmpty(); final List<DremioMaterialization> materializations = Lists.newArrayList(); for (final MaterializationDescriptor descriptor : provider.get()) { if( (hasInclusions && !inclusions.contains(descriptor.getLayoutId())) || exclusions.contains(descriptor.getLayoutId()) ) { continue; } try { final DremioMaterialization materialization = descriptor.getMaterializationFor(converter); if (materialization == null) { continue; } mapping.put(TablePath.of(descriptor.getPath()), descriptor); materializations.add(materialization); } catch (Throwable e) { logger.warn("failed to expand materialization {}", descriptor.getMaterializationId(), e); } } return materializations; } MaterializationList(final SqlConverter converter, final UserSession session, final MaterializationDescriptorProvider provider); }
MaterializationList implements MaterializationProvider { @VisibleForTesting protected List<DremioMaterialization> build(final MaterializationDescriptorProvider provider) { final Set<String> exclusions = Sets.newHashSet(session.getSubstitutionSettings().getExclusions()); final Set<String> inclusions = Sets.newHashSet(session.getSubstitutionSettings().getInclusions()); final boolean hasInclusions = !inclusions.isEmpty(); final List<DremioMaterialization> materializations = Lists.newArrayList(); for (final MaterializationDescriptor descriptor : provider.get()) { if( (hasInclusions && !inclusions.contains(descriptor.getLayoutId())) || exclusions.contains(descriptor.getLayoutId()) ) { continue; } try { final DremioMaterialization materialization = descriptor.getMaterializationFor(converter); if (materialization == null) { continue; } mapping.put(TablePath.of(descriptor.getPath()), descriptor); materializations.add(materialization); } catch (Throwable e) { logger.warn("failed to expand materialization {}", descriptor.getMaterializationId(), e); } } return materializations; } MaterializationList(final SqlConverter converter, final UserSession session, final MaterializationDescriptorProvider provider); @Override List<DremioMaterialization> getMaterializations(); @Override java.util.Optional<DremioMaterialization> getDefaultRawMaterialization(NamespaceKey path, List<String> vdsFields); Optional<MaterializationDescriptor> getDescriptor(final List<String> path); Optional<MaterializationDescriptor> getDescriptor(final TablePath path); }
MaterializationList implements MaterializationProvider { @VisibleForTesting protected List<DremioMaterialization> build(final MaterializationDescriptorProvider provider) { final Set<String> exclusions = Sets.newHashSet(session.getSubstitutionSettings().getExclusions()); final Set<String> inclusions = Sets.newHashSet(session.getSubstitutionSettings().getInclusions()); final boolean hasInclusions = !inclusions.isEmpty(); final List<DremioMaterialization> materializations = Lists.newArrayList(); for (final MaterializationDescriptor descriptor : provider.get()) { if( (hasInclusions && !inclusions.contains(descriptor.getLayoutId())) || exclusions.contains(descriptor.getLayoutId()) ) { continue; } try { final DremioMaterialization materialization = descriptor.getMaterializationFor(converter); if (materialization == null) { continue; } mapping.put(TablePath.of(descriptor.getPath()), descriptor); materializations.add(materialization); } catch (Throwable e) { logger.warn("failed to expand materialization {}", descriptor.getMaterializationId(), e); } } return materializations; } MaterializationList(final SqlConverter converter, final UserSession session, final MaterializationDescriptorProvider provider); @Override List<DremioMaterialization> getMaterializations(); @Override java.util.Optional<DremioMaterialization> getDefaultRawMaterialization(NamespaceKey path, List<String> vdsFields); Optional<MaterializationDescriptor> getDescriptor(final List<String> path); Optional<MaterializationDescriptor> getDescriptor(final TablePath path); }
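Example (editor's note): the selection rule tested above is small enough to restate on its own — a descriptor is skipped when an inclusion list is configured and does not contain its layout id, or when its layout id appears on the exclusion list. A hedged, standalone restatement of just that predicate (hypothetical names, not the Dremio API):

import java.util.List;
import java.util.Set;

public class SubstitutionFilterSketch {
  // true when the layout id survives the inclusion/exclusion settings
  static boolean isSelected(String layoutId, Set<String> inclusions, Set<String> exclusions) {
    boolean hasInclusions = !inclusions.isEmpty();
    if (hasInclusions && !inclusions.contains(layoutId)) {
      return false;
    }
    return !exclusions.contains(layoutId);
  }

  public static void main(String[] args) {
    Set<String> inclusions = Set.of();        // no inclusion list configured
    Set<String> exclusions = Set.of("rid-1"); // exclude one layout, as in the test
    for (String id : List.of("rid-1", "rid-2")) {
      System.out.println(id + " selected: " + isSelected(id, inclusions, exclusions));
    }
    // rid-1 selected: false, rid-2 selected: true
  }
}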
@Test public void testAreRowTypesEqualIgnoresNames() { final RelDataType type1 = typeFactory.createStructType( asList( typeFactory.createSqlType(SqlTypeName.INTEGER), typeFactory.createSqlType(SqlTypeName.BIGINT), typeFactory.createSqlType(SqlTypeName.FLOAT), typeFactory.createSqlType(SqlTypeName.DOUBLE), typeFactory.createSqlType(SqlTypeName.DATE), typeFactory.createSqlType(SqlTypeName.TIMESTAMP), typeFactory.createSqlType(SqlTypeName.VARCHAR), typeFactory.createSqlType(SqlTypeName.BOOLEAN) ), asList("intC", "bigIntC", "floatC", "doubleC", "dateC", "tsC", "varcharC", "boolC") ); final RelDataType type2 = typeFactory.createStructType( asList( typeFactory.createSqlType(SqlTypeName.INTEGER), typeFactory.createSqlType(SqlTypeName.BIGINT), typeFactory.createSqlType(SqlTypeName.FLOAT), typeFactory.createSqlType(SqlTypeName.DOUBLE), typeFactory.createSqlType(SqlTypeName.DATE), typeFactory.createSqlType(SqlTypeName.TIMESTAMP), typeFactory.createSqlType(SqlTypeName.VARCHAR), typeFactory.createSqlType(SqlTypeName.BOOLEAN) ), asList("intC2", "bigIntC2", "floatC2", "doubleC2", "dateC2", "tsC2", "varcharC2", "boolC2") ); Assert.assertTrue(MaterializationExpander.areRowTypesEqual(type1, type2)); }
@VisibleForTesting static boolean areRowTypesEqual(RelDataType rowType1, RelDataType rowType2) { if (rowType1 == rowType2) { return true; } if (rowType2.getFieldCount() != rowType1.getFieldCount()) { return false; } final List<RelDataTypeField> f1 = rowType1.getFieldList(); final List<RelDataTypeField> f2 = rowType2.getFieldList(); for (Pair<RelDataTypeField, RelDataTypeField> pair : Pair.zip(f1, f2)) { final RelDataType type1 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.left.getType(), false); final RelDataType type2 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.right.getType(), false); if (type1.equals(type2)) { continue; } if (type1.getSqlTypeName() == SqlTypeName.ANY || type2.getSqlTypeName() == SqlTypeName.ANY) { continue; } if (type1.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER && type2.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER) { continue; } if (type1.getSqlTypeName() == SqlTypeName.DOUBLE && type2.getSqlTypeName() == SqlTypeName .DECIMAL || isSumAggOutput(type1, type2)) { continue; } return false; } return true; }
MaterializationExpander { @VisibleForTesting static boolean areRowTypesEqual(RelDataType rowType1, RelDataType rowType2) { if (rowType1 == rowType2) { return true; } if (rowType2.getFieldCount() != rowType1.getFieldCount()) { return false; } final List<RelDataTypeField> f1 = rowType1.getFieldList(); final List<RelDataTypeField> f2 = rowType2.getFieldList(); for (Pair<RelDataTypeField, RelDataTypeField> pair : Pair.zip(f1, f2)) { final RelDataType type1 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.left.getType(), false); final RelDataType type2 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.right.getType(), false); if (type1.equals(type2)) { continue; } if (type1.getSqlTypeName() == SqlTypeName.ANY || type2.getSqlTypeName() == SqlTypeName.ANY) { continue; } if (type1.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER && type2.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER) { continue; } if (type1.getSqlTypeName() == SqlTypeName.DOUBLE && type2.getSqlTypeName() == SqlTypeName .DECIMAL || isSumAggOutput(type1, type2)) { continue; } return false; } return true; } }
MaterializationExpander { @VisibleForTesting static boolean areRowTypesEqual(RelDataType rowType1, RelDataType rowType2) { if (rowType1 == rowType2) { return true; } if (rowType2.getFieldCount() != rowType1.getFieldCount()) { return false; } final List<RelDataTypeField> f1 = rowType1.getFieldList(); final List<RelDataTypeField> f2 = rowType2.getFieldList(); for (Pair<RelDataTypeField, RelDataTypeField> pair : Pair.zip(f1, f2)) { final RelDataType type1 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.left.getType(), false); final RelDataType type2 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.right.getType(), false); if (type1.equals(type2)) { continue; } if (type1.getSqlTypeName() == SqlTypeName.ANY || type2.getSqlTypeName() == SqlTypeName.ANY) { continue; } if (type1.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER && type2.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER) { continue; } if (type1.getSqlTypeName() == SqlTypeName.DOUBLE && type2.getSqlTypeName() == SqlTypeName .DECIMAL || isSumAggOutput(type1, type2)) { continue; } return false; } return true; } private MaterializationExpander(final SqlConverter parent); }
MaterializationExpander { @VisibleForTesting static boolean areRowTypesEqual(RelDataType rowType1, RelDataType rowType2) { if (rowType1 == rowType2) { return true; } if (rowType2.getFieldCount() != rowType1.getFieldCount()) { return false; } final List<RelDataTypeField> f1 = rowType1.getFieldList(); final List<RelDataTypeField> f2 = rowType2.getFieldList(); for (Pair<RelDataTypeField, RelDataTypeField> pair : Pair.zip(f1, f2)) { final RelDataType type1 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.left.getType(), false); final RelDataType type2 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.right.getType(), false); if (type1.equals(type2)) { continue; } if (type1.getSqlTypeName() == SqlTypeName.ANY || type2.getSqlTypeName() == SqlTypeName.ANY) { continue; } if (type1.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER && type2.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER) { continue; } if (type1.getSqlTypeName() == SqlTypeName.DOUBLE && type2.getSqlTypeName() == SqlTypeName .DECIMAL || isSumAggOutput(type1, type2)) { continue; } return false; } return true; } private MaterializationExpander(final SqlConverter parent); DremioMaterialization expand(MaterializationDescriptor descriptor); static RelNode deserializePlan(final byte[] planBytes, SqlConverter parent); static MaterializationExpander of(final SqlConverter parent); }
MaterializationExpander { @VisibleForTesting static boolean areRowTypesEqual(RelDataType rowType1, RelDataType rowType2) { if (rowType1 == rowType2) { return true; } if (rowType2.getFieldCount() != rowType1.getFieldCount()) { return false; } final List<RelDataTypeField> f1 = rowType1.getFieldList(); final List<RelDataTypeField> f2 = rowType2.getFieldList(); for (Pair<RelDataTypeField, RelDataTypeField> pair : Pair.zip(f1, f2)) { final RelDataType type1 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.left.getType(), false); final RelDataType type2 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.right.getType(), false); if (type1.equals(type2)) { continue; } if (type1.getSqlTypeName() == SqlTypeName.ANY || type2.getSqlTypeName() == SqlTypeName.ANY) { continue; } if (type1.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER && type2.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER) { continue; } if (type1.getSqlTypeName() == SqlTypeName.DOUBLE && type2.getSqlTypeName() == SqlTypeName .DECIMAL || isSumAggOutput(type1, type2)) { continue; } return false; } return true; } private MaterializationExpander(final SqlConverter parent); DremioMaterialization expand(MaterializationDescriptor descriptor); static RelNode deserializePlan(final byte[] planBytes, SqlConverter parent); static MaterializationExpander of(final SqlConverter parent); }
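Example (editor's note): this and the following areRowTypesEqual tests all probe the per-field leniency rules — field names and nullability are ignored, ANY matches anything, any two CHARACTER-family types match, and a DOUBLE on the left matches a DECIMAL on the right. The simplified analogue below compares SqlTypeName values only, so unlike the real check it also ignores precision (which the timestamp-precision test later shows does matter), and it omits the isSumAggOutput case because that method's body is not shown here. Calcite's SqlTypeName/SqlTypeFamily enums are assumed to be on the classpath.

import org.apache.calcite.sql.type.SqlTypeFamily;
import org.apache.calcite.sql.type.SqlTypeName;

public class FieldTypeLenienceSketch {
  // simplified per-field compatibility check mirroring the leniency rules above
  static boolean fieldsCompatible(SqlTypeName left, SqlTypeName right) {
    if (left == right) {
      return true; // names ignored; nullability is normalised away upstream
    }
    if (left == SqlTypeName.ANY || right == SqlTypeName.ANY) {
      return true;
    }
    if (left.getFamily() == SqlTypeFamily.CHARACTER && right.getFamily() == SqlTypeFamily.CHARACTER) {
      return true;
    }
    // one-directional, as in the original: DOUBLE (left) vs DECIMAL (right)
    return left == SqlTypeName.DOUBLE && right == SqlTypeName.DECIMAL;
  }

  public static void main(String[] args) {
    System.out.println(fieldsCompatible(SqlTypeName.CHAR, SqlTypeName.VARCHAR));   // true
    System.out.println(fieldsCompatible(SqlTypeName.BIGINT, SqlTypeName.ANY));     // true
    System.out.println(fieldsCompatible(SqlTypeName.INTEGER, SqlTypeName.BIGINT)); // false
  }
}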
@Test public void testParseJsonPath() throws Exception { JsonPath p = JSONElementLocator.parsePath("value.a"); assertEquals(p.toString(), 1, p.size()); assertEquals(p.toString(), "a", p.last().asObject().getField()); }
public static JsonPath parsePath(String path) { if (path.startsWith(VALUE_PLACEHOLDER)) { return new JsonPath(path.substring(VALUE_PLACEHOLDER.length())); } throw new IllegalArgumentException(path + " must start with 'value'"); }
JSONElementLocator { public static JsonPath parsePath(String path) { if (path.startsWith(VALUE_PLACEHOLDER)) { return new JsonPath(path.substring(VALUE_PLACEHOLDER.length())); } throw new IllegalArgumentException(path + " must start with 'value'"); } }
JSONElementLocator { public static JsonPath parsePath(String path) { if (path.startsWith(VALUE_PLACEHOLDER)) { return new JsonPath(path.substring(VALUE_PLACEHOLDER.length())); } throw new IllegalArgumentException(path + " must start with 'value'"); } JSONElementLocator(String text); }
JSONElementLocator { public static JsonPath parsePath(String path) { if (path.startsWith(VALUE_PLACEHOLDER)) { return new JsonPath(path.substring(VALUE_PLACEHOLDER.length())); } throw new IllegalArgumentException(path + " must start with 'value'"); } JSONElementLocator(String text); static JsonPath parsePath(String path); Interval locatePath(JsonPath searchedPath); JsonSelection locate(int selectionStart, int selectionEnd); }
JSONElementLocator { public static JsonPath parsePath(String path) { if (path.startsWith(VALUE_PLACEHOLDER)) { return new JsonPath(path.substring(VALUE_PLACEHOLDER.length())); } throw new IllegalArgumentException(path + " must start with 'value'"); } JSONElementLocator(String text); static JsonPath parsePath(String path); Interval locatePath(JsonPath searchedPath); JsonSelection locate(int selectionStart, int selectionEnd); }
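Example (editor's note): parsePath strips a leading placeholder before building the JsonPath and rejects anything that does not start with it. A tiny standalone sketch of just that prefix handling, assuming the placeholder is the literal string "value" (the error message suggests this, but the constant's value is not shown in the source):

public class JsonPathPrefixSketch {
  // hypothetical stand-in for the VALUE_PLACEHOLDER constant referenced above
  private static final String VALUE_PLACEHOLDER = "value";

  static String stripValuePrefix(String path) {
    if (path.startsWith(VALUE_PLACEHOLDER)) {
      return path.substring(VALUE_PLACEHOLDER.length()); // "value.a" -> ".a"
    }
    throw new IllegalArgumentException(path + " must start with 'value'");
  }

  public static void main(String[] args) {
    System.out.println(stripValuePrefix("value.a")); // prints ".a", which the real code parses into a one-element path
  }
}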
@Test public void testAreRowTypesEqualIgnoresNullability() { final RelDataType type1 = typeFactory.createStructType( asList( typeFactory.createSqlType(SqlTypeName.INTEGER), typeFactory.createSqlType(SqlTypeName.DOUBLE), typeFactory.createSqlType(SqlTypeName.TIMESTAMP), typeFactory.createSqlType(SqlTypeName.BOOLEAN) ), asList("intC", "doubleC", "tsC", "boolC") ); final RelDataType type2 = typeFactory.createStructType( asList( typeFactory.createTypeWithNullability(typeFactory.createSqlType(SqlTypeName.INTEGER), true), typeFactory.createTypeWithNullability(typeFactory.createSqlType(SqlTypeName.DOUBLE), true), typeFactory.createTypeWithNullability(typeFactory.createSqlType(SqlTypeName.TIMESTAMP), true), typeFactory.createTypeWithNullability(typeFactory.createSqlType(SqlTypeName.BOOLEAN), true) ), asList("intC2", "doubleC2", "tsC2", "boolC2") ); Assert.assertTrue(MaterializationExpander.areRowTypesEqual(type1, type2)); }
@VisibleForTesting static boolean areRowTypesEqual(RelDataType rowType1, RelDataType rowType2) { if (rowType1 == rowType2) { return true; } if (rowType2.getFieldCount() != rowType1.getFieldCount()) { return false; } final List<RelDataTypeField> f1 = rowType1.getFieldList(); final List<RelDataTypeField> f2 = rowType2.getFieldList(); for (Pair<RelDataTypeField, RelDataTypeField> pair : Pair.zip(f1, f2)) { final RelDataType type1 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.left.getType(), false); final RelDataType type2 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.right.getType(), false); if (type1.equals(type2)) { continue; } if (type1.getSqlTypeName() == SqlTypeName.ANY || type2.getSqlTypeName() == SqlTypeName.ANY) { continue; } if (type1.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER && type2.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER) { continue; } if (type1.getSqlTypeName() == SqlTypeName.DOUBLE && type2.getSqlTypeName() == SqlTypeName .DECIMAL || isSumAggOutput(type1, type2)) { continue; } return false; } return true; }
MaterializationExpander { @VisibleForTesting static boolean areRowTypesEqual(RelDataType rowType1, RelDataType rowType2) { if (rowType1 == rowType2) { return true; } if (rowType2.getFieldCount() != rowType1.getFieldCount()) { return false; } final List<RelDataTypeField> f1 = rowType1.getFieldList(); final List<RelDataTypeField> f2 = rowType2.getFieldList(); for (Pair<RelDataTypeField, RelDataTypeField> pair : Pair.zip(f1, f2)) { final RelDataType type1 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.left.getType(), false); final RelDataType type2 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.right.getType(), false); if (type1.equals(type2)) { continue; } if (type1.getSqlTypeName() == SqlTypeName.ANY || type2.getSqlTypeName() == SqlTypeName.ANY) { continue; } if (type1.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER && type2.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER) { continue; } if (type1.getSqlTypeName() == SqlTypeName.DOUBLE && type2.getSqlTypeName() == SqlTypeName .DECIMAL || isSumAggOutput(type1, type2)) { continue; } return false; } return true; } }
MaterializationExpander { @VisibleForTesting static boolean areRowTypesEqual(RelDataType rowType1, RelDataType rowType2) { if (rowType1 == rowType2) { return true; } if (rowType2.getFieldCount() != rowType1.getFieldCount()) { return false; } final List<RelDataTypeField> f1 = rowType1.getFieldList(); final List<RelDataTypeField> f2 = rowType2.getFieldList(); for (Pair<RelDataTypeField, RelDataTypeField> pair : Pair.zip(f1, f2)) { final RelDataType type1 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.left.getType(), false); final RelDataType type2 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.right.getType(), false); if (type1.equals(type2)) { continue; } if (type1.getSqlTypeName() == SqlTypeName.ANY || type2.getSqlTypeName() == SqlTypeName.ANY) { continue; } if (type1.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER && type2.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER) { continue; } if (type1.getSqlTypeName() == SqlTypeName.DOUBLE && type2.getSqlTypeName() == SqlTypeName .DECIMAL || isSumAggOutput(type1, type2)) { continue; } return false; } return true; } private MaterializationExpander(final SqlConverter parent); }
MaterializationExpander { @VisibleForTesting static boolean areRowTypesEqual(RelDataType rowType1, RelDataType rowType2) { if (rowType1 == rowType2) { return true; } if (rowType2.getFieldCount() != rowType1.getFieldCount()) { return false; } final List<RelDataTypeField> f1 = rowType1.getFieldList(); final List<RelDataTypeField> f2 = rowType2.getFieldList(); for (Pair<RelDataTypeField, RelDataTypeField> pair : Pair.zip(f1, f2)) { final RelDataType type1 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.left.getType(), false); final RelDataType type2 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.right.getType(), false); if (type1.equals(type2)) { continue; } if (type1.getSqlTypeName() == SqlTypeName.ANY || type2.getSqlTypeName() == SqlTypeName.ANY) { continue; } if (type1.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER && type2.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER) { continue; } if (type1.getSqlTypeName() == SqlTypeName.DOUBLE && type2.getSqlTypeName() == SqlTypeName .DECIMAL || isSumAggOutput(type1, type2)) { continue; } return false; } return true; } private MaterializationExpander(final SqlConverter parent); DremioMaterialization expand(MaterializationDescriptor descriptor); static RelNode deserializePlan(final byte[] planBytes, SqlConverter parent); static MaterializationExpander of(final SqlConverter parent); }
MaterializationExpander { @VisibleForTesting static boolean areRowTypesEqual(RelDataType rowType1, RelDataType rowType2) { if (rowType1 == rowType2) { return true; } if (rowType2.getFieldCount() != rowType1.getFieldCount()) { return false; } final List<RelDataTypeField> f1 = rowType1.getFieldList(); final List<RelDataTypeField> f2 = rowType2.getFieldList(); for (Pair<RelDataTypeField, RelDataTypeField> pair : Pair.zip(f1, f2)) { final RelDataType type1 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.left.getType(), false); final RelDataType type2 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.right.getType(), false); if (type1.equals(type2)) { continue; } if (type1.getSqlTypeName() == SqlTypeName.ANY || type2.getSqlTypeName() == SqlTypeName.ANY) { continue; } if (type1.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER && type2.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER) { continue; } if (type1.getSqlTypeName() == SqlTypeName.DOUBLE && type2.getSqlTypeName() == SqlTypeName .DECIMAL || isSumAggOutput(type1, type2)) { continue; } return false; } return true; } private MaterializationExpander(final SqlConverter parent); DremioMaterialization expand(MaterializationDescriptor descriptor); static RelNode deserializePlan(final byte[] planBytes, SqlConverter parent); static MaterializationExpander of(final SqlConverter parent); }
@Test public void testAreRowTypesEqualIgnoresAny() { final RelDataType type1 = typeFactory.createStructType( asList( typeFactory.createSqlType(SqlTypeName.INTEGER), typeFactory.createSqlType(SqlTypeName.BIGINT) ), asList("intC", "bigIntC") ); final RelDataType type2 = typeFactory.createStructType( asList( typeFactory.createSqlType(SqlTypeName.INTEGER), typeFactory.createSqlType(SqlTypeName.ANY) ), asList("intC", "anyC") ); Assert.assertTrue(MaterializationExpander.areRowTypesEqual(type1, type2)); }
@VisibleForTesting static boolean areRowTypesEqual(RelDataType rowType1, RelDataType rowType2) { if (rowType1 == rowType2) { return true; } if (rowType2.getFieldCount() != rowType1.getFieldCount()) { return false; } final List<RelDataTypeField> f1 = rowType1.getFieldList(); final List<RelDataTypeField> f2 = rowType2.getFieldList(); for (Pair<RelDataTypeField, RelDataTypeField> pair : Pair.zip(f1, f2)) { final RelDataType type1 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.left.getType(), false); final RelDataType type2 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.right.getType(), false); if (type1.equals(type2)) { continue; } if (type1.getSqlTypeName() == SqlTypeName.ANY || type2.getSqlTypeName() == SqlTypeName.ANY) { continue; } if (type1.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER && type2.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER) { continue; } if (type1.getSqlTypeName() == SqlTypeName.DOUBLE && type2.getSqlTypeName() == SqlTypeName .DECIMAL || isSumAggOutput(type1, type2)) { continue; } return false; } return true; }
MaterializationExpander { @VisibleForTesting static boolean areRowTypesEqual(RelDataType rowType1, RelDataType rowType2) { if (rowType1 == rowType2) { return true; } if (rowType2.getFieldCount() != rowType1.getFieldCount()) { return false; } final List<RelDataTypeField> f1 = rowType1.getFieldList(); final List<RelDataTypeField> f2 = rowType2.getFieldList(); for (Pair<RelDataTypeField, RelDataTypeField> pair : Pair.zip(f1, f2)) { final RelDataType type1 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.left.getType(), false); final RelDataType type2 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.right.getType(), false); if (type1.equals(type2)) { continue; } if (type1.getSqlTypeName() == SqlTypeName.ANY || type2.getSqlTypeName() == SqlTypeName.ANY) { continue; } if (type1.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER && type2.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER) { continue; } if (type1.getSqlTypeName() == SqlTypeName.DOUBLE && type2.getSqlTypeName() == SqlTypeName .DECIMAL || isSumAggOutput(type1, type2)) { continue; } return false; } return true; } }
MaterializationExpander { @VisibleForTesting static boolean areRowTypesEqual(RelDataType rowType1, RelDataType rowType2) { if (rowType1 == rowType2) { return true; } if (rowType2.getFieldCount() != rowType1.getFieldCount()) { return false; } final List<RelDataTypeField> f1 = rowType1.getFieldList(); final List<RelDataTypeField> f2 = rowType2.getFieldList(); for (Pair<RelDataTypeField, RelDataTypeField> pair : Pair.zip(f1, f2)) { final RelDataType type1 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.left.getType(), false); final RelDataType type2 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.right.getType(), false); if (type1.equals(type2)) { continue; } if (type1.getSqlTypeName() == SqlTypeName.ANY || type2.getSqlTypeName() == SqlTypeName.ANY) { continue; } if (type1.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER && type2.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER) { continue; } if (type1.getSqlTypeName() == SqlTypeName.DOUBLE && type2.getSqlTypeName() == SqlTypeName .DECIMAL || isSumAggOutput(type1, type2)) { continue; } return false; } return true; } private MaterializationExpander(final SqlConverter parent); }
MaterializationExpander { @VisibleForTesting static boolean areRowTypesEqual(RelDataType rowType1, RelDataType rowType2) { if (rowType1 == rowType2) { return true; } if (rowType2.getFieldCount() != rowType1.getFieldCount()) { return false; } final List<RelDataTypeField> f1 = rowType1.getFieldList(); final List<RelDataTypeField> f2 = rowType2.getFieldList(); for (Pair<RelDataTypeField, RelDataTypeField> pair : Pair.zip(f1, f2)) { final RelDataType type1 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.left.getType(), false); final RelDataType type2 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.right.getType(), false); if (type1.equals(type2)) { continue; } if (type1.getSqlTypeName() == SqlTypeName.ANY || type2.getSqlTypeName() == SqlTypeName.ANY) { continue; } if (type1.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER && type2.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER) { continue; } if (type1.getSqlTypeName() == SqlTypeName.DOUBLE && type2.getSqlTypeName() == SqlTypeName .DECIMAL || isSumAggOutput(type1, type2)) { continue; } return false; } return true; } private MaterializationExpander(final SqlConverter parent); DremioMaterialization expand(MaterializationDescriptor descriptor); static RelNode deserializePlan(final byte[] planBytes, SqlConverter parent); static MaterializationExpander of(final SqlConverter parent); }
MaterializationExpander { @VisibleForTesting static boolean areRowTypesEqual(RelDataType rowType1, RelDataType rowType2) { if (rowType1 == rowType2) { return true; } if (rowType2.getFieldCount() != rowType1.getFieldCount()) { return false; } final List<RelDataTypeField> f1 = rowType1.getFieldList(); final List<RelDataTypeField> f2 = rowType2.getFieldList(); for (Pair<RelDataTypeField, RelDataTypeField> pair : Pair.zip(f1, f2)) { final RelDataType type1 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.left.getType(), false); final RelDataType type2 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.right.getType(), false); if (type1.equals(type2)) { continue; } if (type1.getSqlTypeName() == SqlTypeName.ANY || type2.getSqlTypeName() == SqlTypeName.ANY) { continue; } if (type1.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER && type2.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER) { continue; } if (type1.getSqlTypeName() == SqlTypeName.DOUBLE && type2.getSqlTypeName() == SqlTypeName .DECIMAL || isSumAggOutput(type1, type2)) { continue; } return false; } return true; } private MaterializationExpander(final SqlConverter parent); DremioMaterialization expand(MaterializationDescriptor descriptor); static RelNode deserializePlan(final byte[] planBytes, SqlConverter parent); static MaterializationExpander of(final SqlConverter parent); }
@Test public void testAreRowTypesEqualMatchesCharVarchar() { final RelDataType type1 = typeFactory.createStructType( asList( typeFactory.createSqlType(SqlTypeName.CHAR), typeFactory.createSqlType(SqlTypeName.VARCHAR) ), asList("charC", "varcharC") ); final RelDataType type2 = typeFactory.createStructType( asList( typeFactory.createSqlType(SqlTypeName.VARCHAR), typeFactory.createSqlType(SqlTypeName.CHAR) ), asList("varcharC", "charC") ); Assert.assertTrue(MaterializationExpander.areRowTypesEqual(type1, type2)); }
@VisibleForTesting static boolean areRowTypesEqual(RelDataType rowType1, RelDataType rowType2) { if (rowType1 == rowType2) { return true; } if (rowType2.getFieldCount() != rowType1.getFieldCount()) { return false; } final List<RelDataTypeField> f1 = rowType1.getFieldList(); final List<RelDataTypeField> f2 = rowType2.getFieldList(); for (Pair<RelDataTypeField, RelDataTypeField> pair : Pair.zip(f1, f2)) { final RelDataType type1 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.left.getType(), false); final RelDataType type2 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.right.getType(), false); if (type1.equals(type2)) { continue; } if (type1.getSqlTypeName() == SqlTypeName.ANY || type2.getSqlTypeName() == SqlTypeName.ANY) { continue; } if (type1.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER && type2.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER) { continue; } if (type1.getSqlTypeName() == SqlTypeName.DOUBLE && type2.getSqlTypeName() == SqlTypeName .DECIMAL || isSumAggOutput(type1, type2)) { continue; } return false; } return true; }
MaterializationExpander { @VisibleForTesting static boolean areRowTypesEqual(RelDataType rowType1, RelDataType rowType2) { if (rowType1 == rowType2) { return true; } if (rowType2.getFieldCount() != rowType1.getFieldCount()) { return false; } final List<RelDataTypeField> f1 = rowType1.getFieldList(); final List<RelDataTypeField> f2 = rowType2.getFieldList(); for (Pair<RelDataTypeField, RelDataTypeField> pair : Pair.zip(f1, f2)) { final RelDataType type1 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.left.getType(), false); final RelDataType type2 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.right.getType(), false); if (type1.equals(type2)) { continue; } if (type1.getSqlTypeName() == SqlTypeName.ANY || type2.getSqlTypeName() == SqlTypeName.ANY) { continue; } if (type1.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER && type2.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER) { continue; } if (type1.getSqlTypeName() == SqlTypeName.DOUBLE && type2.getSqlTypeName() == SqlTypeName .DECIMAL || isSumAggOutput(type1, type2)) { continue; } return false; } return true; } }
MaterializationExpander { @VisibleForTesting static boolean areRowTypesEqual(RelDataType rowType1, RelDataType rowType2) { if (rowType1 == rowType2) { return true; } if (rowType2.getFieldCount() != rowType1.getFieldCount()) { return false; } final List<RelDataTypeField> f1 = rowType1.getFieldList(); final List<RelDataTypeField> f2 = rowType2.getFieldList(); for (Pair<RelDataTypeField, RelDataTypeField> pair : Pair.zip(f1, f2)) { final RelDataType type1 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.left.getType(), false); final RelDataType type2 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.right.getType(), false); if (type1.equals(type2)) { continue; } if (type1.getSqlTypeName() == SqlTypeName.ANY || type2.getSqlTypeName() == SqlTypeName.ANY) { continue; } if (type1.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER && type2.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER) { continue; } if (type1.getSqlTypeName() == SqlTypeName.DOUBLE && type2.getSqlTypeName() == SqlTypeName .DECIMAL || isSumAggOutput(type1, type2)) { continue; } return false; } return true; } private MaterializationExpander(final SqlConverter parent); }
MaterializationExpander { @VisibleForTesting static boolean areRowTypesEqual(RelDataType rowType1, RelDataType rowType2) { if (rowType1 == rowType2) { return true; } if (rowType2.getFieldCount() != rowType1.getFieldCount()) { return false; } final List<RelDataTypeField> f1 = rowType1.getFieldList(); final List<RelDataTypeField> f2 = rowType2.getFieldList(); for (Pair<RelDataTypeField, RelDataTypeField> pair : Pair.zip(f1, f2)) { final RelDataType type1 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.left.getType(), false); final RelDataType type2 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.right.getType(), false); if (type1.equals(type2)) { continue; } if (type1.getSqlTypeName() == SqlTypeName.ANY || type2.getSqlTypeName() == SqlTypeName.ANY) { continue; } if (type1.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER && type2.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER) { continue; } if (type1.getSqlTypeName() == SqlTypeName.DOUBLE && type2.getSqlTypeName() == SqlTypeName .DECIMAL || isSumAggOutput(type1, type2)) { continue; } return false; } return true; } private MaterializationExpander(final SqlConverter parent); DremioMaterialization expand(MaterializationDescriptor descriptor); static RelNode deserializePlan(final byte[] planBytes, SqlConverter parent); static MaterializationExpander of(final SqlConverter parent); }
MaterializationExpander { @VisibleForTesting static boolean areRowTypesEqual(RelDataType rowType1, RelDataType rowType2) { if (rowType1 == rowType2) { return true; } if (rowType2.getFieldCount() != rowType1.getFieldCount()) { return false; } final List<RelDataTypeField> f1 = rowType1.getFieldList(); final List<RelDataTypeField> f2 = rowType2.getFieldList(); for (Pair<RelDataTypeField, RelDataTypeField> pair : Pair.zip(f1, f2)) { final RelDataType type1 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.left.getType(), false); final RelDataType type2 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.right.getType(), false); if (type1.equals(type2)) { continue; } if (type1.getSqlTypeName() == SqlTypeName.ANY || type2.getSqlTypeName() == SqlTypeName.ANY) { continue; } if (type1.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER && type2.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER) { continue; } if (type1.getSqlTypeName() == SqlTypeName.DOUBLE && type2.getSqlTypeName() == SqlTypeName .DECIMAL || isSumAggOutput(type1, type2)) { continue; } return false; } return true; } private MaterializationExpander(final SqlConverter parent); DremioMaterialization expand(MaterializationDescriptor descriptor); static RelNode deserializePlan(final byte[] planBytes, SqlConverter parent); static MaterializationExpander of(final SqlConverter parent); }
@Test public void testTimestampPrecisionMismatch() { final RelDataTypeFactory calciteFactory = new org.apache.calcite.jdbc.JavaTypeFactoryImpl(); final RelDataType type1 = calciteFactory.createStructType( Collections.singletonList(calciteFactory.createSqlType(SqlTypeName.TIMESTAMP, 0)), Collections.singletonList("ts0") ); final RelDataType type2 = typeFactory.createStructType( Collections.singletonList(typeFactory.createSqlType(SqlTypeName.TIMESTAMP, 3)), Collections.singletonList("ts3") ); Assert.assertFalse(MaterializationExpander.areRowTypesEqual(type1, type2)); }
@VisibleForTesting static boolean areRowTypesEqual(RelDataType rowType1, RelDataType rowType2) { if (rowType1 == rowType2) { return true; } if (rowType2.getFieldCount() != rowType1.getFieldCount()) { return false; } final List<RelDataTypeField> f1 = rowType1.getFieldList(); final List<RelDataTypeField> f2 = rowType2.getFieldList(); for (Pair<RelDataTypeField, RelDataTypeField> pair : Pair.zip(f1, f2)) { final RelDataType type1 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.left.getType(), false); final RelDataType type2 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.right.getType(), false); if (type1.equals(type2)) { continue; } if (type1.getSqlTypeName() == SqlTypeName.ANY || type2.getSqlTypeName() == SqlTypeName.ANY) { continue; } if (type1.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER && type2.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER) { continue; } if (type1.getSqlTypeName() == SqlTypeName.DOUBLE && type2.getSqlTypeName() == SqlTypeName .DECIMAL || isSumAggOutput(type1, type2)) { continue; } return false; } return true; }
MaterializationExpander { @VisibleForTesting static boolean areRowTypesEqual(RelDataType rowType1, RelDataType rowType2) { if (rowType1 == rowType2) { return true; } if (rowType2.getFieldCount() != rowType1.getFieldCount()) { return false; } final List<RelDataTypeField> f1 = rowType1.getFieldList(); final List<RelDataTypeField> f2 = rowType2.getFieldList(); for (Pair<RelDataTypeField, RelDataTypeField> pair : Pair.zip(f1, f2)) { final RelDataType type1 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.left.getType(), false); final RelDataType type2 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.right.getType(), false); if (type1.equals(type2)) { continue; } if (type1.getSqlTypeName() == SqlTypeName.ANY || type2.getSqlTypeName() == SqlTypeName.ANY) { continue; } if (type1.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER && type2.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER) { continue; } if (type1.getSqlTypeName() == SqlTypeName.DOUBLE && type2.getSqlTypeName() == SqlTypeName .DECIMAL || isSumAggOutput(type1, type2)) { continue; } return false; } return true; } }
MaterializationExpander { @VisibleForTesting static boolean areRowTypesEqual(RelDataType rowType1, RelDataType rowType2) { if (rowType1 == rowType2) { return true; } if (rowType2.getFieldCount() != rowType1.getFieldCount()) { return false; } final List<RelDataTypeField> f1 = rowType1.getFieldList(); final List<RelDataTypeField> f2 = rowType2.getFieldList(); for (Pair<RelDataTypeField, RelDataTypeField> pair : Pair.zip(f1, f2)) { final RelDataType type1 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.left.getType(), false); final RelDataType type2 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.right.getType(), false); if (type1.equals(type2)) { continue; } if (type1.getSqlTypeName() == SqlTypeName.ANY || type2.getSqlTypeName() == SqlTypeName.ANY) { continue; } if (type1.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER && type2.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER) { continue; } if (type1.getSqlTypeName() == SqlTypeName.DOUBLE && type2.getSqlTypeName() == SqlTypeName .DECIMAL || isSumAggOutput(type1, type2)) { continue; } return false; } return true; } private MaterializationExpander(final SqlConverter parent); }
MaterializationExpander { @VisibleForTesting static boolean areRowTypesEqual(RelDataType rowType1, RelDataType rowType2) { if (rowType1 == rowType2) { return true; } if (rowType2.getFieldCount() != rowType1.getFieldCount()) { return false; } final List<RelDataTypeField> f1 = rowType1.getFieldList(); final List<RelDataTypeField> f2 = rowType2.getFieldList(); for (Pair<RelDataTypeField, RelDataTypeField> pair : Pair.zip(f1, f2)) { final RelDataType type1 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.left.getType(), false); final RelDataType type2 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.right.getType(), false); if (type1.equals(type2)) { continue; } if (type1.getSqlTypeName() == SqlTypeName.ANY || type2.getSqlTypeName() == SqlTypeName.ANY) { continue; } if (type1.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER && type2.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER) { continue; } if (type1.getSqlTypeName() == SqlTypeName.DOUBLE && type2.getSqlTypeName() == SqlTypeName .DECIMAL || isSumAggOutput(type1, type2)) { continue; } return false; } return true; } private MaterializationExpander(final SqlConverter parent); DremioMaterialization expand(MaterializationDescriptor descriptor); static RelNode deserializePlan(final byte[] planBytes, SqlConverter parent); static MaterializationExpander of(final SqlConverter parent); }
MaterializationExpander { @VisibleForTesting static boolean areRowTypesEqual(RelDataType rowType1, RelDataType rowType2) { if (rowType1 == rowType2) { return true; } if (rowType2.getFieldCount() != rowType1.getFieldCount()) { return false; } final List<RelDataTypeField> f1 = rowType1.getFieldList(); final List<RelDataTypeField> f2 = rowType2.getFieldList(); for (Pair<RelDataTypeField, RelDataTypeField> pair : Pair.zip(f1, f2)) { final RelDataType type1 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.left.getType(), false); final RelDataType type2 = JavaTypeFactoryImpl.INSTANCE.createTypeWithNullability(pair.right.getType(), false); if (type1.equals(type2)) { continue; } if (type1.getSqlTypeName() == SqlTypeName.ANY || type2.getSqlTypeName() == SqlTypeName.ANY) { continue; } if (type1.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER && type2.getSqlTypeName().getFamily() == SqlTypeFamily.CHARACTER) { continue; } if (type1.getSqlTypeName() == SqlTypeName.DOUBLE && type2.getSqlTypeName() == SqlTypeName .DECIMAL || isSumAggOutput(type1, type2)) { continue; } return false; } return true; } private MaterializationExpander(final SqlConverter parent); DremioMaterialization expand(MaterializationDescriptor descriptor); static RelNode deserializePlan(final byte[] planBytes, SqlConverter parent); static MaterializationExpander of(final SqlConverter parent); }
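A point worth noting from areRowTypesEqual above is that the comparison is deliberately lenient for character types: any two CHARACTER-family columns are treated as matching, while TIMESTAMP precision differences (as in the test above) are not. The following is a hypothetical companion test, not taken from the Dremio test suite; it is written in the same style as the target test above and assumes the same test class and imports (RelDataTypeFactory, RelDataType, SqlTypeName, Collections, Assert).

    // Hypothetical illustration only: CHAR(20) and VARCHAR(20) are both in the CHARACTER
    // family, so areRowTypesEqual treats the two row types as equal despite the type mismatch.
    @Test
    public void testCharVsVarcharTreatedAsEqual() {
      final RelDataTypeFactory factory = new org.apache.calcite.jdbc.JavaTypeFactoryImpl();
      final RelDataType charRow = factory.createStructType(
          Collections.singletonList(factory.createSqlType(SqlTypeName.CHAR, 20)),
          Collections.singletonList("name"));
      final RelDataType varcharRow = factory.createStructType(
          Collections.singletonList(factory.createSqlType(SqlTypeName.VARCHAR, 20)),
          Collections.singletonList("name"));
      Assert.assertTrue(MaterializationExpander.areRowTypesEqual(charRow, varcharRow));
    }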
@Test public void testTableTypesEmptyList() { assertSame(MetadataProviderConditions.ALWAYS_TRUE, MetadataProviderConditions.getTableTypePredicate(Collections.emptyList())); }
public static Predicate<String> getTableTypePredicate(List<String> tableTypeFilter) { return tableTypeFilter.isEmpty() ? ALWAYS_TRUE : ImmutableSet.copyOf(tableTypeFilter)::contains; }
MetadataProviderConditions { public static Predicate<String> getTableTypePredicate(List<String> tableTypeFilter) { return tableTypeFilter.isEmpty() ? ALWAYS_TRUE : ImmutableSet.copyOf(tableTypeFilter)::contains; } }
MetadataProviderConditions { public static Predicate<String> getTableTypePredicate(List<String> tableTypeFilter) { return tableTypeFilter.isEmpty() ? ALWAYS_TRUE : ImmutableSet.copyOf(tableTypeFilter)::contains; } private MetadataProviderConditions(); }
MetadataProviderConditions { public static Predicate<String> getTableTypePredicate(List<String> tableTypeFilter) { return tableTypeFilter.isEmpty() ? ALWAYS_TRUE : ImmutableSet.copyOf(tableTypeFilter)::contains; } private MetadataProviderConditions(); static Predicate<String> getTableTypePredicate(List<String> tableTypeFilter); static Predicate<String> getCatalogNamePredicate(LikeFilter filter); static Optional<SearchQuery> createConjunctiveQuery( LikeFilter schemaNameFilter, LikeFilter tableNameFilter ); }
MetadataProviderConditions { public static Predicate<String> getTableTypePredicate(List<String> tableTypeFilter) { return tableTypeFilter.isEmpty() ? ALWAYS_TRUE : ImmutableSet.copyOf(tableTypeFilter)::contains; } private MetadataProviderConditions(); static Predicate<String> getTableTypePredicate(List<String> tableTypeFilter); static Predicate<String> getCatalogNamePredicate(LikeFilter filter); static Optional<SearchQuery> createConjunctiveQuery( LikeFilter schemaNameFilter, LikeFilter tableNameFilter ); }
@Test public void testTableTypes() { Predicate<String> filter = MetadataProviderConditions.getTableTypePredicate(Arrays.asList("foo", "bar")); assertTrue(filter.test("foo")); assertTrue(filter.test("bar")); assertFalse(filter.test("baz")); assertFalse(filter.test("fooo")); assertFalse(filter.test("ofoo")); assertFalse(filter.test("FOO")); }
public static Predicate<String> getTableTypePredicate(List<String> tableTypeFilter) { return tableTypeFilter.isEmpty() ? ALWAYS_TRUE : ImmutableSet.copyOf(tableTypeFilter)::contains; }
MetadataProviderConditions { public static Predicate<String> getTableTypePredicate(List<String> tableTypeFilter) { return tableTypeFilter.isEmpty() ? ALWAYS_TRUE : ImmutableSet.copyOf(tableTypeFilter)::contains; } }
MetadataProviderConditions { public static Predicate<String> getTableTypePredicate(List<String> tableTypeFilter) { return tableTypeFilter.isEmpty() ? ALWAYS_TRUE : ImmutableSet.copyOf(tableTypeFilter)::contains; } private MetadataProviderConditions(); }
MetadataProviderConditions { public static Predicate<String> getTableTypePredicate(List<String> tableTypeFilter) { return tableTypeFilter.isEmpty() ? ALWAYS_TRUE : ImmutableSet.copyOf(tableTypeFilter)::contains; } private MetadataProviderConditions(); static Predicate<String> getTableTypePredicate(List<String> tableTypeFilter); static Predicate<String> getCatalogNamePredicate(LikeFilter filter); static Optional<SearchQuery> createConjunctiveQuery( LikeFilter schemaNameFilter, LikeFilter tableNameFilter ); }
MetadataProviderConditions { public static Predicate<String> getTableTypePredicate(List<String> tableTypeFilter) { return tableTypeFilter.isEmpty() ? ALWAYS_TRUE : ImmutableSet.copyOf(tableTypeFilter)::contains; } private MetadataProviderConditions(); static Predicate<String> getTableTypePredicate(List<String> tableTypeFilter); static Predicate<String> getCatalogNamePredicate(LikeFilter filter); static Optional<SearchQuery> createConjunctiveQuery( LikeFilter schemaNameFilter, LikeFilter tableNameFilter ); }
@Test public void testLikeFilterAlwaysTrue() { assertSame(MetadataProviderConditions.ALWAYS_TRUE, MetadataProviderConditions.getCatalogNamePredicate(null)); assertSame(MetadataProviderConditions.ALWAYS_TRUE, MetadataProviderConditions.getCatalogNamePredicate(newLikeFilter(null, "\\"))); assertSame(MetadataProviderConditions.ALWAYS_TRUE, MetadataProviderConditions.getCatalogNamePredicate(newLikeFilter("%", "\\"))); }
public static Predicate<String> getCatalogNamePredicate(LikeFilter filter) { if (filter == null || !filter.hasPattern() || SQL_LIKE_ANY_STRING_PATTERN.equals(filter.getPattern())) { return ALWAYS_TRUE; } final String patternString = RegexpUtil.sqlToRegexLike(filter.getPattern(), filter.hasEscape() ? filter.getEscape().charAt(0) : (char) 0); final Pattern pattern = Pattern.compile(patternString, Pattern.CASE_INSENSITIVE); return input -> pattern.matcher(input).matches(); }
MetadataProviderConditions { public static Predicate<String> getCatalogNamePredicate(LikeFilter filter) { if (filter == null || !filter.hasPattern() || SQL_LIKE_ANY_STRING_PATTERN.equals(filter.getPattern())) { return ALWAYS_TRUE; } final String patternString = RegexpUtil.sqlToRegexLike(filter.getPattern(), filter.hasEscape() ? filter.getEscape().charAt(0) : (char) 0); final Pattern pattern = Pattern.compile(patternString, Pattern.CASE_INSENSITIVE); return input -> pattern.matcher(input).matches(); } }
MetadataProviderConditions { public static Predicate<String> getCatalogNamePredicate(LikeFilter filter) { if (filter == null || !filter.hasPattern() || SQL_LIKE_ANY_STRING_PATTERN.equals(filter.getPattern())) { return ALWAYS_TRUE; } final String patternString = RegexpUtil.sqlToRegexLike(filter.getPattern(), filter.hasEscape() ? filter.getEscape().charAt(0) : (char) 0); final Pattern pattern = Pattern.compile(patternString, Pattern.CASE_INSENSITIVE); return input -> pattern.matcher(input).matches(); } private MetadataProviderConditions(); }
MetadataProviderConditions { public static Predicate<String> getCatalogNamePredicate(LikeFilter filter) { if (filter == null || !filter.hasPattern() || SQL_LIKE_ANY_STRING_PATTERN.equals(filter.getPattern())) { return ALWAYS_TRUE; } final String patternString = RegexpUtil.sqlToRegexLike(filter.getPattern(), filter.hasEscape() ? filter.getEscape().charAt(0) : (char) 0); final Pattern pattern = Pattern.compile(patternString, Pattern.CASE_INSENSITIVE); return input -> pattern.matcher(input).matches(); } private MetadataProviderConditions(); static Predicate<String> getTableTypePredicate(List<String> tableTypeFilter); static Predicate<String> getCatalogNamePredicate(LikeFilter filter); static Optional<SearchQuery> createConjunctiveQuery( LikeFilter schemaNameFilter, LikeFilter tableNameFilter ); }
MetadataProviderConditions { public static Predicate<String> getCatalogNamePredicate(LikeFilter filter) { if (filter == null || !filter.hasPattern() || SQL_LIKE_ANY_STRING_PATTERN.equals(filter.getPattern())) { return ALWAYS_TRUE; } final String patternString = RegexpUtil.sqlToRegexLike(filter.getPattern(), filter.hasEscape() ? filter.getEscape().charAt(0) : (char) 0); final Pattern pattern = Pattern.compile(patternString, Pattern.CASE_INSENSITIVE); return input -> pattern.matcher(input).matches(); } private MetadataProviderConditions(); static Predicate<String> getTableTypePredicate(List<String> tableTypeFilter); static Predicate<String> getCatalogNamePredicate(LikeFilter filter); static Optional<SearchQuery> createConjunctiveQuery( LikeFilter schemaNameFilter, LikeFilter tableNameFilter ); }
@Test public void testLikeFilter() { Predicate<String> filter = MetadataProviderConditions.getCatalogNamePredicate(newLikeFilter("abc", "\\")); assertTrue(filter.test("abc")); assertFalse(filter.test("abcd")); assertTrue(filter.test("ABC")); }
public static Predicate<String> getCatalogNamePredicate(LikeFilter filter) { if (filter == null || !filter.hasPattern() || SQL_LIKE_ANY_STRING_PATTERN.equals(filter.getPattern())) { return ALWAYS_TRUE; } final String patternString = RegexpUtil.sqlToRegexLike(filter.getPattern(), filter.hasEscape() ? filter.getEscape().charAt(0) : (char) 0); final Pattern pattern = Pattern.compile(patternString, Pattern.CASE_INSENSITIVE); return input -> pattern.matcher(input).matches(); }
MetadataProviderConditions { public static Predicate<String> getCatalogNamePredicate(LikeFilter filter) { if (filter == null || !filter.hasPattern() || SQL_LIKE_ANY_STRING_PATTERN.equals(filter.getPattern())) { return ALWAYS_TRUE; } final String patternString = RegexpUtil.sqlToRegexLike(filter.getPattern(), filter.hasEscape() ? filter.getEscape().charAt(0) : (char) 0); final Pattern pattern = Pattern.compile(patternString, Pattern.CASE_INSENSITIVE); return input -> pattern.matcher(input).matches(); } }
MetadataProviderConditions { public static Predicate<String> getCatalogNamePredicate(LikeFilter filter) { if (filter == null || !filter.hasPattern() || SQL_LIKE_ANY_STRING_PATTERN.equals(filter.getPattern())) { return ALWAYS_TRUE; } final String patternString = RegexpUtil.sqlToRegexLike(filter.getPattern(), filter.hasEscape() ? filter.getEscape().charAt(0) : (char) 0); final Pattern pattern = Pattern.compile(patternString, Pattern.CASE_INSENSITIVE); return input -> pattern.matcher(input).matches(); } private MetadataProviderConditions(); }
MetadataProviderConditions { public static Predicate<String> getCatalogNamePredicate(LikeFilter filter) { if (filter == null || !filter.hasPattern() || SQL_LIKE_ANY_STRING_PATTERN.equals(filter.getPattern())) { return ALWAYS_TRUE; } final String patternString = RegexpUtil.sqlToRegexLike(filter.getPattern(), filter.hasEscape() ? filter.getEscape().charAt(0) : (char) 0); final Pattern pattern = Pattern.compile(patternString, Pattern.CASE_INSENSITIVE); return input -> pattern.matcher(input).matches(); } private MetadataProviderConditions(); static Predicate<String> getTableTypePredicate(List<String> tableTypeFilter); static Predicate<String> getCatalogNamePredicate(LikeFilter filter); static Optional<SearchQuery> createConjunctiveQuery( LikeFilter schemaNameFilter, LikeFilter tableNameFilter ); }
MetadataProviderConditions { public static Predicate<String> getCatalogNamePredicate(LikeFilter filter) { if (filter == null || !filter.hasPattern() || SQL_LIKE_ANY_STRING_PATTERN.equals(filter.getPattern())) { return ALWAYS_TRUE; } final String patternString = RegexpUtil.sqlToRegexLike(filter.getPattern(), filter.hasEscape() ? filter.getEscape().charAt(0) : (char) 0); final Pattern pattern = Pattern.compile(patternString, Pattern.CASE_INSENSITIVE); return input -> pattern.matcher(input).matches(); } private MetadataProviderConditions(); static Predicate<String> getTableTypePredicate(List<String> tableTypeFilter); static Predicate<String> getCatalogNamePredicate(LikeFilter filter); static Optional<SearchQuery> createConjunctiveQuery( LikeFilter schemaNameFilter, LikeFilter tableNameFilter ); }
@Test public void testLikeFilterMixedCase() { Predicate<String> filter = MetadataProviderConditions.getCatalogNamePredicate(newLikeFilter("AbC", "\\")); assertTrue(filter.test("abc")); assertFalse(filter.test("abcd")); assertFalse(filter.test("aabc")); assertTrue(filter.test("ABC")); }
public static Predicate<String> getCatalogNamePredicate(LikeFilter filter) { if (filter == null || !filter.hasPattern() || SQL_LIKE_ANY_STRING_PATTERN.equals(filter.getPattern())) { return ALWAYS_TRUE; } final String patternString = RegexpUtil.sqlToRegexLike(filter.getPattern(), filter.hasEscape() ? filter.getEscape().charAt(0) : (char) 0); final Pattern pattern = Pattern.compile(patternString, Pattern.CASE_INSENSITIVE); return input -> pattern.matcher(input).matches(); }
MetadataProviderConditions { public static Predicate<String> getCatalogNamePredicate(LikeFilter filter) { if (filter == null || !filter.hasPattern() || SQL_LIKE_ANY_STRING_PATTERN.equals(filter.getPattern())) { return ALWAYS_TRUE; } final String patternString = RegexpUtil.sqlToRegexLike(filter.getPattern(), filter.hasEscape() ? filter.getEscape().charAt(0) : (char) 0); final Pattern pattern = Pattern.compile(patternString, Pattern.CASE_INSENSITIVE); return input -> pattern.matcher(input).matches(); } }
MetadataProviderConditions { public static Predicate<String> getCatalogNamePredicate(LikeFilter filter) { if (filter == null || !filter.hasPattern() || SQL_LIKE_ANY_STRING_PATTERN.equals(filter.getPattern())) { return ALWAYS_TRUE; } final String patternString = RegexpUtil.sqlToRegexLike(filter.getPattern(), filter.hasEscape() ? filter.getEscape().charAt(0) : (char) 0); final Pattern pattern = Pattern.compile(patternString, Pattern.CASE_INSENSITIVE); return input -> pattern.matcher(input).matches(); } private MetadataProviderConditions(); }
MetadataProviderConditions { public static Predicate<String> getCatalogNamePredicate(LikeFilter filter) { if (filter == null || !filter.hasPattern() || SQL_LIKE_ANY_STRING_PATTERN.equals(filter.getPattern())) { return ALWAYS_TRUE; } final String patternString = RegexpUtil.sqlToRegexLike(filter.getPattern(), filter.hasEscape() ? filter.getEscape().charAt(0) : (char) 0); final Pattern pattern = Pattern.compile(patternString, Pattern.CASE_INSENSITIVE); return input -> pattern.matcher(input).matches(); } private MetadataProviderConditions(); static Predicate<String> getTableTypePredicate(List<String> tableTypeFilter); static Predicate<String> getCatalogNamePredicate(LikeFilter filter); static Optional<SearchQuery> createConjunctiveQuery( LikeFilter schemaNameFilter, LikeFilter tableNameFilter ); }
MetadataProviderConditions { public static Predicate<String> getCatalogNamePredicate(LikeFilter filter) { if (filter == null || !filter.hasPattern() || SQL_LIKE_ANY_STRING_PATTERN.equals(filter.getPattern())) { return ALWAYS_TRUE; } final String patternString = RegexpUtil.sqlToRegexLike(filter.getPattern(), filter.hasEscape() ? filter.getEscape().charAt(0) : (char) 0); final Pattern pattern = Pattern.compile(patternString, Pattern.CASE_INSENSITIVE); return input -> pattern.matcher(input).matches(); } private MetadataProviderConditions(); static Predicate<String> getTableTypePredicate(List<String> tableTypeFilter); static Predicate<String> getCatalogNamePredicate(LikeFilter filter); static Optional<SearchQuery> createConjunctiveQuery( LikeFilter schemaNameFilter, LikeFilter tableNameFilter ); }
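The predicate built by getCatalogNamePredicate relies on RegexpUtil.sqlToRegexLike to turn the SQL LIKE pattern (with an optional escape character) into a regular expression compiled case-insensitively, which is why the tests above accept "ABC" for the pattern "AbC". As a rough, self-contained sketch of that translation, not Dremio's actual RegexpUtil implementation, the class below maps '%' to ".*", '_' to '.', quotes everything else, and honours an escape character; the class and method names are invented for illustration.

    import java.util.function.Predicate;
    import java.util.regex.Pattern;

    public class LikePredicateSketch {
      // Approximate SQL LIKE semantics: '%' = any run of characters, '_' = any single
      // character, everything else literal; a character preceded by the escape char is literal.
      static Predicate<String> likePredicate(String likePattern, char escape) {
        StringBuilder regex = new StringBuilder();
        boolean escaped = false;
        for (char c : likePattern.toCharArray()) {
          if (escaped) {
            regex.append(Pattern.quote(String.valueOf(c)));
            escaped = false;
          } else if (c == escape) {
            escaped = true;
          } else if (c == '%') {
            regex.append(".*");
          } else if (c == '_') {
            regex.append('.');
          } else {
            regex.append(Pattern.quote(String.valueOf(c)));
          }
        }
        // Case-insensitive, full-string match, mirroring pattern.matcher(input).matches() above.
        Pattern pattern = Pattern.compile(regex.toString(), Pattern.CASE_INSENSITIVE);
        return input -> pattern.matcher(input).matches();
      }

      public static void main(String[] args) {
        Predicate<String> p = likePredicate("AbC", '\\');
        System.out.println(p.test("abc"));   // true: matching ignores case
        System.out.println(p.test("abcd"));  // false: the whole string must match, not a prefix
      }
    }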
@Test public void testCreateFilterAlwaysTrue() { assertFalse(MetadataProviderConditions.createConjunctiveQuery(null, null).isPresent()); assertFalse(MetadataProviderConditions.createConjunctiveQuery(null, newLikeFilter("%", null)).isPresent()); assertFalse(MetadataProviderConditions.createConjunctiveQuery(newLikeFilter("%", null), null).isPresent()); assertFalse(MetadataProviderConditions.createConjunctiveQuery(newLikeFilter("%", null), newLikeFilter("%", null)) .isPresent()); }
public static Optional<SearchQuery> createConjunctiveQuery( LikeFilter schemaNameFilter, LikeFilter tableNameFilter ) { final Optional<SearchQuery> schemaNameQuery = createLikeQuery(DatasetIndexKeys.UNQUOTED_SCHEMA.getIndexFieldName(), schemaNameFilter); final Optional<SearchQuery> tableNameQuery = createLikeQuery(DatasetIndexKeys.UNQUOTED_NAME.getIndexFieldName(), tableNameFilter); if (!schemaNameQuery.isPresent()) { return tableNameQuery; } if (!tableNameQuery.isPresent()) { return schemaNameQuery; } return Optional.of(SearchQuery.newBuilder() .setAnd(SearchQuery.And.newBuilder() .addClauses(schemaNameQuery.get()) .addClauses(tableNameQuery.get())) .build()); }
MetadataProviderConditions { public static Optional<SearchQuery> createConjunctiveQuery( LikeFilter schemaNameFilter, LikeFilter tableNameFilter ) { final Optional<SearchQuery> schemaNameQuery = createLikeQuery(DatasetIndexKeys.UNQUOTED_SCHEMA.getIndexFieldName(), schemaNameFilter); final Optional<SearchQuery> tableNameQuery = createLikeQuery(DatasetIndexKeys.UNQUOTED_NAME.getIndexFieldName(), tableNameFilter); if (!schemaNameQuery.isPresent()) { return tableNameQuery; } if (!tableNameQuery.isPresent()) { return schemaNameQuery; } return Optional.of(SearchQuery.newBuilder() .setAnd(SearchQuery.And.newBuilder() .addClauses(schemaNameQuery.get()) .addClauses(tableNameQuery.get())) .build()); } }
MetadataProviderConditions { public static Optional<SearchQuery> createConjunctiveQuery( LikeFilter schemaNameFilter, LikeFilter tableNameFilter ) { final Optional<SearchQuery> schemaNameQuery = createLikeQuery(DatasetIndexKeys.UNQUOTED_SCHEMA.getIndexFieldName(), schemaNameFilter); final Optional<SearchQuery> tableNameQuery = createLikeQuery(DatasetIndexKeys.UNQUOTED_NAME.getIndexFieldName(), tableNameFilter); if (!schemaNameQuery.isPresent()) { return tableNameQuery; } if (!tableNameQuery.isPresent()) { return schemaNameQuery; } return Optional.of(SearchQuery.newBuilder() .setAnd(SearchQuery.And.newBuilder() .addClauses(schemaNameQuery.get()) .addClauses(tableNameQuery.get())) .build()); } private MetadataProviderConditions(); }
MetadataProviderConditions { public static Optional<SearchQuery> createConjunctiveQuery( LikeFilter schemaNameFilter, LikeFilter tableNameFilter ) { final Optional<SearchQuery> schemaNameQuery = createLikeQuery(DatasetIndexKeys.UNQUOTED_SCHEMA.getIndexFieldName(), schemaNameFilter); final Optional<SearchQuery> tableNameQuery = createLikeQuery(DatasetIndexKeys.UNQUOTED_NAME.getIndexFieldName(), tableNameFilter); if (!schemaNameQuery.isPresent()) { return tableNameQuery; } if (!tableNameQuery.isPresent()) { return schemaNameQuery; } return Optional.of(SearchQuery.newBuilder() .setAnd(SearchQuery.And.newBuilder() .addClauses(schemaNameQuery.get()) .addClauses(tableNameQuery.get())) .build()); } private MetadataProviderConditions(); static Predicate<String> getTableTypePredicate(List<String> tableTypeFilter); static Predicate<String> getCatalogNamePredicate(LikeFilter filter); static Optional<SearchQuery> createConjunctiveQuery( LikeFilter schemaNameFilter, LikeFilter tableNameFilter ); }
MetadataProviderConditions { public static Optional<SearchQuery> createConjunctiveQuery( LikeFilter schemaNameFilter, LikeFilter tableNameFilter ) { final Optional<SearchQuery> schemaNameQuery = createLikeQuery(DatasetIndexKeys.UNQUOTED_SCHEMA.getIndexFieldName(), schemaNameFilter); final Optional<SearchQuery> tableNameQuery = createLikeQuery(DatasetIndexKeys.UNQUOTED_NAME.getIndexFieldName(), tableNameFilter); if (!schemaNameQuery.isPresent()) { return tableNameQuery; } if (!tableNameQuery.isPresent()) { return schemaNameQuery; } return Optional.of(SearchQuery.newBuilder() .setAnd(SearchQuery.And.newBuilder() .addClauses(schemaNameQuery.get()) .addClauses(tableNameQuery.get())) .build()); } private MetadataProviderConditions(); static Predicate<String> getTableTypePredicate(List<String> tableTypeFilter); static Predicate<String> getCatalogNamePredicate(LikeFilter filter); static Optional<SearchQuery> createConjunctiveQuery( LikeFilter schemaNameFilter, LikeFilter tableNameFilter ); }
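createConjunctiveQuery follows a small but reusable shape: two independently optional clauses collapse to empty when both are absent, pass through unchanged when only one is present, and are wrapped in an AND only when both exist, which is exactly what the testCreateFilterAlwaysTrue cases above exercise. The sketch below restates that merge shape with plain Optional<String> values instead of SearchQuery builders; the class name and the string clauses are invented for illustration.

    import java.util.Optional;

    public class ConjunctiveMergeSketch {
      // empty + empty -> empty, one present -> that one, both present -> AND of the two.
      static Optional<String> conjunction(Optional<String> left, Optional<String> right) {
        if (!left.isPresent()) {
          return right;
        }
        if (!right.isPresent()) {
          return left;
        }
        return Optional.of("(" + left.get() + " AND " + right.get() + ")");
      }

      public static void main(String[] args) {
        System.out.println(conjunction(Optional.empty(), Optional.empty()));                 // Optional.empty
        System.out.println(conjunction(Optional.of("schema LIKE 'a%'"), Optional.empty()));  // left clause only
        System.out.println(conjunction(Optional.of("schema LIKE 'a%'"),
                                       Optional.of("table LIKE 'b%'")));                     // AND of both
      }
    }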
@Test public void testParseJsonPath2() throws Exception { JsonPath p = JSONElementLocator.parsePath("value.a.b.c"); assertEquals(p.toString(), 3, p.size()); assertEquals(new JsonPath(new ObjectJsonPathElement("a"), new ObjectJsonPathElement("b"), new ObjectJsonPathElement("c")), p); }
public static JsonPath parsePath(String path) { if (path.startsWith(VALUE_PLACEHOLDER)) { return new JsonPath(path.substring(VALUE_PLACEHOLDER.length())); } throw new IllegalArgumentException(path + " must start with 'value'"); }
JSONElementLocator { public static JsonPath parsePath(String path) { if (path.startsWith(VALUE_PLACEHOLDER)) { return new JsonPath(path.substring(VALUE_PLACEHOLDER.length())); } throw new IllegalArgumentException(path + " must start with 'value'"); } }
JSONElementLocator { public static JsonPath parsePath(String path) { if (path.startsWith(VALUE_PLACEHOLDER)) { return new JsonPath(path.substring(VALUE_PLACEHOLDER.length())); } throw new IllegalArgumentException(path + " must start with 'value'"); } JSONElementLocator(String text); }
JSONElementLocator { public static JsonPath parsePath(String path) { if (path.startsWith(VALUE_PLACEHOLDER)) { return new JsonPath(path.substring(VALUE_PLACEHOLDER.length())); } throw new IllegalArgumentException(path + " must start with 'value'"); } JSONElementLocator(String text); static JsonPath parsePath(String path); Interval locatePath(JsonPath searchedPath); JsonSelection locate(int selectionStart, int selectionEnd); }
JSONElementLocator { public static JsonPath parsePath(String path) { if (path.startsWith(VALUE_PLACEHOLDER)) { return new JsonPath(path.substring(VALUE_PLACEHOLDER.length())); } throw new IllegalArgumentException(path + " must start with 'value'"); } JSONElementLocator(String text); static JsonPath parsePath(String path); Interval locatePath(JsonPath searchedPath); JsonSelection locate(int selectionStart, int selectionEnd); }
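The test above only covers the happy path of parsePath; per the method body, any path that does not begin with the "value" placeholder is rejected with an IllegalArgumentException. The following hypothetical test, not part of the Dremio suite, makes that failure mode explicit and assumes the same test class and imports as testParseJsonPath2.

    // Hypothetical companion test: a path without the leading "value" segment is rejected.
    @Test(expected = IllegalArgumentException.class)
    public void testParsePathMustStartWithValue() {
      JSONElementLocator.parsePath("foo.a.b");
    }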
@Test public void testCreationOfValidator() { SabotContext sabotContext = mock(SabotContext.class); FunctionImplementationRegistry functionImplementationRegistry = mock(FunctionImplementationRegistry.class); CatalogService catalogService = mock(CatalogService.class); Catalog catalog = mock(Catalog.class); ProjectOptionManager mockOptions = mock(ProjectOptionManager.class); when(mockOptions.getOptionValidatorListing()).thenReturn(mock(OptionValidatorListing.class)); when(sabotContext.getFunctionImplementationRegistry()).thenReturn(functionImplementationRegistry); when(sabotContext.getCatalogService()).thenReturn(catalogService); when(sabotContext.getCatalogService().getCatalog(any(MetadataRequestOptions.class))).thenReturn(catalog); OptionValue value1 = OptionValue.createBoolean(OptionValue.OptionType.SYSTEM, PlannerSettings.ENABLE_DECIMAL_V2_KEY, false); OptionValue value2 = OptionValue.createLong(OptionValue.OptionType.SYSTEM, UserSession.MAX_METADATA_COUNT.getOptionName(), 0); OptionList optionList = new OptionList(); optionList.add(value1); optionList.add(value2); when(mockOptions.getOption(PlannerSettings.ENABLE_DECIMAL_V2_KEY)).thenReturn(value1); when(mockOptions.getOption(UserSession.MAX_METADATA_COUNT.getOptionName())).thenReturn(value2); when(mockOptions.getNonDefaultOptions()).thenReturn(optionList); SQLAnalyzer sqlAnalyzer = SQLAnalyzerFactory.createSQLAnalyzer(SystemUser.SYSTEM_USERNAME, sabotContext, null, true, mockOptions); SqlValidatorWithHints validator = sqlAnalyzer.validator; assertTrue(validator instanceof SqlAdvisorValidator); sqlAnalyzer = SQLAnalyzerFactory.createSQLAnalyzer(SystemUser.SYSTEM_USERNAME, sabotContext, null, false, mockOptions); validator = sqlAnalyzer.validator; assertTrue(validator instanceof SqlValidatorImpl); }
public static SQLAnalyzer createSQLAnalyzer(final String username, final SabotContext sabotContext, final List<String> context, final boolean createForSqlSuggestions, ProjectOptionManager projectOptionManager) { final ViewExpansionContext viewExpansionContext = new ViewExpansionContext(username); final OptionManager optionManager = OptionManagerWrapper.Builder.newBuilder() .withOptionManager(new DefaultOptionManager(sabotContext.getOptionValidatorListing())) .withOptionManager(new EagerCachingOptionManager(projectOptionManager)) .withOptionManager(new QueryOptionManager(sabotContext.getOptionValidatorListing())) .build(); final NamespaceKey defaultSchemaPath = context == null ? null : new NamespaceKey(context); final SchemaConfig newSchemaConfig = SchemaConfig.newBuilder(username) .defaultSchema(defaultSchemaPath) .optionManager(optionManager) .setViewExpansionContext(viewExpansionContext) .build(); Catalog catalog = sabotContext.getCatalogService() .getCatalog(MetadataRequestOptions.of(newSchemaConfig)); JavaTypeFactory typeFactory = JavaTypeFactoryImpl.INSTANCE; DremioCatalogReader catalogReader = new DremioCatalogReader(catalog, typeFactory); FunctionImplementationRegistry functionImplementationRegistry = optionManager.getOption (PlannerSettings.ENABLE_DECIMAL_V2_KEY).getBoolVal() ? sabotContext.getDecimalFunctionImplementationRegistry() : sabotContext.getFunctionImplementationRegistry(); OperatorTable opTable = new OperatorTable(functionImplementationRegistry); SqlOperatorTable chainedOpTable = new ChainedSqlOperatorTable(ImmutableList.<SqlOperatorTable>of(opTable, catalogReader)); SqlValidatorWithHints validator = createForSqlSuggestions ? new SqlAdvisorValidator(chainedOpTable, catalogReader, typeFactory, DremioSqlConformance.INSTANCE) : SqlValidatorUtil.newValidator(chainedOpTable, catalogReader, typeFactory, DremioSqlConformance.INSTANCE); return new SQLAnalyzer(validator); }
SQLAnalyzerFactory { public static SQLAnalyzer createSQLAnalyzer(final String username, final SabotContext sabotContext, final List<String> context, final boolean createForSqlSuggestions, ProjectOptionManager projectOptionManager) { final ViewExpansionContext viewExpansionContext = new ViewExpansionContext(username); final OptionManager optionManager = OptionManagerWrapper.Builder.newBuilder() .withOptionManager(new DefaultOptionManager(sabotContext.getOptionValidatorListing())) .withOptionManager(new EagerCachingOptionManager(projectOptionManager)) .withOptionManager(new QueryOptionManager(sabotContext.getOptionValidatorListing())) .build(); final NamespaceKey defaultSchemaPath = context == null ? null : new NamespaceKey(context); final SchemaConfig newSchemaConfig = SchemaConfig.newBuilder(username) .defaultSchema(defaultSchemaPath) .optionManager(optionManager) .setViewExpansionContext(viewExpansionContext) .build(); Catalog catalog = sabotContext.getCatalogService() .getCatalog(MetadataRequestOptions.of(newSchemaConfig)); JavaTypeFactory typeFactory = JavaTypeFactoryImpl.INSTANCE; DremioCatalogReader catalogReader = new DremioCatalogReader(catalog, typeFactory); FunctionImplementationRegistry functionImplementationRegistry = optionManager.getOption (PlannerSettings.ENABLE_DECIMAL_V2_KEY).getBoolVal() ? sabotContext.getDecimalFunctionImplementationRegistry() : sabotContext.getFunctionImplementationRegistry(); OperatorTable opTable = new OperatorTable(functionImplementationRegistry); SqlOperatorTable chainedOpTable = new ChainedSqlOperatorTable(ImmutableList.<SqlOperatorTable>of(opTable, catalogReader)); SqlValidatorWithHints validator = createForSqlSuggestions ? new SqlAdvisorValidator(chainedOpTable, catalogReader, typeFactory, DremioSqlConformance.INSTANCE) : SqlValidatorUtil.newValidator(chainedOpTable, catalogReader, typeFactory, DremioSqlConformance.INSTANCE); return new SQLAnalyzer(validator); } }
SQLAnalyzerFactory { public static SQLAnalyzer createSQLAnalyzer(final String username, final SabotContext sabotContext, final List<String> context, final boolean createForSqlSuggestions, ProjectOptionManager projectOptionManager) { final ViewExpansionContext viewExpansionContext = new ViewExpansionContext(username); final OptionManager optionManager = OptionManagerWrapper.Builder.newBuilder() .withOptionManager(new DefaultOptionManager(sabotContext.getOptionValidatorListing())) .withOptionManager(new EagerCachingOptionManager(projectOptionManager)) .withOptionManager(new QueryOptionManager(sabotContext.getOptionValidatorListing())) .build(); final NamespaceKey defaultSchemaPath = context == null ? null : new NamespaceKey(context); final SchemaConfig newSchemaConfig = SchemaConfig.newBuilder(username) .defaultSchema(defaultSchemaPath) .optionManager(optionManager) .setViewExpansionContext(viewExpansionContext) .build(); Catalog catalog = sabotContext.getCatalogService() .getCatalog(MetadataRequestOptions.of(newSchemaConfig)); JavaTypeFactory typeFactory = JavaTypeFactoryImpl.INSTANCE; DremioCatalogReader catalogReader = new DremioCatalogReader(catalog, typeFactory); FunctionImplementationRegistry functionImplementationRegistry = optionManager.getOption (PlannerSettings.ENABLE_DECIMAL_V2_KEY).getBoolVal() ? sabotContext.getDecimalFunctionImplementationRegistry() : sabotContext.getFunctionImplementationRegistry(); OperatorTable opTable = new OperatorTable(functionImplementationRegistry); SqlOperatorTable chainedOpTable = new ChainedSqlOperatorTable(ImmutableList.<SqlOperatorTable>of(opTable, catalogReader)); SqlValidatorWithHints validator = createForSqlSuggestions ? new SqlAdvisorValidator(chainedOpTable, catalogReader, typeFactory, DremioSqlConformance.INSTANCE) : SqlValidatorUtil.newValidator(chainedOpTable, catalogReader, typeFactory, DremioSqlConformance.INSTANCE); return new SQLAnalyzer(validator); } }
SQLAnalyzerFactory { public static SQLAnalyzer createSQLAnalyzer(final String username, final SabotContext sabotContext, final List<String> context, final boolean createForSqlSuggestions, ProjectOptionManager projectOptionManager) { final ViewExpansionContext viewExpansionContext = new ViewExpansionContext(username); final OptionManager optionManager = OptionManagerWrapper.Builder.newBuilder() .withOptionManager(new DefaultOptionManager(sabotContext.getOptionValidatorListing())) .withOptionManager(new EagerCachingOptionManager(projectOptionManager)) .withOptionManager(new QueryOptionManager(sabotContext.getOptionValidatorListing())) .build(); final NamespaceKey defaultSchemaPath = context == null ? null : new NamespaceKey(context); final SchemaConfig newSchemaConfig = SchemaConfig.newBuilder(username) .defaultSchema(defaultSchemaPath) .optionManager(optionManager) .setViewExpansionContext(viewExpansionContext) .build(); Catalog catalog = sabotContext.getCatalogService() .getCatalog(MetadataRequestOptions.of(newSchemaConfig)); JavaTypeFactory typeFactory = JavaTypeFactoryImpl.INSTANCE; DremioCatalogReader catalogReader = new DremioCatalogReader(catalog, typeFactory); FunctionImplementationRegistry functionImplementationRegistry = optionManager.getOption (PlannerSettings.ENABLE_DECIMAL_V2_KEY).getBoolVal() ? sabotContext.getDecimalFunctionImplementationRegistry() : sabotContext.getFunctionImplementationRegistry(); OperatorTable opTable = new OperatorTable(functionImplementationRegistry); SqlOperatorTable chainedOpTable = new ChainedSqlOperatorTable(ImmutableList.<SqlOperatorTable>of(opTable, catalogReader)); SqlValidatorWithHints validator = createForSqlSuggestions ? new SqlAdvisorValidator(chainedOpTable, catalogReader, typeFactory, DremioSqlConformance.INSTANCE) : SqlValidatorUtil.newValidator(chainedOpTable, catalogReader, typeFactory, DremioSqlConformance.INSTANCE); return new SQLAnalyzer(validator); } static SQLAnalyzer createSQLAnalyzer(final String username, final SabotContext sabotContext, final List<String> context, final boolean createForSqlSuggestions, ProjectOptionManager projectOptionManager); }
SQLAnalyzerFactory { public static SQLAnalyzer createSQLAnalyzer(final String username, final SabotContext sabotContext, final List<String> context, final boolean createForSqlSuggestions, ProjectOptionManager projectOptionManager) { final ViewExpansionContext viewExpansionContext = new ViewExpansionContext(username); final OptionManager optionManager = OptionManagerWrapper.Builder.newBuilder() .withOptionManager(new DefaultOptionManager(sabotContext.getOptionValidatorListing())) .withOptionManager(new EagerCachingOptionManager(projectOptionManager)) .withOptionManager(new QueryOptionManager(sabotContext.getOptionValidatorListing())) .build(); final NamespaceKey defaultSchemaPath = context == null ? null : new NamespaceKey(context); final SchemaConfig newSchemaConfig = SchemaConfig.newBuilder(username) .defaultSchema(defaultSchemaPath) .optionManager(optionManager) .setViewExpansionContext(viewExpansionContext) .build(); Catalog catalog = sabotContext.getCatalogService() .getCatalog(MetadataRequestOptions.of(newSchemaConfig)); JavaTypeFactory typeFactory = JavaTypeFactoryImpl.INSTANCE; DremioCatalogReader catalogReader = new DremioCatalogReader(catalog, typeFactory); FunctionImplementationRegistry functionImplementationRegistry = optionManager.getOption (PlannerSettings.ENABLE_DECIMAL_V2_KEY).getBoolVal() ? sabotContext.getDecimalFunctionImplementationRegistry() : sabotContext.getFunctionImplementationRegistry(); OperatorTable opTable = new OperatorTable(functionImplementationRegistry); SqlOperatorTable chainedOpTable = new ChainedSqlOperatorTable(ImmutableList.<SqlOperatorTable>of(opTable, catalogReader)); SqlValidatorWithHints validator = createForSqlSuggestions ? new SqlAdvisorValidator(chainedOpTable, catalogReader, typeFactory, DremioSqlConformance.INSTANCE) : SqlValidatorUtil.newValidator(chainedOpTable, catalogReader, typeFactory, DremioSqlConformance.INSTANCE); return new SQLAnalyzer(validator); } static SQLAnalyzer createSQLAnalyzer(final String username, final SabotContext sabotContext, final List<String> context, final boolean createForSqlSuggestions, ProjectOptionManager projectOptionManager); }
@Test(expected = UserException.class) public void testFailMultipleQueries() { ParserConfig config = new ParserConfig(ParserConfig.QUOTING, 100); SqlConverter.parseSingleStatementImpl("select * from t1; select * from t2", config, false); }
@VisibleForTesting static SqlNode parseSingleStatementImpl(String sql, ParserConfig parserConfig, boolean isInnerQuery) { SqlNodeList list = parseMultipleStatementsImpl(sql, parserConfig, isInnerQuery); if (list.size() > 1) { SqlParserPos pos = list.get(1).getParserPosition(); int col = pos.getColumnNum(); String first = sql.substring(0, col); String second = sql.substring(col-1); UserException.Builder builder = UserException.parseError(); builder.message("Unable to parse multiple queries. First query is %s. Rest of submission is %s", first, second); throw builder.buildSilently(); } SqlNode newNode = list.get(0).accept(STRING_LITERAL_CONVERTER); return newNode; }
SqlConverter { @VisibleForTesting static SqlNode parseSingleStatementImpl(String sql, ParserConfig parserConfig, boolean isInnerQuery) { SqlNodeList list = parseMultipleStatementsImpl(sql, parserConfig, isInnerQuery); if (list.size() > 1) { SqlParserPos pos = list.get(1).getParserPosition(); int col = pos.getColumnNum(); String first = sql.substring(0, col); String second = sql.substring(col-1); UserException.Builder builder = UserException.parseError(); builder.message("Unable to parse multiple queries. First query is %s. Rest of submission is %s", first, second); throw builder.buildSilently(); } SqlNode newNode = list.get(0).accept(STRING_LITERAL_CONVERTER); return newNode; } }
SqlConverter { @VisibleForTesting static SqlNode parseSingleStatementImpl(String sql, ParserConfig parserConfig, boolean isInnerQuery) { SqlNodeList list = parseMultipleStatementsImpl(sql, parserConfig, isInnerQuery); if (list.size() > 1) { SqlParserPos pos = list.get(1).getParserPosition(); int col = pos.getColumnNum(); String first = sql.substring(0, col); String second = sql.substring(col-1); UserException.Builder builder = UserException.parseError(); builder.message("Unable to parse multiple queries. First query is %s. Rest of submission is %s", first, second); throw builder.buildSilently(); } SqlNode newNode = list.get(0).accept(STRING_LITERAL_CONVERTER); return newNode; } SqlConverter( final PlannerSettings settings, final SqlOperatorTable operatorTable, final FunctionContext functionContext, final MaterializationDescriptorProvider materializationProvider, final FunctionImplementationRegistry functions, final UserSession session, final AttemptObserver observer, final Catalog catalog, final SubstitutionProviderFactory factory, final SabotConfig config, final ScanResult scanResult ); SqlConverter(SqlConverter parent, DremioCatalogReader catalog); }
SqlConverter { @VisibleForTesting static SqlNode parseSingleStatementImpl(String sql, ParserConfig parserConfig, boolean isInnerQuery) { SqlNodeList list = parseMultipleStatementsImpl(sql, parserConfig, isInnerQuery); if (list.size() > 1) { SqlParserPos pos = list.get(1).getParserPosition(); int col = pos.getColumnNum(); String first = sql.substring(0, col); String second = sql.substring(col-1); UserException.Builder builder = UserException.parseError(); builder.message("Unable to parse multiple queries. First query is %s. Rest of submission is %s", first, second); throw builder.buildSilently(); } SqlNode newNode = list.get(0).accept(STRING_LITERAL_CONVERTER); return newNode; } SqlConverter( final PlannerSettings settings, final SqlOperatorTable operatorTable, final FunctionContext functionContext, final MaterializationDescriptorProvider materializationProvider, final FunctionImplementationRegistry functions, final UserSession session, final AttemptObserver observer, final Catalog catalog, final SubstitutionProviderFactory factory, final SabotConfig config, final ScanResult scanResult ); SqlConverter(SqlConverter parent, DremioCatalogReader catalog); SqlNodeList parseMultipleStatements(String sql); SqlNode parse(String sql); ViewExpansionContext getViewExpansionContext(); UserSession getSession(); SqlNode validate(final SqlNode parsedNode); RelDataType getValidatedRowType(String sql); FunctionImplementationRegistry getFunctionImplementationRegistry(); PlannerSettings getSettings(); RelDataType getOutputType(SqlNode validatedNode); JavaTypeFactory getTypeFactory(); SqlOperatorTable getOpTab(); RelOptCostFactory getCostFactory(); FunctionContext getFunctionContext(); DremioCatalogReader getCatalogReader(); SqlParser.Config getParserConfig(); AttemptObserver getObserver(); MaterializationList getMaterializations(); int getNestingLevel(); RelOptCluster getCluster(); AccelerationAwareSubstitutionProvider getSubstitutionProvider(); RelSerializerFactory getSerializerFactory(); SabotConfig getConfig(); RelRootPlus toConvertibleRelRoot(final SqlNode validatedNode, boolean expand, boolean flatten); }
SqlConverter { @VisibleForTesting static SqlNode parseSingleStatementImpl(String sql, ParserConfig parserConfig, boolean isInnerQuery) { SqlNodeList list = parseMultipleStatementsImpl(sql, parserConfig, isInnerQuery); if (list.size() > 1) { SqlParserPos pos = list.get(1).getParserPosition(); int col = pos.getColumnNum(); String first = sql.substring(0, col); String second = sql.substring(col-1); UserException.Builder builder = UserException.parseError(); builder.message("Unable to parse multiple queries. First query is %s. Rest of submission is %s", first, second); throw builder.buildSilently(); } SqlNode newNode = list.get(0).accept(STRING_LITERAL_CONVERTER); return newNode; } SqlConverter( final PlannerSettings settings, final SqlOperatorTable operatorTable, final FunctionContext functionContext, final MaterializationDescriptorProvider materializationProvider, final FunctionImplementationRegistry functions, final UserSession session, final AttemptObserver observer, final Catalog catalog, final SubstitutionProviderFactory factory, final SabotConfig config, final ScanResult scanResult ); SqlConverter(SqlConverter parent, DremioCatalogReader catalog); SqlNodeList parseMultipleStatements(String sql); SqlNode parse(String sql); ViewExpansionContext getViewExpansionContext(); UserSession getSession(); SqlNode validate(final SqlNode parsedNode); RelDataType getValidatedRowType(String sql); FunctionImplementationRegistry getFunctionImplementationRegistry(); PlannerSettings getSettings(); RelDataType getOutputType(SqlNode validatedNode); JavaTypeFactory getTypeFactory(); SqlOperatorTable getOpTab(); RelOptCostFactory getCostFactory(); FunctionContext getFunctionContext(); DremioCatalogReader getCatalogReader(); SqlParser.Config getParserConfig(); AttemptObserver getObserver(); MaterializationList getMaterializations(); int getNestingLevel(); RelOptCluster getCluster(); AccelerationAwareSubstitutionProvider getSubstitutionProvider(); RelSerializerFactory getSerializerFactory(); SabotConfig getConfig(); RelRootPlus toConvertibleRelRoot(final SqlNode validatedNode, boolean expand, boolean flatten); static final SqlShuttle STRING_LITERAL_CONVERTER; }
@Test public void testPassSemicolon() { ParserConfig config = new ParserConfig(ParserConfig.QUOTING, 100); SqlNode node = SqlConverter.parseSingleStatementImpl("select * from t1;", config, false); assertEquals("SELECT *\n" + "FROM \"t1\"", node.toSqlString(CalciteSqlDialect.DEFAULT).getSql()); }
@VisibleForTesting static SqlNode parseSingleStatementImpl(String sql, ParserConfig parserConfig, boolean isInnerQuery) { SqlNodeList list = parseMultipleStatementsImpl(sql, parserConfig, isInnerQuery); if (list.size() > 1) { SqlParserPos pos = list.get(1).getParserPosition(); int col = pos.getColumnNum(); String first = sql.substring(0, col); String second = sql.substring(col-1); UserException.Builder builder = UserException.parseError(); builder.message("Unable to parse multiple queries. First query is %s. Rest of submission is %s", first, second); throw builder.buildSilently(); } SqlNode newNode = list.get(0).accept(STRING_LITERAL_CONVERTER); return newNode; }
SqlConverter { @VisibleForTesting static SqlNode parseSingleStatementImpl(String sql, ParserConfig parserConfig, boolean isInnerQuery) { SqlNodeList list = parseMultipleStatementsImpl(sql, parserConfig, isInnerQuery); if (list.size() > 1) { SqlParserPos pos = list.get(1).getParserPosition(); int col = pos.getColumnNum(); String first = sql.substring(0, col); String second = sql.substring(col-1); UserException.Builder builder = UserException.parseError(); builder.message("Unable to parse multiple queries. First query is %s. Rest of submission is %s", first, second); throw builder.buildSilently(); } SqlNode newNode = list.get(0).accept(STRING_LITERAL_CONVERTER); return newNode; } }
SqlConverter { @VisibleForTesting static SqlNode parseSingleStatementImpl(String sql, ParserConfig parserConfig, boolean isInnerQuery) { SqlNodeList list = parseMultipleStatementsImpl(sql, parserConfig, isInnerQuery); if (list.size() > 1) { SqlParserPos pos = list.get(1).getParserPosition(); int col = pos.getColumnNum(); String first = sql.substring(0, col); String second = sql.substring(col-1); UserException.Builder builder = UserException.parseError(); builder.message("Unable to parse multiple queries. First query is %s. Rest of submission is %s", first, second); throw builder.buildSilently(); } SqlNode newNode = list.get(0).accept(STRING_LITERAL_CONVERTER); return newNode; } SqlConverter( final PlannerSettings settings, final SqlOperatorTable operatorTable, final FunctionContext functionContext, final MaterializationDescriptorProvider materializationProvider, final FunctionImplementationRegistry functions, final UserSession session, final AttemptObserver observer, final Catalog catalog, final SubstitutionProviderFactory factory, final SabotConfig config, final ScanResult scanResult ); SqlConverter(SqlConverter parent, DremioCatalogReader catalog); }
SqlConverter { @VisibleForTesting static SqlNode parseSingleStatementImpl(String sql, ParserConfig parserConfig, boolean isInnerQuery) { SqlNodeList list = parseMultipleStatementsImpl(sql, parserConfig, isInnerQuery); if (list.size() > 1) { SqlParserPos pos = list.get(1).getParserPosition(); int col = pos.getColumnNum(); String first = sql.substring(0, col); String second = sql.substring(col-1); UserException.Builder builder = UserException.parseError(); builder.message("Unable to parse multiple queries. First query is %s. Rest of submission is %s", first, second); throw builder.buildSilently(); } SqlNode newNode = list.get(0).accept(STRING_LITERAL_CONVERTER); return newNode; } SqlConverter( final PlannerSettings settings, final SqlOperatorTable operatorTable, final FunctionContext functionContext, final MaterializationDescriptorProvider materializationProvider, final FunctionImplementationRegistry functions, final UserSession session, final AttemptObserver observer, final Catalog catalog, final SubstitutionProviderFactory factory, final SabotConfig config, final ScanResult scanResult ); SqlConverter(SqlConverter parent, DremioCatalogReader catalog); SqlNodeList parseMultipleStatements(String sql); SqlNode parse(String sql); ViewExpansionContext getViewExpansionContext(); UserSession getSession(); SqlNode validate(final SqlNode parsedNode); RelDataType getValidatedRowType(String sql); FunctionImplementationRegistry getFunctionImplementationRegistry(); PlannerSettings getSettings(); RelDataType getOutputType(SqlNode validatedNode); JavaTypeFactory getTypeFactory(); SqlOperatorTable getOpTab(); RelOptCostFactory getCostFactory(); FunctionContext getFunctionContext(); DremioCatalogReader getCatalogReader(); SqlParser.Config getParserConfig(); AttemptObserver getObserver(); MaterializationList getMaterializations(); int getNestingLevel(); RelOptCluster getCluster(); AccelerationAwareSubstitutionProvider getSubstitutionProvider(); RelSerializerFactory getSerializerFactory(); SabotConfig getConfig(); RelRootPlus toConvertibleRelRoot(final SqlNode validatedNode, boolean expand, boolean flatten); }
SqlConverter { @VisibleForTesting static SqlNode parseSingleStatementImpl(String sql, ParserConfig parserConfig, boolean isInnerQuery) { SqlNodeList list = parseMultipleStatementsImpl(sql, parserConfig, isInnerQuery); if (list.size() > 1) { SqlParserPos pos = list.get(1).getParserPosition(); int col = pos.getColumnNum(); String first = sql.substring(0, col); String second = sql.substring(col-1); UserException.Builder builder = UserException.parseError(); builder.message("Unable to parse multiple queries. First query is %s. Rest of submission is %s", first, second); throw builder.buildSilently(); } SqlNode newNode = list.get(0).accept(STRING_LITERAL_CONVERTER); return newNode; } SqlConverter( final PlannerSettings settings, final SqlOperatorTable operatorTable, final FunctionContext functionContext, final MaterializationDescriptorProvider materializationProvider, final FunctionImplementationRegistry functions, final UserSession session, final AttemptObserver observer, final Catalog catalog, final SubstitutionProviderFactory factory, final SabotConfig config, final ScanResult scanResult ); SqlConverter(SqlConverter parent, DremioCatalogReader catalog); SqlNodeList parseMultipleStatements(String sql); SqlNode parse(String sql); ViewExpansionContext getViewExpansionContext(); UserSession getSession(); SqlNode validate(final SqlNode parsedNode); RelDataType getValidatedRowType(String sql); FunctionImplementationRegistry getFunctionImplementationRegistry(); PlannerSettings getSettings(); RelDataType getOutputType(SqlNode validatedNode); JavaTypeFactory getTypeFactory(); SqlOperatorTable getOpTab(); RelOptCostFactory getCostFactory(); FunctionContext getFunctionContext(); DremioCatalogReader getCatalogReader(); SqlParser.Config getParserConfig(); AttemptObserver getObserver(); MaterializationList getMaterializations(); int getNestingLevel(); RelOptCluster getCluster(); AccelerationAwareSubstitutionProvider getSubstitutionProvider(); RelSerializerFactory getSerializerFactory(); SabotConfig getConfig(); RelRootPlus toConvertibleRelRoot(final SqlNode validatedNode, boolean expand, boolean flatten); static final SqlShuttle STRING_LITERAL_CONVERTER; }
@Test public void testSuggestion() { final SqlParserUtil.StringAndPos stringAndPos = SqlParserUtil.findPos(sql); List<SqlMoniker> suggestions = sqlAnalyzer.suggest(stringAndPos.sql, stringAndPos.cursor); assertEquals(expectedSuggestionCount, suggestions.size()); if (checkSuggestions) { assertSuggestions(suggestions); } }
public List<SqlMoniker> suggest(String sql, int cursorPosition) { SqlAdvisor sqlAdvisor = new SqlAdvisor(validator); String[] replaced = {null}; return sqlAdvisor.getCompletionHints(sql, cursorPosition , replaced); }
SQLAnalyzer { public List<SqlMoniker> suggest(String sql, int cursorPosition) { SqlAdvisor sqlAdvisor = new SqlAdvisor(validator); String[] replaced = {null}; return sqlAdvisor.getCompletionHints(sql, cursorPosition , replaced); } }
SQLAnalyzer { public List<SqlMoniker> suggest(String sql, int cursorPosition) { SqlAdvisor sqlAdvisor = new SqlAdvisor(validator); String[] replaced = {null}; return sqlAdvisor.getCompletionHints(sql, cursorPosition , replaced); } protected SQLAnalyzer(final SqlValidatorWithHints validator); }
SQLAnalyzer { public List<SqlMoniker> suggest(String sql, int cursorPosition) { SqlAdvisor sqlAdvisor = new SqlAdvisor(validator); String[] replaced = {null}; return sqlAdvisor.getCompletionHints(sql, cursorPosition , replaced); } protected SQLAnalyzer(final SqlValidatorWithHints validator); List<SqlMoniker> suggest(String sql, int cursorPosition); List<SqlAdvisor.ValidateErrorInfo> validate(String sql); }
SQLAnalyzer { public List<SqlMoniker> suggest(String sql, int cursorPosition) { SqlAdvisor sqlAdvisor = new SqlAdvisor(validator); String[] replaced = {null}; return sqlAdvisor.getCompletionHints(sql, cursorPosition , replaced); } protected SQLAnalyzer(final SqlValidatorWithHints validator); List<SqlMoniker> suggest(String sql, int cursorPosition); List<SqlAdvisor.ValidateErrorInfo> validate(String sql); }
@Test public void testValidation() { List<SqlAdvisor.ValidateErrorInfo> validationErrors = sqlAnalyzer.validate("select * from"); assertEquals(1, validationErrors.size()); assertEquals(10, validationErrors.get(0).getStartColumnNum()); assertEquals(13, validationErrors.get(0).getEndColumnNum()); }
public List<SqlAdvisor.ValidateErrorInfo> validate(String sql) { SqlAdvisor sqlAdvisor = new SqlAdvisor(validator); return sqlAdvisor.validate(sql); }
SQLAnalyzer { public List<SqlAdvisor.ValidateErrorInfo> validate(String sql) { SqlAdvisor sqlAdvisor = new SqlAdvisor(validator); return sqlAdvisor.validate(sql); } }
SQLAnalyzer { public List<SqlAdvisor.ValidateErrorInfo> validate(String sql) { SqlAdvisor sqlAdvisor = new SqlAdvisor(validator); return sqlAdvisor.validate(sql); } protected SQLAnalyzer(final SqlValidatorWithHints validator); }
SQLAnalyzer { public List<SqlAdvisor.ValidateErrorInfo> validate(String sql) { SqlAdvisor sqlAdvisor = new SqlAdvisor(validator); return sqlAdvisor.validate(sql); } protected SQLAnalyzer(final SqlValidatorWithHints validator); List<SqlMoniker> suggest(String sql, int cursorPosition); List<SqlAdvisor.ValidateErrorInfo> validate(String sql); }
SQLAnalyzer { public List<SqlAdvisor.ValidateErrorInfo> validate(String sql) { SqlAdvisor sqlAdvisor = new SqlAdvisor(validator); return sqlAdvisor.validate(sql); } protected SQLAnalyzer(final SqlValidatorWithHints validator); List<SqlMoniker> suggest(String sql, int cursorPosition); List<SqlAdvisor.ValidateErrorInfo> validate(String sql); }
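A minimal sketch of turning the validate() results into readable messages, matching the 1-based column assertions in the test above. That ValidateErrorInfo also exposes getMessage() is an assumption; printErrors is a hypothetical helper name.
import java.util.List;
import org.apache.calcite.sql.advise.SqlAdvisor;

public final class ValidateSketch {
  private ValidateSketch() {}

  public static void printErrors(SQLAnalyzer analyzer, String sql) {
    List<SqlAdvisor.ValidateErrorInfo> errors = analyzer.validate(sql);
    for (SqlAdvisor.ValidateErrorInfo error : errors) {
      // Columns are 1-based, matching the assertions in the test above.
      System.out.printf("cols %d-%d: %s%n",
          error.getStartColumnNum(), error.getEndColumnNum(), error.getMessage());
    }
  }
}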
@Test public void simpleSelectNoLimit() { Prel input = newScreen( newProject(exprs(), rowType(), newUnionExchange( newProject(exprs(), rowType(), newSoftScan(rowType()) ) ) ) ); Prel output = SimpleLimitExchangeRemover.apply(plannerSettings, input); verifyOutput(output, "Screen", "Project", "UnionExchange", "Project", "SystemScan"); }
public static Prel apply(PlannerSettings settings, Prel input){ if(!settings.isTrivialSingularOptimized() || settings.isLeafLimitsEnabled()) { return input; } if(input.accept(new Identifier(), false)){ return input.accept(new AllExchangeRemover(), null); } return input; }
SimpleLimitExchangeRemover { public static Prel apply(PlannerSettings settings, Prel input){ if(!settings.isTrivialSingularOptimized() || settings.isLeafLimitsEnabled()) { return input; } if(input.accept(new Identifier(), false)){ return input.accept(new AllExchangeRemover(), null); } return input; } }
SimpleLimitExchangeRemover { public static Prel apply(PlannerSettings settings, Prel input){ if(!settings.isTrivialSingularOptimized() || settings.isLeafLimitsEnabled()) { return input; } if(input.accept(new Identifier(), false)){ return input.accept(new AllExchangeRemover(), null); } return input; } }
SimpleLimitExchangeRemover { public static Prel apply(PlannerSettings settings, Prel input){ if(!settings.isTrivialSingularOptimized() || settings.isLeafLimitsEnabled()) { return input; } if(input.accept(new Identifier(), false)){ return input.accept(new AllExchangeRemover(), null); } return input; } static Prel apply(PlannerSettings settings, Prel input); }
SimpleLimitExchangeRemover { public static Prel apply(PlannerSettings settings, Prel input){ if(!settings.isTrivialSingularOptimized() || settings.isLeafLimitsEnabled()) { return input; } if(input.accept(new Identifier(), false)){ return input.accept(new AllExchangeRemover(), null); } return input; } static Prel apply(PlannerSettings settings, Prel input); }
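A minimal sketch isolating the two guards at the top of SimpleLimitExchangeRemover.apply(): the rewrite is only attempted when trivial-singular optimization is enabled and leaf limits are disabled. shouldAttemptRemoval is a hypothetical helper name; it only restates the visible condition with plain booleans.
public final class ExchangeRemovalGuards {
  private ExchangeRemovalGuards() {}

  public static boolean shouldAttemptRemoval(boolean trivialSingularOptimized, boolean leafLimitsEnabled) {
    // Mirrors: if (!settings.isTrivialSingularOptimized() || settings.isLeafLimitsEnabled()) return input;
    return trivialSingularOptimized && !leafLimitsEnabled;
  }
}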
@Test public void simpleSelectWithLimitWithSoftScan() { Prel input = newScreen( newLimit(0, 10, newProject(exprs(), rowType(), newUnionExchange( newLimit(0, 10, newProject(exprs(), rowType(), newSoftScan(rowType()) ) ) ) ) ) ); Prel output = SimpleLimitExchangeRemover.apply(plannerSettings, input); verifyOutput(output, "Screen", "Limit", "Project", "Limit", "Project", "SystemScan"); }
public static Prel apply(PlannerSettings settings, Prel input){ if(!settings.isTrivialSingularOptimized() || settings.isLeafLimitsEnabled()) { return input; } if(input.accept(new Identifier(), false)){ return input.accept(new AllExchangeRemover(), null); } return input; }
SimpleLimitExchangeRemover { public static Prel apply(PlannerSettings settings, Prel input){ if(!settings.isTrivialSingularOptimized() || settings.isLeafLimitsEnabled()) { return input; } if(input.accept(new Identifier(), false)){ return input.accept(new AllExchangeRemover(), null); } return input; } }
SimpleLimitExchangeRemover { public static Prel apply(PlannerSettings settings, Prel input){ if(!settings.isTrivialSingularOptimized() || settings.isLeafLimitsEnabled()) { return input; } if(input.accept(new Identifier(), false)){ return input.accept(new AllExchangeRemover(), null); } return input; } }
SimpleLimitExchangeRemover { public static Prel apply(PlannerSettings settings, Prel input){ if(!settings.isTrivialSingularOptimized() || settings.isLeafLimitsEnabled()) { return input; } if(input.accept(new Identifier(), false)){ return input.accept(new AllExchangeRemover(), null); } return input; } static Prel apply(PlannerSettings settings, Prel input); }
SimpleLimitExchangeRemover { public static Prel apply(PlannerSettings settings, Prel input){ if(!settings.isTrivialSingularOptimized() || settings.isLeafLimitsEnabled()) { return input; } if(input.accept(new Identifier(), false)){ return input.accept(new AllExchangeRemover(), null); } return input; } static Prel apply(PlannerSettings settings, Prel input); }
@Test public void simpleSelectWithLimitWithSoftScanWithLeafLimitsEnabled() { OptionValue optionEnabled = OptionValue.createBoolean(OptionValue.OptionType.QUERY, PlannerSettings.ENABLE_LEAF_LIMITS.getOptionName(), true); when(optionManager.getOption(PlannerSettings.ENABLE_LEAF_LIMITS.getOptionName())).thenReturn(optionEnabled); optionList.remove(PlannerSettings.ENABLE_LEAF_LIMITS.getDefault()); optionList.add(optionEnabled); Prel input = newScreen( newLimit(0, 10, newProject(exprs(), rowType(), newUnionExchange( newLimit(0, 10, newProject(exprs(), rowType(), newSoftScan(rowType()) ) ) ) ) ) ); Prel output = SimpleLimitExchangeRemover.apply(plannerSettings, input); verifyOutput(output, "Screen", "Limit", "Project", "UnionExchange", "Limit", "Project", "SystemScan"); }
public static Prel apply(PlannerSettings settings, Prel input){ if(!settings.isTrivialSingularOptimized() || settings.isLeafLimitsEnabled()) { return input; } if(input.accept(new Identifier(), false)){ return input.accept(new AllExchangeRemover(), null); } return input; }
SimpleLimitExchangeRemover { public static Prel apply(PlannerSettings settings, Prel input){ if(!settings.isTrivialSingularOptimized() || settings.isLeafLimitsEnabled()) { return input; } if(input.accept(new Identifier(), false)){ return input.accept(new AllExchangeRemover(), null); } return input; } }
SimpleLimitExchangeRemover { public static Prel apply(PlannerSettings settings, Prel input){ if(!settings.isTrivialSingularOptimized() || settings.isLeafLimitsEnabled()) { return input; } if(input.accept(new Identifier(), false)){ return input.accept(new AllExchangeRemover(), null); } return input; } }
SimpleLimitExchangeRemover { public static Prel apply(PlannerSettings settings, Prel input){ if(!settings.isTrivialSingularOptimized() || settings.isLeafLimitsEnabled()) { return input; } if(input.accept(new Identifier(), false)){ return input.accept(new AllExchangeRemover(), null); } return input; } static Prel apply(PlannerSettings settings, Prel input); }
SimpleLimitExchangeRemover { public static Prel apply(PlannerSettings settings, Prel input){ if(!settings.isTrivialSingularOptimized() || settings.isLeafLimitsEnabled()) { return input; } if(input.accept(new Identifier(), false)){ return input.accept(new AllExchangeRemover(), null); } return input; } static Prel apply(PlannerSettings settings, Prel input); }
@Test public void simpleSelectWithLargeLimitWithSoftScan() { Prel input = newScreen( newLimit(0, 200000, newProject(exprs(), rowType(), newUnionExchange( newLimit(0, 200000, newProject(exprs(), rowType(), newSoftScan(rowType()) ) ) ) ) ) ); Prel output = SimpleLimitExchangeRemover.apply(plannerSettings, input); verifyOutput(output, "Screen", "Limit", "Project", "UnionExchange", "Limit", "Project", "SystemScan"); }
public static Prel apply(PlannerSettings settings, Prel input){ if(!settings.isTrivialSingularOptimized() || settings.isLeafLimitsEnabled()) { return input; } if(input.accept(new Identifier(), false)){ return input.accept(new AllExchangeRemover(), null); } return input; }
SimpleLimitExchangeRemover { public static Prel apply(PlannerSettings settings, Prel input){ if(!settings.isTrivialSingularOptimized() || settings.isLeafLimitsEnabled()) { return input; } if(input.accept(new Identifier(), false)){ return input.accept(new AllExchangeRemover(), null); } return input; } }
SimpleLimitExchangeRemover { public static Prel apply(PlannerSettings settings, Prel input){ if(!settings.isTrivialSingularOptimized() || settings.isLeafLimitsEnabled()) { return input; } if(input.accept(new Identifier(), false)){ return input.accept(new AllExchangeRemover(), null); } return input; } }
SimpleLimitExchangeRemover { public static Prel apply(PlannerSettings settings, Prel input){ if(!settings.isTrivialSingularOptimized() || settings.isLeafLimitsEnabled()) { return input; } if(input.accept(new Identifier(), false)){ return input.accept(new AllExchangeRemover(), null); } return input; } static Prel apply(PlannerSettings settings, Prel input); }
SimpleLimitExchangeRemover { public static Prel apply(PlannerSettings settings, Prel input){ if(!settings.isTrivialSingularOptimized() || settings.isLeafLimitsEnabled()) { return input; } if(input.accept(new Identifier(), false)){ return input.accept(new AllExchangeRemover(), null); } return input; } static Prel apply(PlannerSettings settings, Prel input); }
@Test public void simpleSelectWithLimitWithHardScan() { Prel input = newScreen( newLimit(0, 10, newProject(exprs(), rowType(), newUnionExchange( newLimit(0, 10, newProject(exprs(), rowType(), newHardScan(rowType()) ) ) ) ) ) ); Prel output = SimpleLimitExchangeRemover.apply(plannerSettings, input); verifyOutput(output, "Screen", "Limit", "Project", "UnionExchange", "Limit", "Project", "SystemScan"); }
public static Prel apply(PlannerSettings settings, Prel input){ if(!settings.isTrivialSingularOptimized() || settings.isLeafLimitsEnabled()) { return input; } if(input.accept(new Identifier(), false)){ return input.accept(new AllExchangeRemover(), null); } return input; }
SimpleLimitExchangeRemover { public static Prel apply(PlannerSettings settings, Prel input){ if(!settings.isTrivialSingularOptimized() || settings.isLeafLimitsEnabled()) { return input; } if(input.accept(new Identifier(), false)){ return input.accept(new AllExchangeRemover(), null); } return input; } }
SimpleLimitExchangeRemover { public static Prel apply(PlannerSettings settings, Prel input){ if(!settings.isTrivialSingularOptimized() || settings.isLeafLimitsEnabled()) { return input; } if(input.accept(new Identifier(), false)){ return input.accept(new AllExchangeRemover(), null); } return input; } }
SimpleLimitExchangeRemover { public static Prel apply(PlannerSettings settings, Prel input){ if(!settings.isTrivialSingularOptimized() || settings.isLeafLimitsEnabled()) { return input; } if(input.accept(new Identifier(), false)){ return input.accept(new AllExchangeRemover(), null); } return input; } static Prel apply(PlannerSettings settings, Prel input); }
SimpleLimitExchangeRemover { public static Prel apply(PlannerSettings settings, Prel input){ if(!settings.isTrivialSingularOptimized() || settings.isLeafLimitsEnabled()) { return input; } if(input.accept(new Identifier(), false)){ return input.accept(new AllExchangeRemover(), null); } return input; } static Prel apply(PlannerSettings settings, Prel input); }
@Test public void testParseJsonPath3() throws Exception { JsonPath p = JSONElementLocator.parsePath("value[0][1][2]"); assertEquals(p.toString(), 3, p.size()); assertEquals(new JsonPath(new ArrayJsonPathElement(0), new ArrayJsonPathElement(1), new ArrayJsonPathElement(2)), p); }
public static JsonPath parsePath(String path) { if (path.startsWith(VALUE_PLACEHOLDER)) { return new JsonPath(path.substring(VALUE_PLACEHOLDER.length())); } throw new IllegalArgumentException(path + " must start with 'value'"); }
JSONElementLocator { public static JsonPath parsePath(String path) { if (path.startsWith(VALUE_PLACEHOLDER)) { return new JsonPath(path.substring(VALUE_PLACEHOLDER.length())); } throw new IllegalArgumentException(path + " must start with 'value'"); } }
JSONElementLocator { public static JsonPath parsePath(String path) { if (path.startsWith(VALUE_PLACEHOLDER)) { return new JsonPath(path.substring(VALUE_PLACEHOLDER.length())); } throw new IllegalArgumentException(path + " must start with 'value'"); } JSONElementLocator(String text); }
JSONElementLocator { public static JsonPath parsePath(String path) { if (path.startsWith(VALUE_PLACEHOLDER)) { return new JsonPath(path.substring(VALUE_PLACEHOLDER.length())); } throw new IllegalArgumentException(path + " must start with 'value'"); } JSONElementLocator(String text); static JsonPath parsePath(String path); Interval locatePath(JsonPath searchedPath); JsonSelection locate(int selectionStart, int selectionEnd); }
JSONElementLocator { public static JsonPath parsePath(String path) { if (path.startsWith(VALUE_PLACEHOLDER)) { return new JsonPath(path.substring(VALUE_PLACEHOLDER.length())); } throw new IllegalArgumentException(path + " must start with 'value'"); } JSONElementLocator(String text); static JsonPath parsePath(String path); Interval locatePath(JsonPath searchedPath); JsonSelection locate(int selectionStart, int selectionEnd); }
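A minimal sketch of the prefix handling in JSONElementLocator.parsePath(): the literal "value" placeholder is stripped and only the remainder (e.g. "[0][1][2]") is handed to the JsonPath constructor. stripValuePrefix is a hypothetical helper; it does not reimplement JsonPath parsing.
public final class JsonPathPrefixSketch {
  private static final String VALUE_PLACEHOLDER = "value";

  private JsonPathPrefixSketch() {}

  public static String stripValuePrefix(String path) {
    if (!path.startsWith(VALUE_PLACEHOLDER)) {
      throw new IllegalArgumentException(path + " must start with 'value'");
    }
    return path.substring(VALUE_PLACEHOLDER.length()); // "value[0][1][2]" -> "[0][1][2]"
  }
}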
@Test public void joinWithLimitWithSoftScan() { Prel input = newScreen( newLimit(0, 10, newProject(exprs(), rowType(), newUnionExchange( newJoin( newProject(exprs(), rowType(), newSoftScan(rowType()) ), newProject(exprs(), rowType(), newSoftScan(rowType()) ) ) ) ) ) ); Prel output = SimpleLimitExchangeRemover.apply(plannerSettings, input); verifyOutput(output, "Screen", "Limit", "Project", "UnionExchange", "HashJoin", "Project", "SystemScan", "Project", "SystemScan"); }
public static Prel apply(PlannerSettings settings, Prel input){ if(!settings.isTrivialSingularOptimized() || settings.isLeafLimitsEnabled()) { return input; } if(input.accept(new Identifier(), false)){ return input.accept(new AllExchangeRemover(), null); } return input; }
SimpleLimitExchangeRemover { public static Prel apply(PlannerSettings settings, Prel input){ if(!settings.isTrivialSingularOptimized() || settings.isLeafLimitsEnabled()) { return input; } if(input.accept(new Identifier(), false)){ return input.accept(new AllExchangeRemover(), null); } return input; } }
SimpleLimitExchangeRemover { public static Prel apply(PlannerSettings settings, Prel input){ if(!settings.isTrivialSingularOptimized() || settings.isLeafLimitsEnabled()) { return input; } if(input.accept(new Identifier(), false)){ return input.accept(new AllExchangeRemover(), null); } return input; } }
SimpleLimitExchangeRemover { public static Prel apply(PlannerSettings settings, Prel input){ if(!settings.isTrivialSingularOptimized() || settings.isLeafLimitsEnabled()) { return input; } if(input.accept(new Identifier(), false)){ return input.accept(new AllExchangeRemover(), null); } return input; } static Prel apply(PlannerSettings settings, Prel input); }
SimpleLimitExchangeRemover { public static Prel apply(PlannerSettings settings, Prel input){ if(!settings.isTrivialSingularOptimized() || settings.isLeafLimitsEnabled()) { return input; } if(input.accept(new Identifier(), false)){ return input.accept(new AllExchangeRemover(), null); } return input; } static Prel apply(PlannerSettings settings, Prel input); }
@Test public void testRuleNoMatch() throws Exception { final TestScanPrel scan = new TestScanPrel(cluster, TRAITS, table, pluginId, metadata, PROJECTED_COLUMNS, 0, true); final LimitPrel limitNode = new LimitPrel(cluster, TRAITS, scan, REX_BUILDER.makeExactLiteral(BigDecimal.valueOf(offset)), REX_BUILDER.makeExactLiteral(BigDecimal.valueOf(fetch))); final RelOptRuleCall call = newCall(rel -> fail("Unexpected call to transformTo"), limitNode, scan); assertFalse(PushLimitToPruneableScan.INSTANCE.matches(call)); }
@Override public boolean matches(RelOptRuleCall call) { return !((ScanPrelBase) call.rel(1)).hasFilter() && call.rel(1) instanceof PruneableScan; }
PushLimitToPruneableScan extends Prule { @Override public boolean matches(RelOptRuleCall call) { return !((ScanPrelBase) call.rel(1)).hasFilter() && call.rel(1) instanceof PruneableScan; } }
PushLimitToPruneableScan extends Prule { @Override public boolean matches(RelOptRuleCall call) { return !((ScanPrelBase) call.rel(1)).hasFilter() && call.rel(1) instanceof PruneableScan; } private PushLimitToPruneableScan(); }
PushLimitToPruneableScan extends Prule { @Override public boolean matches(RelOptRuleCall call) { return !((ScanPrelBase) call.rel(1)).hasFilter() && call.rel(1) instanceof PruneableScan; } private PushLimitToPruneableScan(); @Override boolean matches(RelOptRuleCall call); @Override void onMatch(RelOptRuleCall call); }
PushLimitToPruneableScan extends Prule { @Override public boolean matches(RelOptRuleCall call) { return !((ScanPrelBase) call.rel(1)).hasFilter() && call.rel(1) instanceof PruneableScan; } private PushLimitToPruneableScan(); @Override boolean matches(RelOptRuleCall call); @Override void onMatch(RelOptRuleCall call); static final RelOptRule INSTANCE; }
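A minimal sketch of the rule's matches() predicate with its two inputs lifted out as booleans: the scan beneath the limit must carry no pushed filter and must implement PruneableScan, which is exactly why the test above expects no match. ruleMatches is a hypothetical helper name.
public final class PushLimitMatchSketch {
  private PushLimitMatchSketch() {}

  public static boolean ruleMatches(boolean scanHasFilter, boolean scanIsPruneable) {
    // Mirrors: return !((ScanPrelBase) call.rel(1)).hasFilter() && call.rel(1) instanceof PruneableScan;
    return !scanHasFilter && scanIsPruneable;
  }
}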
@Test public void testUnderflowSize() { KeyFairSliceCalculator keyFairSliceCalculator = new KeyFairSliceCalculator(newHashMap("k1", 4, "k2", 4, "k3", 4), 16); assertEquals("Invalid combined key size", 13, keyFairSliceCalculator.getTotalSize()); }
public int getTotalSize() { return totalSize; }
KeyFairSliceCalculator { public int getTotalSize() { return totalSize; } }
KeyFairSliceCalculator { public int getTotalSize() { return totalSize; } KeyFairSliceCalculator(Map<String, Integer> originalKeySizes, int maxTotalSize); }
KeyFairSliceCalculator { public int getTotalSize() { return totalSize; } KeyFairSliceCalculator(Map<String, Integer> originalKeySizes, int maxTotalSize); Integer getKeySlice(String key); int getTotalSize(); boolean keysTrimmed(); int numValidityBytes(); }
KeyFairSliceCalculator { public int getTotalSize() { return totalSize; } KeyFairSliceCalculator(Map<String, Integer> originalKeySizes, int maxTotalSize); Integer getKeySlice(String key); int getTotalSize(); boolean keysTrimmed(); int numValidityBytes(); }
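Worked arithmetic for testUnderflowSize, under the assumption that the calculator reserves one validity byte per eight keys on top of the key slices (the numValidityBytes() accessor in the class above suggests such a term exists): three 4-byte keys fit untrimmed (12 bytes) plus 1 validity byte gives the asserted 13, comfortably under the 16-byte cap.
public final class FairSliceArithmetic {
  private FairSliceArithmetic() {}

  public static void main(String[] args) {
    int[] keySizes = {4, 4, 4};
    int maxTotalSize = 16;

    int validityBytes = (keySizes.length + 7) / 8;   // 1 byte for 3 keys (assumption)
    int keyBytes = 0;
    for (int size : keySizes) {
      keyBytes += size;                              // keys fit untrimmed: 12 bytes
    }
    int total = keyBytes + validityBytes;            // 12 + 1 = 13 <= 16
    System.out.println("total = " + total + " (test asserts 13, cap " + maxTotalSize + ")");
  }
}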
@Test public void testDictionaryFileName() throws Exception { assertEquals("_foo.dict", GlobalDictionaryBuilder.dictionaryFileName("foo")); assertEquals("_a.b.c.dict", GlobalDictionaryBuilder.dictionaryFileName("a.b.c")); assertEquals("_foo.dict", GlobalDictionaryBuilder.dictionaryFileName(new ColumnDescriptor(new String[]{"foo"}, INT64, 0, 1))); assertEquals("_a.b.c.dict", GlobalDictionaryBuilder.dictionaryFileName(new ColumnDescriptor(new String[]{"a", "b", "c"}, INT64, 0, 1))); }
public static String dictionaryFileName(String columnFullPath) { return format("_%s.%s", columnFullPath, DICTIONARY_FILES_EXTENSION); }
GlobalDictionaryBuilder { public static String dictionaryFileName(String columnFullPath) { return format("_%s.%s", columnFullPath, DICTIONARY_FILES_EXTENSION); } }
GlobalDictionaryBuilder { public static String dictionaryFileName(String columnFullPath) { return format("_%s.%s", columnFullPath, DICTIONARY_FILES_EXTENSION); } }
GlobalDictionaryBuilder { public static String dictionaryFileName(String columnFullPath) { return format("_%s.%s", columnFullPath, DICTIONARY_FILES_EXTENSION); } static String dictionaryFileName(String columnFullPath); static String dictionaryFileName(ColumnDescriptor columnDescriptor); static Path dictionaryFilePath(Path dictionaryRootDir, String columnFullPath); static Path dictionaryFilePath(Path dictionaryRootDir, ColumnDescriptor columnDescriptor); static String getColumnFullPath(String dictionaryFileName); static long getDictionaryVersion(FileSystem fs, Path tableDir); static String dictionaryRootDirName(long version); static Path getDictionaryVersionedRootPath(FileSystem fs, Path tableDir, long version); static Path createDictionaryVersionedRootPath(FileSystem fs, Path tableDir, long nextVersion, Path tmpDictionaryRootPath); static Path getDictionaryFile(FileSystem fs, Path dictRootDir, String columnFullPath); static Path getDictionaryFile(FileSystem fs, Path dictRootDir, ColumnDescriptor columnDescriptor); static Map<String, Path> listDictionaryFiles(FileSystem fs, Path dictRootDir); static VectorContainer readDictionary(FileSystem fs, Path dictionaryRootDir, String columnFullPath, BufferAllocator bufferAllocator); static VectorContainer readDictionary(FileSystem fs, Path dictionaryRootDir, ColumnDescriptor columnDescriptor, BufferAllocator bufferAllocator); static VectorContainer readDictionary(FileSystem fs, Path dictionaryFile, BufferAllocator bufferAllocator); static GlobalDictionariesInfo updateGlobalDictionaries(CompressionCodecFactory codecFactory, FileSystem fs, Path tableDir, Path partitionDir, BufferAllocator bufferAllocator); static GlobalDictionariesInfo createGlobalDictionaries(CompressionCodecFactory codecFactory, FileSystem fs, Path tableDir, BufferAllocator bufferAllocator); static void writeDictionary(OutputStream out, VectorAccessible input, int recordCount, BufferAllocator bufferAllocator); static void main(String []args); }
GlobalDictionaryBuilder { public static String dictionaryFileName(String columnFullPath) { return format("_%s.%s", columnFullPath, DICTIONARY_FILES_EXTENSION); } static String dictionaryFileName(String columnFullPath); static String dictionaryFileName(ColumnDescriptor columnDescriptor); static Path dictionaryFilePath(Path dictionaryRootDir, String columnFullPath); static Path dictionaryFilePath(Path dictionaryRootDir, ColumnDescriptor columnDescriptor); static String getColumnFullPath(String dictionaryFileName); static long getDictionaryVersion(FileSystem fs, Path tableDir); static String dictionaryRootDirName(long version); static Path getDictionaryVersionedRootPath(FileSystem fs, Path tableDir, long version); static Path createDictionaryVersionedRootPath(FileSystem fs, Path tableDir, long nextVersion, Path tmpDictionaryRootPath); static Path getDictionaryFile(FileSystem fs, Path dictRootDir, String columnFullPath); static Path getDictionaryFile(FileSystem fs, Path dictRootDir, ColumnDescriptor columnDescriptor); static Map<String, Path> listDictionaryFiles(FileSystem fs, Path dictRootDir); static VectorContainer readDictionary(FileSystem fs, Path dictionaryRootDir, String columnFullPath, BufferAllocator bufferAllocator); static VectorContainer readDictionary(FileSystem fs, Path dictionaryRootDir, ColumnDescriptor columnDescriptor, BufferAllocator bufferAllocator); static VectorContainer readDictionary(FileSystem fs, Path dictionaryFile, BufferAllocator bufferAllocator); static GlobalDictionariesInfo updateGlobalDictionaries(CompressionCodecFactory codecFactory, FileSystem fs, Path tableDir, Path partitionDir, BufferAllocator bufferAllocator); static GlobalDictionariesInfo createGlobalDictionaries(CompressionCodecFactory codecFactory, FileSystem fs, Path tableDir, BufferAllocator bufferAllocator); static void writeDictionary(OutputStream out, VectorAccessible input, int recordCount, BufferAllocator bufferAllocator); static void main(String []args); static final String DICTIONARY_TEMP_ROOT_PREFIX; static final String DICTIONARY_ROOT_PREFIX; static final Pattern DICTIONARY_VERSION_PATTERN; static final Predicate<Path> DICTIONARY_ROOT_FILTER; static final String DICTIONARY_FILES_EXTENSION; static final Predicate<Path> DICTIONARY_FILES_FILTER; static final Pattern DICTIONARY_FILES_PATTERN; }
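A minimal sketch of the naming convention exercised by the test: an underscore prefix, the column's full dotted path, and a ".dict" extension. The ".dict" literal is taken from the test expectations; the real class reads it from DICTIONARY_FILES_EXTENSION.
public final class DictionaryNameSketch {
  private DictionaryNameSketch() {}

  public static String dictionaryFileName(String columnFullPath) {
    return String.format("_%s.%s", columnFullPath, "dict"); // "foo" -> "_foo.dict"
  }

  public static void main(String[] args) {
    System.out.println(dictionaryFileName("a.b.c")); // _a.b.c.dict
  }
}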
@Test public void testExtractColumnName() throws Exception { assertEquals("foo", GlobalDictionaryBuilder.getColumnFullPath("_foo.dict")); assertEquals("a.b.c", GlobalDictionaryBuilder.getColumnFullPath("_a.b.c.dict")); }
public static String getColumnFullPath(String dictionaryFileName) { final Matcher matcher = DICTIONARY_FILES_PATTERN.matcher(dictionaryFileName); if (matcher.find()) { return matcher.group(1); } return null; }
GlobalDictionaryBuilder { public static String getColumnFullPath(String dictionaryFileName) { final Matcher matcher = DICTIONARY_FILES_PATTERN.matcher(dictionaryFileName); if (matcher.find()) { return matcher.group(1); } return null; } }
GlobalDictionaryBuilder { public static String getColumnFullPath(String dictionaryFileName) { final Matcher matcher = DICTIONARY_FILES_PATTERN.matcher(dictionaryFileName); if (matcher.find()) { return matcher.group(1); } return null; } }
GlobalDictionaryBuilder { public static String getColumnFullPath(String dictionaryFileName) { final Matcher matcher = DICTIONARY_FILES_PATTERN.matcher(dictionaryFileName); if (matcher.find()) { return matcher.group(1); } return null; } static String dictionaryFileName(String columnFullPath); static String dictionaryFileName(ColumnDescriptor columnDescriptor); static Path dictionaryFilePath(Path dictionaryRootDir, String columnFullPath); static Path dictionaryFilePath(Path dictionaryRootDir, ColumnDescriptor columnDescriptor); static String getColumnFullPath(String dictionaryFileName); static long getDictionaryVersion(FileSystem fs, Path tableDir); static String dictionaryRootDirName(long version); static Path getDictionaryVersionedRootPath(FileSystem fs, Path tableDir, long version); static Path createDictionaryVersionedRootPath(FileSystem fs, Path tableDir, long nextVersion, Path tmpDictionaryRootPath); static Path getDictionaryFile(FileSystem fs, Path dictRootDir, String columnFullPath); static Path getDictionaryFile(FileSystem fs, Path dictRootDir, ColumnDescriptor columnDescriptor); static Map<String, Path> listDictionaryFiles(FileSystem fs, Path dictRootDir); static VectorContainer readDictionary(FileSystem fs, Path dictionaryRootDir, String columnFullPath, BufferAllocator bufferAllocator); static VectorContainer readDictionary(FileSystem fs, Path dictionaryRootDir, ColumnDescriptor columnDescriptor, BufferAllocator bufferAllocator); static VectorContainer readDictionary(FileSystem fs, Path dictionaryFile, BufferAllocator bufferAllocator); static GlobalDictionariesInfo updateGlobalDictionaries(CompressionCodecFactory codecFactory, FileSystem fs, Path tableDir, Path partitionDir, BufferAllocator bufferAllocator); static GlobalDictionariesInfo createGlobalDictionaries(CompressionCodecFactory codecFactory, FileSystem fs, Path tableDir, BufferAllocator bufferAllocator); static void writeDictionary(OutputStream out, VectorAccessible input, int recordCount, BufferAllocator bufferAllocator); static void main(String []args); }
GlobalDictionaryBuilder { public static String getColumnFullPath(String dictionaryFileName) { final Matcher matcher = DICTIONARY_FILES_PATTERN.matcher(dictionaryFileName); if (matcher.find()) { return matcher.group(1); } return null; } static String dictionaryFileName(String columnFullPath); static String dictionaryFileName(ColumnDescriptor columnDescriptor); static Path dictionaryFilePath(Path dictionaryRootDir, String columnFullPath); static Path dictionaryFilePath(Path dictionaryRootDir, ColumnDescriptor columnDescriptor); static String getColumnFullPath(String dictionaryFileName); static long getDictionaryVersion(FileSystem fs, Path tableDir); static String dictionaryRootDirName(long version); static Path getDictionaryVersionedRootPath(FileSystem fs, Path tableDir, long version); static Path createDictionaryVersionedRootPath(FileSystem fs, Path tableDir, long nextVersion, Path tmpDictionaryRootPath); static Path getDictionaryFile(FileSystem fs, Path dictRootDir, String columnFullPath); static Path getDictionaryFile(FileSystem fs, Path dictRootDir, ColumnDescriptor columnDescriptor); static Map<String, Path> listDictionaryFiles(FileSystem fs, Path dictRootDir); static VectorContainer readDictionary(FileSystem fs, Path dictionaryRootDir, String columnFullPath, BufferAllocator bufferAllocator); static VectorContainer readDictionary(FileSystem fs, Path dictionaryRootDir, ColumnDescriptor columnDescriptor, BufferAllocator bufferAllocator); static VectorContainer readDictionary(FileSystem fs, Path dictionaryFile, BufferAllocator bufferAllocator); static GlobalDictionariesInfo updateGlobalDictionaries(CompressionCodecFactory codecFactory, FileSystem fs, Path tableDir, Path partitionDir, BufferAllocator bufferAllocator); static GlobalDictionariesInfo createGlobalDictionaries(CompressionCodecFactory codecFactory, FileSystem fs, Path tableDir, BufferAllocator bufferAllocator); static void writeDictionary(OutputStream out, VectorAccessible input, int recordCount, BufferAllocator bufferAllocator); static void main(String []args); static final String DICTIONARY_TEMP_ROOT_PREFIX; static final String DICTIONARY_ROOT_PREFIX; static final Pattern DICTIONARY_VERSION_PATTERN; static final Predicate<Path> DICTIONARY_ROOT_FILTER; static final String DICTIONARY_FILES_EXTENSION; static final Predicate<Path> DICTIONARY_FILES_FILTER; static final Pattern DICTIONARY_FILES_PATTERN; }
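A minimal sketch of the reverse mapping tested above. The actual DICTIONARY_FILES_PATTERN is not shown in this excerpt, so the regex below is an assumption chosen only to satisfy both test cases ("_foo.dict" -> "foo", "_a.b.c.dict" -> "a.b.c").
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public final class ColumnPathSketch {
  private static final Pattern ASSUMED_PATTERN = Pattern.compile("^_(.*)\\.dict$");

  private ColumnPathSketch() {}

  public static String columnFullPath(String dictionaryFileName) {
    Matcher matcher = ASSUMED_PATTERN.matcher(dictionaryFileName);
    return matcher.find() ? matcher.group(1) : null; // greedy group keeps inner dots
  }

  public static void main(String[] args) {
    System.out.println(columnFullPath("_a.b.c.dict")); // a.b.c
  }
}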
@Test public void syntheticSimple() { ConfigurableOperator cnb = new ConfigurableOperator(OpProps.prototype(1, Long.MAX_VALUE).cloneWithMemoryExpensive(true).cloneWithBound(false).cloneWithMemoryFactor(2.0d), ARBTRIARY_LEAF); ConfigurableOperator cb = new ConfigurableOperator(OpProps.prototype(1, Long.MAX_VALUE).cloneWithMemoryExpensive(true).cloneWithBound(true).cloneWithMemoryFactor(1.0d), cnb); Fragment f1 = new Fragment(); f1.addOperator(cb); Wrapper w1 = new Wrapper(f1, 0); w1.overrideEndpoints(Collections.singletonList(N1)); MemoryAllocationUtilities.setMemory(options, ImmutableMap.of(f1, w1), 10); assertEquals(Long.MAX_VALUE, cnb.getProps().getMemLimit()); assertEquals(3, cb.getProps().getMemLimit()); }
@VisibleForTesting static void setMemory(final OptionManager optionManager, Map<Fragment, Wrapper> fragments, long maxMemoryPerNodePerQuery) { final ArrayListMultimap<NodeEndpoint, PhysicalOperator> consideredOps = ArrayListMultimap.create(); final ArrayListMultimap<NodeEndpoint, PhysicalOperator> nonConsideredOps = ArrayListMultimap.create(); long queryMaxAllocation = Long.MAX_VALUE; for(Entry<Fragment, Wrapper> entry: fragments.entrySet()) { FindConsideredOperators fco = new FindConsideredOperators(); entry.getKey().getRoot().accept(fco, null); for(NodeEndpoint e : entry.getValue().getAssignedEndpoints()) { consideredOps.putAll(e, fco.getConsideredOperators()); } for(NodeEndpoint e : entry.getValue().getAssignedEndpoints()) { nonConsideredOps.putAll(e, fco.getNonConsideredOperators()); } } for(NodeEndpoint ep : consideredOps.keySet()) { long outsideReserve = nonConsideredOps.get(ep).stream().mapToLong(t -> t.getProps().getMemReserve()).sum(); List<PhysicalOperator> ops = consideredOps.get(ep); long consideredOpsReserve = ops.stream().mapToLong(t -> t.getProps().getMemReserve()).sum(); if (outsideReserve + consideredOpsReserve > queryMaxAllocation) { throw UserException.resourceError() .message("Query was cancelled because the initial memory requirement (%s) is greater than the job memory limit set by the administrator (%s).", PrettyPrintUtils.bytePrint(outsideReserve + consideredOpsReserve, true), PrettyPrintUtils.bytePrint(queryMaxAllocation, true)) .build(logger); } final double totalWeights = ops.stream().mapToDouble(t -> t.getProps().getMemoryFactor()).sum(); final long memoryForHeavyOperations = maxMemoryPerNodePerQuery - outsideReserve; if(memoryForHeavyOperations < 1) { throw UserException.memoryError() .message("Query was cancelled because it exceeded the memory limits set by the administrator. Expected at least %s bytes, but only had %s available.", PrettyPrintUtils.bytePrint(outsideReserve, true), PrettyPrintUtils.bytePrint(maxMemoryPerNodePerQuery, true)) .build(logger); } final double baseWeight = memoryForHeavyOperations/totalWeights; ops.stream() .filter(op -> op.getProps().isMemoryBound()) .forEach(op -> { long targetValue = (long) (baseWeight * op.getProps().getMemoryFactor()); targetValue = Math.max(Math.min(targetValue, op.getProps().getMemLimit()), op.getProps().getMemReserve()); long lowLimit = op.getProps().getMemLowLimit(); long highLimit = op.getProps().getMemLimit(); op.getProps().setMemLimit(targetValue); if (targetValue < lowLimit) { op.getProps().setMemLimit(lowLimit); } if (targetValue > highLimit) { op.getProps().setMemLimit(highLimit); } }); } }
MemoryAllocationUtilities { @VisibleForTesting static void setMemory(final OptionManager optionManager, Map<Fragment, Wrapper> fragments, long maxMemoryPerNodePerQuery) { final ArrayListMultimap<NodeEndpoint, PhysicalOperator> consideredOps = ArrayListMultimap.create(); final ArrayListMultimap<NodeEndpoint, PhysicalOperator> nonConsideredOps = ArrayListMultimap.create(); long queryMaxAllocation = Long.MAX_VALUE; for(Entry<Fragment, Wrapper> entry: fragments.entrySet()) { FindConsideredOperators fco = new FindConsideredOperators(); entry.getKey().getRoot().accept(fco, null); for(NodeEndpoint e : entry.getValue().getAssignedEndpoints()) { consideredOps.putAll(e, fco.getConsideredOperators()); } for(NodeEndpoint e : entry.getValue().getAssignedEndpoints()) { nonConsideredOps.putAll(e, fco.getNonConsideredOperators()); } } for(NodeEndpoint ep : consideredOps.keySet()) { long outsideReserve = nonConsideredOps.get(ep).stream().mapToLong(t -> t.getProps().getMemReserve()).sum(); List<PhysicalOperator> ops = consideredOps.get(ep); long consideredOpsReserve = ops.stream().mapToLong(t -> t.getProps().getMemReserve()).sum(); if (outsideReserve + consideredOpsReserve > queryMaxAllocation) { throw UserException.resourceError() .message("Query was cancelled because the initial memory requirement (%s) is greater than the job memory limit set by the administrator (%s).", PrettyPrintUtils.bytePrint(outsideReserve + consideredOpsReserve, true), PrettyPrintUtils.bytePrint(queryMaxAllocation, true)) .build(logger); } final double totalWeights = ops.stream().mapToDouble(t -> t.getProps().getMemoryFactor()).sum(); final long memoryForHeavyOperations = maxMemoryPerNodePerQuery - outsideReserve; if(memoryForHeavyOperations < 1) { throw UserException.memoryError() .message("Query was cancelled because it exceeded the memory limits set by the administrator. Expected at least %s bytes, but only had %s available.", PrettyPrintUtils.bytePrint(outsideReserve, true), PrettyPrintUtils.bytePrint(maxMemoryPerNodePerQuery, true)) .build(logger); } final double baseWeight = memoryForHeavyOperations/totalWeights; ops.stream() .filter(op -> op.getProps().isMemoryBound()) .forEach(op -> { long targetValue = (long) (baseWeight * op.getProps().getMemoryFactor()); targetValue = Math.max(Math.min(targetValue, op.getProps().getMemLimit()), op.getProps().getMemReserve()); long lowLimit = op.getProps().getMemLowLimit(); long highLimit = op.getProps().getMemLimit(); op.getProps().setMemLimit(targetValue); if (targetValue < lowLimit) { op.getProps().setMemLimit(lowLimit); } if (targetValue > highLimit) { op.getProps().setMemLimit(highLimit); } }); } } }
MemoryAllocationUtilities { @VisibleForTesting static void setMemory(final OptionManager optionManager, Map<Fragment, Wrapper> fragments, long maxMemoryPerNodePerQuery) { final ArrayListMultimap<NodeEndpoint, PhysicalOperator> consideredOps = ArrayListMultimap.create(); final ArrayListMultimap<NodeEndpoint, PhysicalOperator> nonConsideredOps = ArrayListMultimap.create(); long queryMaxAllocation = Long.MAX_VALUE; for(Entry<Fragment, Wrapper> entry: fragments.entrySet()) { FindConsideredOperators fco = new FindConsideredOperators(); entry.getKey().getRoot().accept(fco, null); for(NodeEndpoint e : entry.getValue().getAssignedEndpoints()) { consideredOps.putAll(e, fco.getConsideredOperators()); } for(NodeEndpoint e : entry.getValue().getAssignedEndpoints()) { nonConsideredOps.putAll(e, fco.getNonConsideredOperators()); } } for(NodeEndpoint ep : consideredOps.keySet()) { long outsideReserve = nonConsideredOps.get(ep).stream().mapToLong(t -> t.getProps().getMemReserve()).sum(); List<PhysicalOperator> ops = consideredOps.get(ep); long consideredOpsReserve = ops.stream().mapToLong(t -> t.getProps().getMemReserve()).sum(); if (outsideReserve + consideredOpsReserve > queryMaxAllocation) { throw UserException.resourceError() .message("Query was cancelled because the initial memory requirement (%s) is greater than the job memory limit set by the administrator (%s).", PrettyPrintUtils.bytePrint(outsideReserve + consideredOpsReserve, true), PrettyPrintUtils.bytePrint(queryMaxAllocation, true)) .build(logger); } final double totalWeights = ops.stream().mapToDouble(t -> t.getProps().getMemoryFactor()).sum(); final long memoryForHeavyOperations = maxMemoryPerNodePerQuery - outsideReserve; if(memoryForHeavyOperations < 1) { throw UserException.memoryError() .message("Query was cancelled because it exceeded the memory limits set by the administrator. Expected at least %s bytes, but only had %s available.", PrettyPrintUtils.bytePrint(outsideReserve, true), PrettyPrintUtils.bytePrint(maxMemoryPerNodePerQuery, true)) .build(logger); } final double baseWeight = memoryForHeavyOperations/totalWeights; ops.stream() .filter(op -> op.getProps().isMemoryBound()) .forEach(op -> { long targetValue = (long) (baseWeight * op.getProps().getMemoryFactor()); targetValue = Math.max(Math.min(targetValue, op.getProps().getMemLimit()), op.getProps().getMemReserve()); long lowLimit = op.getProps().getMemLowLimit(); long highLimit = op.getProps().getMemLimit(); op.getProps().setMemLimit(targetValue); if (targetValue < lowLimit) { op.getProps().setMemLimit(lowLimit); } if (targetValue > highLimit) { op.getProps().setMemLimit(highLimit); } }); } } private MemoryAllocationUtilities(); }
MemoryAllocationUtilities { @VisibleForTesting static void setMemory(final OptionManager optionManager, Map<Fragment, Wrapper> fragments, long maxMemoryPerNodePerQuery) { final ArrayListMultimap<NodeEndpoint, PhysicalOperator> consideredOps = ArrayListMultimap.create(); final ArrayListMultimap<NodeEndpoint, PhysicalOperator> nonConsideredOps = ArrayListMultimap.create(); long queryMaxAllocation = Long.MAX_VALUE; for(Entry<Fragment, Wrapper> entry: fragments.entrySet()) { FindConsideredOperators fco = new FindConsideredOperators(); entry.getKey().getRoot().accept(fco, null); for(NodeEndpoint e : entry.getValue().getAssignedEndpoints()) { consideredOps.putAll(e, fco.getConsideredOperators()); } for(NodeEndpoint e : entry.getValue().getAssignedEndpoints()) { nonConsideredOps.putAll(e, fco.getNonConsideredOperators()); } } for(NodeEndpoint ep : consideredOps.keySet()) { long outsideReserve = nonConsideredOps.get(ep).stream().mapToLong(t -> t.getProps().getMemReserve()).sum(); List<PhysicalOperator> ops = consideredOps.get(ep); long consideredOpsReserve = ops.stream().mapToLong(t -> t.getProps().getMemReserve()).sum(); if (outsideReserve + consideredOpsReserve > queryMaxAllocation) { throw UserException.resourceError() .message("Query was cancelled because the initial memory requirement (%s) is greater than the job memory limit set by the administrator (%s).", PrettyPrintUtils.bytePrint(outsideReserve + consideredOpsReserve, true), PrettyPrintUtils.bytePrint(queryMaxAllocation, true)) .build(logger); } final double totalWeights = ops.stream().mapToDouble(t -> t.getProps().getMemoryFactor()).sum(); final long memoryForHeavyOperations = maxMemoryPerNodePerQuery - outsideReserve; if(memoryForHeavyOperations < 1) { throw UserException.memoryError() .message("Query was cancelled because it exceeded the memory limits set by the administrator. Expected at least %s bytes, but only had %s available.", PrettyPrintUtils.bytePrint(outsideReserve, true), PrettyPrintUtils.bytePrint(maxMemoryPerNodePerQuery, true)) .build(logger); } final double baseWeight = memoryForHeavyOperations/totalWeights; ops.stream() .filter(op -> op.getProps().isMemoryBound()) .forEach(op -> { long targetValue = (long) (baseWeight * op.getProps().getMemoryFactor()); targetValue = Math.max(Math.min(targetValue, op.getProps().getMemLimit()), op.getProps().getMemReserve()); long lowLimit = op.getProps().getMemLowLimit(); long highLimit = op.getProps().getMemLimit(); op.getProps().setMemLimit(targetValue); if (targetValue < lowLimit) { op.getProps().setMemLimit(lowLimit); } if (targetValue > highLimit) { op.getProps().setMemLimit(highLimit); } }); } } private MemoryAllocationUtilities(); static void setupBoundedMemoryAllocations( final PhysicalPlan plan, final OptionManager optionManager, final GroupResourceInformation clusterInfo, final PlanningSet planningSet, final long allocatedMemoryPerQuery ); }
MemoryAllocationUtilities { @VisibleForTesting static void setMemory(final OptionManager optionManager, Map<Fragment, Wrapper> fragments, long maxMemoryPerNodePerQuery) { final ArrayListMultimap<NodeEndpoint, PhysicalOperator> consideredOps = ArrayListMultimap.create(); final ArrayListMultimap<NodeEndpoint, PhysicalOperator> nonConsideredOps = ArrayListMultimap.create(); long queryMaxAllocation = Long.MAX_VALUE; for(Entry<Fragment, Wrapper> entry: fragments.entrySet()) { FindConsideredOperators fco = new FindConsideredOperators(); entry.getKey().getRoot().accept(fco, null); for(NodeEndpoint e : entry.getValue().getAssignedEndpoints()) { consideredOps.putAll(e, fco.getConsideredOperators()); } for(NodeEndpoint e : entry.getValue().getAssignedEndpoints()) { nonConsideredOps.putAll(e, fco.getNonConsideredOperators()); } } for(NodeEndpoint ep : consideredOps.keySet()) { long outsideReserve = nonConsideredOps.get(ep).stream().mapToLong(t -> t.getProps().getMemReserve()).sum(); List<PhysicalOperator> ops = consideredOps.get(ep); long consideredOpsReserve = ops.stream().mapToLong(t -> t.getProps().getMemReserve()).sum(); if (outsideReserve + consideredOpsReserve > queryMaxAllocation) { throw UserException.resourceError() .message("Query was cancelled because the initial memory requirement (%s) is greater than the job memory limit set by the administrator (%s).", PrettyPrintUtils.bytePrint(outsideReserve + consideredOpsReserve, true), PrettyPrintUtils.bytePrint(queryMaxAllocation, true)) .build(logger); } final double totalWeights = ops.stream().mapToDouble(t -> t.getProps().getMemoryFactor()).sum(); final long memoryForHeavyOperations = maxMemoryPerNodePerQuery - outsideReserve; if(memoryForHeavyOperations < 1) { throw UserException.memoryError() .message("Query was cancelled because it exceeded the memory limits set by the administrator. Expected at least %s bytes, but only had %s available.", PrettyPrintUtils.bytePrint(outsideReserve, true), PrettyPrintUtils.bytePrint(maxMemoryPerNodePerQuery, true)) .build(logger); } final double baseWeight = memoryForHeavyOperations/totalWeights; ops.stream() .filter(op -> op.getProps().isMemoryBound()) .forEach(op -> { long targetValue = (long) (baseWeight * op.getProps().getMemoryFactor()); targetValue = Math.max(Math.min(targetValue, op.getProps().getMemLimit()), op.getProps().getMemReserve()); long lowLimit = op.getProps().getMemLowLimit(); long highLimit = op.getProps().getMemLimit(); op.getProps().setMemLimit(targetValue); if (targetValue < lowLimit) { op.getProps().setMemLimit(lowLimit); } if (targetValue > highLimit) { op.getProps().setMemLimit(highLimit); } }); } } private MemoryAllocationUtilities(); static void setupBoundedMemoryAllocations( final PhysicalPlan plan, final OptionManager optionManager, final GroupResourceInformation clusterInfo, final PlanningSet planningSet, final long allocatedMemoryPerQuery ); }
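Worked arithmetic for the syntheticSimple assertions, assuming both operators count as "considered" and the reserve outside the considered set is negligible: baseWeight = 10 / (2.0 + 1.0) ≈ 3.33, and only the memory-bound operator (factor 1.0) has its limit rewritten, to (long)(3.33) = 3, while the unbounded operator keeps Long.MAX_VALUE.
public final class SetMemoryArithmetic {
  private SetMemoryArithmetic() {}

  public static void main(String[] args) {
    long maxMemoryPerNodePerQuery = 10;
    long outsideReserve = 0;                         // assumption: no non-considered reserve
    double totalWeights = 2.0 + 1.0;                 // cnb factor 2.0 + cb factor 1.0

    double baseWeight = (maxMemoryPerNodePerQuery - outsideReserve) / totalWeights; // ~3.33
    long boundedLimit = (long) (baseWeight * 1.0);   // cb is the only bound operator -> 3
    System.out.println("bounded op limit = " + boundedLimit + " (test asserts 3)");
    System.out.println("unbounded op keeps Long.MAX_VALUE, as asserted");
  }
}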
@Test public void doubleSort() { ExternalSort es1 = new ExternalSort(OpProps.prototype().cloneWithNewReserve(0).cloneWithMemoryExpensive(true).cloneWithMemoryFactor(options.getOption(SORT_FACTOR)).cloneWithBound(options.getOption(SORT_BOUNDED)), ARBTRIARY_LEAF, Collections.emptyList(), false); ExternalSort es2 = new ExternalSort(OpProps.prototype().cloneWithNewReserve(0).cloneWithMemoryExpensive(true).cloneWithMemoryFactor(options.getOption(SORT_FACTOR)).cloneWithBound(options.getOption(SORT_BOUNDED)), es1, Collections.emptyList(), false); Fragment f1 = new Fragment(); f1.addOperator(es2); Wrapper wrapper = new Wrapper(f1, 0); wrapper.overrideEndpoints(Collections.singletonList(N1)); MemoryAllocationUtilities.setMemory(options, ImmutableMap.of(f1, wrapper), 10); assertEquals(4l, es1.getProps().getMemLimit()); assertEquals(4l, es2.getProps().getMemLimit()); }
@VisibleForTesting static void setMemory(final OptionManager optionManager, Map<Fragment, Wrapper> fragments, long maxMemoryPerNodePerQuery) { final ArrayListMultimap<NodeEndpoint, PhysicalOperator> consideredOps = ArrayListMultimap.create(); final ArrayListMultimap<NodeEndpoint, PhysicalOperator> nonConsideredOps = ArrayListMultimap.create(); long queryMaxAllocation = Long.MAX_VALUE; for(Entry<Fragment, Wrapper> entry: fragments.entrySet()) { FindConsideredOperators fco = new FindConsideredOperators(); entry.getKey().getRoot().accept(fco, null); for(NodeEndpoint e : entry.getValue().getAssignedEndpoints()) { consideredOps.putAll(e, fco.getConsideredOperators()); } for(NodeEndpoint e : entry.getValue().getAssignedEndpoints()) { nonConsideredOps.putAll(e, fco.getNonConsideredOperators()); } } for(NodeEndpoint ep : consideredOps.keySet()) { long outsideReserve = nonConsideredOps.get(ep).stream().mapToLong(t -> t.getProps().getMemReserve()).sum(); List<PhysicalOperator> ops = consideredOps.get(ep); long consideredOpsReserve = ops.stream().mapToLong(t -> t.getProps().getMemReserve()).sum(); if (outsideReserve + consideredOpsReserve > queryMaxAllocation) { throw UserException.resourceError() .message("Query was cancelled because the initial memory requirement (%s) is greater than the job memory limit set by the administrator (%s).", PrettyPrintUtils.bytePrint(outsideReserve + consideredOpsReserve, true), PrettyPrintUtils.bytePrint(queryMaxAllocation, true)) .build(logger); } final double totalWeights = ops.stream().mapToDouble(t -> t.getProps().getMemoryFactor()).sum(); final long memoryForHeavyOperations = maxMemoryPerNodePerQuery - outsideReserve; if(memoryForHeavyOperations < 1) { throw UserException.memoryError() .message("Query was cancelled because it exceeded the memory limits set by the administrator. Expected at least %s bytes, but only had %s available.", PrettyPrintUtils.bytePrint(outsideReserve, true), PrettyPrintUtils.bytePrint(maxMemoryPerNodePerQuery, true)) .build(logger); } final double baseWeight = memoryForHeavyOperations/totalWeights; ops.stream() .filter(op -> op.getProps().isMemoryBound()) .forEach(op -> { long targetValue = (long) (baseWeight * op.getProps().getMemoryFactor()); targetValue = Math.max(Math.min(targetValue, op.getProps().getMemLimit()), op.getProps().getMemReserve()); long lowLimit = op.getProps().getMemLowLimit(); long highLimit = op.getProps().getMemLimit(); op.getProps().setMemLimit(targetValue); if (targetValue < lowLimit) { op.getProps().setMemLimit(lowLimit); } if (targetValue > highLimit) { op.getProps().setMemLimit(highLimit); } }); } }
MemoryAllocationUtilities { @VisibleForTesting static void setMemory(final OptionManager optionManager, Map<Fragment, Wrapper> fragments, long maxMemoryPerNodePerQuery) { final ArrayListMultimap<NodeEndpoint, PhysicalOperator> consideredOps = ArrayListMultimap.create(); final ArrayListMultimap<NodeEndpoint, PhysicalOperator> nonConsideredOps = ArrayListMultimap.create(); long queryMaxAllocation = Long.MAX_VALUE; for(Entry<Fragment, Wrapper> entry: fragments.entrySet()) { FindConsideredOperators fco = new FindConsideredOperators(); entry.getKey().getRoot().accept(fco, null); for(NodeEndpoint e : entry.getValue().getAssignedEndpoints()) { consideredOps.putAll(e, fco.getConsideredOperators()); } for(NodeEndpoint e : entry.getValue().getAssignedEndpoints()) { nonConsideredOps.putAll(e, fco.getNonConsideredOperators()); } } for(NodeEndpoint ep : consideredOps.keySet()) { long outsideReserve = nonConsideredOps.get(ep).stream().mapToLong(t -> t.getProps().getMemReserve()).sum(); List<PhysicalOperator> ops = consideredOps.get(ep); long consideredOpsReserve = ops.stream().mapToLong(t -> t.getProps().getMemReserve()).sum(); if (outsideReserve + consideredOpsReserve > queryMaxAllocation) { throw UserException.resourceError() .message("Query was cancelled because the initial memory requirement (%s) is greater than the job memory limit set by the administrator (%s).", PrettyPrintUtils.bytePrint(outsideReserve + consideredOpsReserve, true), PrettyPrintUtils.bytePrint(queryMaxAllocation, true)) .build(logger); } final double totalWeights = ops.stream().mapToDouble(t -> t.getProps().getMemoryFactor()).sum(); final long memoryForHeavyOperations = maxMemoryPerNodePerQuery - outsideReserve; if(memoryForHeavyOperations < 1) { throw UserException.memoryError() .message("Query was cancelled because it exceeded the memory limits set by the administrator. Expected at least %s bytes, but only had %s available.", PrettyPrintUtils.bytePrint(outsideReserve, true), PrettyPrintUtils.bytePrint(maxMemoryPerNodePerQuery, true)) .build(logger); } final double baseWeight = memoryForHeavyOperations/totalWeights; ops.stream() .filter(op -> op.getProps().isMemoryBound()) .forEach(op -> { long targetValue = (long) (baseWeight * op.getProps().getMemoryFactor()); targetValue = Math.max(Math.min(targetValue, op.getProps().getMemLimit()), op.getProps().getMemReserve()); long lowLimit = op.getProps().getMemLowLimit(); long highLimit = op.getProps().getMemLimit(); op.getProps().setMemLimit(targetValue); if (targetValue < lowLimit) { op.getProps().setMemLimit(lowLimit); } if (targetValue > highLimit) { op.getProps().setMemLimit(highLimit); } }); } } }
MemoryAllocationUtilities { @VisibleForTesting static void setMemory(final OptionManager optionManager, Map<Fragment, Wrapper> fragments, long maxMemoryPerNodePerQuery) { final ArrayListMultimap<NodeEndpoint, PhysicalOperator> consideredOps = ArrayListMultimap.create(); final ArrayListMultimap<NodeEndpoint, PhysicalOperator> nonConsideredOps = ArrayListMultimap.create(); long queryMaxAllocation = Long.MAX_VALUE; for(Entry<Fragment, Wrapper> entry: fragments.entrySet()) { FindConsideredOperators fco = new FindConsideredOperators(); entry.getKey().getRoot().accept(fco, null); for(NodeEndpoint e : entry.getValue().getAssignedEndpoints()) { consideredOps.putAll(e, fco.getConsideredOperators()); } for(NodeEndpoint e : entry.getValue().getAssignedEndpoints()) { nonConsideredOps.putAll(e, fco.getNonConsideredOperators()); } } for(NodeEndpoint ep : consideredOps.keySet()) { long outsideReserve = nonConsideredOps.get(ep).stream().mapToLong(t -> t.getProps().getMemReserve()).sum(); List<PhysicalOperator> ops = consideredOps.get(ep); long consideredOpsReserve = ops.stream().mapToLong(t -> t.getProps().getMemReserve()).sum(); if (outsideReserve + consideredOpsReserve > queryMaxAllocation) { throw UserException.resourceError() .message("Query was cancelled because the initial memory requirement (%s) is greater than the job memory limit set by the administrator (%s).", PrettyPrintUtils.bytePrint(outsideReserve + consideredOpsReserve, true), PrettyPrintUtils.bytePrint(queryMaxAllocation, true)) .build(logger); } final double totalWeights = ops.stream().mapToDouble(t -> t.getProps().getMemoryFactor()).sum(); final long memoryForHeavyOperations = maxMemoryPerNodePerQuery - outsideReserve; if(memoryForHeavyOperations < 1) { throw UserException.memoryError() .message("Query was cancelled because it exceeded the memory limits set by the administrator. Expected at least %s bytes, but only had %s available.", PrettyPrintUtils.bytePrint(outsideReserve, true), PrettyPrintUtils.bytePrint(maxMemoryPerNodePerQuery, true)) .build(logger); } final double baseWeight = memoryForHeavyOperations/totalWeights; ops.stream() .filter(op -> op.getProps().isMemoryBound()) .forEach(op -> { long targetValue = (long) (baseWeight * op.getProps().getMemoryFactor()); targetValue = Math.max(Math.min(targetValue, op.getProps().getMemLimit()), op.getProps().getMemReserve()); long lowLimit = op.getProps().getMemLowLimit(); long highLimit = op.getProps().getMemLimit(); op.getProps().setMemLimit(targetValue); if (targetValue < lowLimit) { op.getProps().setMemLimit(lowLimit); } if (targetValue > highLimit) { op.getProps().setMemLimit(highLimit); } }); } } private MemoryAllocationUtilities(); }
MemoryAllocationUtilities { @VisibleForTesting static void setMemory(final OptionManager optionManager, Map<Fragment, Wrapper> fragments, long maxMemoryPerNodePerQuery) { final ArrayListMultimap<NodeEndpoint, PhysicalOperator> consideredOps = ArrayListMultimap.create(); final ArrayListMultimap<NodeEndpoint, PhysicalOperator> nonConsideredOps = ArrayListMultimap.create(); long queryMaxAllocation = Long.MAX_VALUE; for(Entry<Fragment, Wrapper> entry: fragments.entrySet()) { FindConsideredOperators fco = new FindConsideredOperators(); entry.getKey().getRoot().accept(fco, null); for(NodeEndpoint e : entry.getValue().getAssignedEndpoints()) { consideredOps.putAll(e, fco.getConsideredOperators()); } for(NodeEndpoint e : entry.getValue().getAssignedEndpoints()) { nonConsideredOps.putAll(e, fco.getNonConsideredOperators()); } } for(NodeEndpoint ep : consideredOps.keySet()) { long outsideReserve = nonConsideredOps.get(ep).stream().mapToLong(t -> t.getProps().getMemReserve()).sum(); List<PhysicalOperator> ops = consideredOps.get(ep); long consideredOpsReserve = ops.stream().mapToLong(t -> t.getProps().getMemReserve()).sum(); if (outsideReserve + consideredOpsReserve > queryMaxAllocation) { throw UserException.resourceError() .message("Query was cancelled because the initial memory requirement (%s) is greater than the job memory limit set by the administrator (%s).", PrettyPrintUtils.bytePrint(outsideReserve + consideredOpsReserve, true), PrettyPrintUtils.bytePrint(queryMaxAllocation, true)) .build(logger); } final double totalWeights = ops.stream().mapToDouble(t -> t.getProps().getMemoryFactor()).sum(); final long memoryForHeavyOperations = maxMemoryPerNodePerQuery - outsideReserve; if(memoryForHeavyOperations < 1) { throw UserException.memoryError() .message("Query was cancelled because it exceeded the memory limits set by the administrator. Expected at least %s bytes, but only had %s available.", PrettyPrintUtils.bytePrint(outsideReserve, true), PrettyPrintUtils.bytePrint(maxMemoryPerNodePerQuery, true)) .build(logger); } final double baseWeight = memoryForHeavyOperations/totalWeights; ops.stream() .filter(op -> op.getProps().isMemoryBound()) .forEach(op -> { long targetValue = (long) (baseWeight * op.getProps().getMemoryFactor()); targetValue = Math.max(Math.min(targetValue, op.getProps().getMemLimit()), op.getProps().getMemReserve()); long lowLimit = op.getProps().getMemLowLimit(); long highLimit = op.getProps().getMemLimit(); op.getProps().setMemLimit(targetValue); if (targetValue < lowLimit) { op.getProps().setMemLimit(lowLimit); } if (targetValue > highLimit) { op.getProps().setMemLimit(highLimit); } }); } } private MemoryAllocationUtilities(); static void setupBoundedMemoryAllocations( final PhysicalPlan plan, final OptionManager optionManager, final GroupResourceInformation clusterInfo, final PlanningSet planningSet, final long allocatedMemoryPerQuery ); }
MemoryAllocationUtilities { @VisibleForTesting static void setMemory(final OptionManager optionManager, Map<Fragment, Wrapper> fragments, long maxMemoryPerNodePerQuery) { final ArrayListMultimap<NodeEndpoint, PhysicalOperator> consideredOps = ArrayListMultimap.create(); final ArrayListMultimap<NodeEndpoint, PhysicalOperator> nonConsideredOps = ArrayListMultimap.create(); long queryMaxAllocation = Long.MAX_VALUE; for(Entry<Fragment, Wrapper> entry: fragments.entrySet()) { FindConsideredOperators fco = new FindConsideredOperators(); entry.getKey().getRoot().accept(fco, null); for(NodeEndpoint e : entry.getValue().getAssignedEndpoints()) { consideredOps.putAll(e, fco.getConsideredOperators()); } for(NodeEndpoint e : entry.getValue().getAssignedEndpoints()) { nonConsideredOps.putAll(e, fco.getNonConsideredOperators()); } } for(NodeEndpoint ep : consideredOps.keySet()) { long outsideReserve = nonConsideredOps.get(ep).stream().mapToLong(t -> t.getProps().getMemReserve()).sum(); List<PhysicalOperator> ops = consideredOps.get(ep); long consideredOpsReserve = ops.stream().mapToLong(t -> t.getProps().getMemReserve()).sum(); if (outsideReserve + consideredOpsReserve > queryMaxAllocation) { throw UserException.resourceError() .message("Query was cancelled because the initial memory requirement (%s) is greater than the job memory limit set by the administrator (%s).", PrettyPrintUtils.bytePrint(outsideReserve + consideredOpsReserve, true), PrettyPrintUtils.bytePrint(queryMaxAllocation, true)) .build(logger); } final double totalWeights = ops.stream().mapToDouble(t -> t.getProps().getMemoryFactor()).sum(); final long memoryForHeavyOperations = maxMemoryPerNodePerQuery - outsideReserve; if(memoryForHeavyOperations < 1) { throw UserException.memoryError() .message("Query was cancelled because it exceeded the memory limits set by the administrator. Expected at least %s bytes, but only had %s available.", PrettyPrintUtils.bytePrint(outsideReserve, true), PrettyPrintUtils.bytePrint(maxMemoryPerNodePerQuery, true)) .build(logger); } final double baseWeight = memoryForHeavyOperations/totalWeights; ops.stream() .filter(op -> op.getProps().isMemoryBound()) .forEach(op -> { long targetValue = (long) (baseWeight * op.getProps().getMemoryFactor()); targetValue = Math.max(Math.min(targetValue, op.getProps().getMemLimit()), op.getProps().getMemReserve()); long lowLimit = op.getProps().getMemLowLimit(); long highLimit = op.getProps().getMemLimit(); op.getProps().setMemLimit(targetValue); if (targetValue < lowLimit) { op.getProps().setMemLimit(lowLimit); } if (targetValue > highLimit) { op.getProps().setMemLimit(highLimit); } }); } } private MemoryAllocationUtilities(); static void setupBoundedMemoryAllocations( final PhysicalPlan plan, final OptionManager optionManager, final GroupResourceInformation clusterInfo, final PlanningSet planningSet, final long allocatedMemoryPerQuery ); }
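Worked arithmetic for the doubleSort assertions. Both sorts carry the same SORT_FACTOR weight, so the factor cancels out of the per-operator share; what determines the result is how much reserve the leaf contributes outside the considered set, assumed here to be 2 bytes (any value of 1 or 2 yields the asserted limit of 4 after the long cast).
public final class DoubleSortArithmetic {
  private DoubleSortArithmetic() {}

  public static void main(String[] args) {
    long maxMemoryPerNodePerQuery = 10;
    long assumedOutsideReserve = 2;                  // assumption: leaf reserve outside the considered ops
    double sortFactor = 1.0;                         // cancels out; actual SORT_FACTOR value not needed

    double totalWeights = 2 * sortFactor;            // two identical sorts
    double baseWeight = (maxMemoryPerNodePerQuery - assumedOutsideReserve) / totalWeights;
    long perSortLimit = (long) (baseWeight * sortFactor); // (10 - 2) / 2 = 4
    System.out.println("per-sort limit = " + perSortLimit + " (test asserts 4 for both sorts)");
  }
}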