repo
stringlengths 1
191
⌀ | file
stringlengths 23
351
| code
stringlengths 0
5.32M
| file_length
int64 0
5.32M
| avg_line_length
float64 0
2.9k
| max_line_length
int64 0
288k
| extension_type
stringclasses 1
value |
---|---|---|---|---|---|---|
SeqTrans
|
SeqTrans-master/Migration/src/collect/CollectSingleFile.java
|
package collect;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Calendar;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import gumtreediff.gen.srcml.SrcmlJavaTreeGenerator;
import gumtreediff.matchers.MappingStore;
import gumtreediff.matchers.Matcher;
import gumtreediff.matchers.Matchers;
import gumtreediff.tree.ITree;
import gumtreediff.tree.TreeContext;
import split.Split;
import structure.Definition;
import structure.Location;
import structure.Migration;
import structure.SubTree;
import utils.Defuse;
import utils.Output;
import utils.Utils;
public class CollectSingleFile {
/**
 * Entry point. Usage: --input &lt;javaFile&gt; --location &lt;lineNumber&gt; [--output &lt;dir&gt;].
 * When invoked with no arguments, falls back to the hard-coded developer
 * defaults below (original behavior), so existing invocations keep working.
 *
 * @throws Exception if a required argument is missing or the analysis fails
 */
public static void main(String[] args) throws Exception {
	List<String> parList = Arrays.asList(args);
	String inputPath = null;
	String location = null;
	String outputPath = null;
	if (!parList.isEmpty()) {
		// Each flag takes its value from the following argument.
		for (int i = 0; i < parList.size() - 1; i++) {
			String par = parList.get(i);
			if (par.equals("--input")) {
				inputPath = parList.get(i + 1);
			}
			if (par.equals("--location")) {
				location = parList.get(i + 1);
			}
			if (par.equals("--output")) {
				outputPath = parList.get(i + 1);
			}
		}
		if (inputPath == null)
			throw new Exception("no input");
		if (location == null)
			throw new Exception("no location");
		if (outputPath == null) {
			// Default the output directory to the current working directory.
			Path path = Paths.get("");
			outputPath = path.toAbsolutePath().toString();
		}
	} else {
		// Developer defaults, kept for backward compatibility with no-arg runs.
		inputPath = "D:\\workspace\\eclipse2018\\Migration\\AccountInstance.java";
		location = String.valueOf(15);
		outputPath = "D:\\workspace\\eclipse2018\\Migration\\test";
	}
	filterDefUse(inputPath, Integer.valueOf(location), outputPath);
}
/**
 * Finds the sub-tree(s) of {@code input} that cover source line {@code location},
 * collects the variable definitions each one uses, and appends three reports
 * under {@code output}: src-val.txt (def locations + var replacements),
 * src-num.txt (sub-tree locations) and src_check.txt (sub-tree ids + locations).
 *
 * @param input    path of the Java file to analyse
 * @param location 1-based line number of the change of interest
 * @param output   directory that receives the three report files (append mode)
 * @throws Exception if no AST node starts at {@code location}
 */
public static void filterDefUse(String input, int location, String output) throws Exception {
File inputFile = new File(input);
String var_path = output+File.separator+"src-val.txt";
String num_path = output+File.separator+"src-num.txt";
String check_path = output+File.separator+"src_check.txt";
TreeContext tc = new SrcmlJavaTreeGenerator().generateFromFile(inputFile);
ITree root = tc.getRoot();
List<ITree> all_nodes = root.getDescendants();
all_nodes.add(root);
System.out.println(all_nodes.size());
// Every AST node whose start line equals the requested location.
ArrayList<ITree> locateNodes = new ArrayList<>();
for(ITree node : all_nodes) {
int start_line = node.getLine();
if(start_line==location) {
locateNodes.add(node);
}
}
if(locateNodes.size()==0)
throw new Exception("plz check the location!");
System.out.println("Analyse: "+input);
Split sp = new Split();
Defuse defuse = new Defuse();
ArrayList<Definition> defs = defuse.getDef(tc, "src");//compute actions first, then collect the definitions
HashMap<String, ArrayList<Definition>> defMap = defuse.transferDefs(defs);
HashMap<ITree, ITree> leaf2parblock_map = defuse.searchBlockMap(tc);
HashMap<ITree, ArrayList<Definition>> blockMap = defuse.transferBlockMap(defs, tc, "src");
ArrayList<SubTree> sub = sp.splitSubTree(tc, input);//note: blocks may be split across sub-trees
HashMap<Integer, HashMap<String, String>> usedDefs2Map = new HashMap<Integer, HashMap<String, String>>();// NOTE(review): never read in this method
System.out.println("def1size:"+defs.size());
System.out.println("def1Mapsize:"+defMap.size());
System.out.println("block1size:"+blockMap.size());
ArrayList<SubTree> changedSTree = new ArrayList<>();
// Keep only the sub-trees that contain at least one node on the target line.
for(SubTree st : sub) {
ITree t = st.getRoot();
// System.out.println("StID:"+t.getId());
List<ITree> nodeList = t.getDescendants();
nodeList.add(t);
for(ITree node : nodeList) {
if(locateNodes.contains(node)) {
changedSTree.add(st);
// System.out.println("find a action subtree! "+t.getId());
break;
}
}
}//first find the sub-trees containing the action nodes
System.out.println("subSize:"+sub.size());
System.out.println("changeSize:"+changedSTree.size());
for(SubTree st : changedSTree) {
ITree sRoot = st.getRoot();
int id = sRoot.getId();
System.out.println("===================");
System.out.println("StID:"+id);
// varName -> label pairs to report for this sub-tree.
HashMap<String, String> replaceMap_src = new HashMap<String, String>();
HashSet<Definition> usedDefs1 = new HashSet<Definition>();
Boolean same = false;// NOTE(review): unused
ArrayList<ITree> leaves1 = new ArrayList<ITree>();
Utils.traverse2Leaf(sRoot, leaves1);
int labelCount = 0;
for(ITree leaf : leaves1) {
String label = leaf.getLabel();
// System.out.println("label:"+label);
if(!label.equals(""))
labelCount++;
String type = tc.getTypeLabel(leaf);
if(type.equals("literal")) {
// Normalize literal labels before reporting.
leaf.setLabel(Output.deleteLiteral(leaf, tc));
// if(label.contains("\""))
// replaceMap_src.put("@@"+label+"@@", "None");
// else
// replaceMap_src.put("$$"+label+"$$", "num");//replace Literal
}
ArrayList<Definition> stringList = defMap.get(label);
if(stringList!=null) {
ITree parBlock = leaf2parblock_map.get(leaf);
ArrayList<Definition> blockList = blockMap.get(parBlock);
for(Definition def1 : stringList) {
if(blockList!=null) {
if(blockList.contains(def1)) {
// Only count definitions that appear before this use.
if(leaf.getId()>def1.getDefLabelID()) {
usedDefs1.add(def1);
System.out.println("DefTest: "+leaf.getLabel()+","+leaf.getId()+","+def1.getDefLabelID());
// leaf.setLabel("var");
replaceMap_src.put(label, label);
}
}
}
if(def1.getDefLabelID()==leaf.getId()) {
// leaf.setLabel("var");
replaceMap_src.put(label, label);
}
// System.out.println(leaf.getId()+","+def1.getDefLabelID());
// System.out.println("Def:"+def1.getType()+","+def1.getVarName());
}
}
}
if(labelCount==0) {
// Sub-tree has no labelled leaves: nothing to report.
System.err.println("labelCount is 0 ID:"+id);
continue;
}
Location location1 = new Location(st);
String diffLine_check = "STID:"+st.getRoot().getId()+","
+location1.getBeginLine()+","+location1.getLastLine()+","
+location1.getBeginCol()+","+location1.getLastCol();
String diffLine = input+";"
+location1.getBeginLine()+","+location1.getLastLine()+","
+location1.getBeginCol()+","+location1.getLastCol();
printLineNum(num_path, diffLine);
printDefs(var_path, replaceMap_src, usedDefs1);
printLineCheck(check_path, diffLine_check);
}
}
/**
 * Appends one check line ("STID:..." plus location info) to the given file.
 *
 * @param outPath6       path of src_check.txt (opened in append mode)
 * @param diffLine_check line to append
 * @throws IOException if the file cannot be written
 */
static private void printLineCheck(String outPath6, String diffLine_check) throws IOException {
	File output6 = new File(outPath6);
	// try-with-resources: the original leaked the writer if append/newLine threw.
	try (BufferedWriter wr6 = new BufferedWriter(new FileWriter(output6, true))) {
		wr6.append(diffLine_check);
		wr6.newLine();
		wr6.flush();
	}
}
/**
 * Appends one "input;beginLine,lastLine,beginCol,lastCol" line to the given file.
 *
 * @param outPath3 path of src-num.txt (opened in append mode)
 * @param diffLine line to append
 * @throws Exception if the file cannot be written
 */
static private void printLineNum(String outPath3, String diffLine) throws Exception {
	File output3 = new File(outPath3);
	// try-with-resources: the original leaked the writer if append/newLine threw.
	try (BufferedWriter wr3 = new BufferedWriter(new FileWriter(output3, true))) {
		wr3.append(diffLine);
		wr3.newLine();
		wr3.flush();
	}
}
/**
 * Appends one line to src-val.txt: first a "begin,last,beginCol,lastCol;" span
 * per used definition, then a "varName-&gt;label;" pair per replacement entry.
 *
 * @param outPath4       path of src-val.txt (opened in append mode)
 * @param replaceMap_src variable-name to label replacements for one sub-tree
 * @param usedDefs1      definitions referenced by that sub-tree
 * @throws IOException if the file cannot be written
 */
static private void printDefs(String outPath4, HashMap<String , String> replaceMap_src,
		HashSet<Definition> usedDefs1) throws IOException {
	File output4 = new File(outPath4);
	// try-with-resources: the original leaked the writer if any append threw.
	try (BufferedWriter wr4 = new BufferedWriter(new FileWriter(output4, true))) {
		for (Definition def1 : usedDefs1) {
			SubTree st1 = new SubTree(def1.getRoot(), def1.getTc(), 0, "");
			Location location1 = new Location(st1);
			// (dropped a stray leading unary '+' from the original append expression)
			wr4.append(location1.getBeginLine()+","+location1.getLastLine()
				+","+location1.getBeginCol()+","+location1.getLastCol()+";");
		}
		for (Map.Entry<String, String> entry : replaceMap_src.entrySet()) {
			String varName = entry.getKey();
			String label = entry.getValue();
			wr4.append(varName+"->"+label+";");
		}
		wr4.newLine();
		wr4.flush();
	}
}
}
| 8,390 | 31.905882 | 107 |
java
|
SeqTrans
|
SeqTrans-master/Migration/src/collect/CommitFileList.java
|
package collect;
import java.io.BufferedWriter;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.LinkedList;
import java.util.List;
import org.apache.commons.exec.CommandLine;
import org.apache.commons.exec.DefaultExecuteResultHandler;
import org.apache.commons.exec.DefaultExecutor;
import org.eclipse.jgit.api.Git;
import org.eclipse.jgit.api.errors.GitAPIException;
import org.eclipse.jgit.diff.DiffEntry;
import org.eclipse.jgit.diff.DiffFormatter;
import org.eclipse.jgit.diff.RawTextComparator;
import org.eclipse.jgit.lib.ObjectId;
import org.eclipse.jgit.lib.ObjectReader;
import org.eclipse.jgit.lib.Repository;
import org.eclipse.jgit.revwalk.RevCommit;
import org.eclipse.jgit.revwalk.RevWalk;
import org.eclipse.jgit.storage.file.FileRepositoryBuilder;
import org.eclipse.jgit.treewalk.CanonicalTreeParser;
import structure.ChangePair;
import utils.FileOperation;
public class CommitFileList {
/**
 * Collecting API change pairs from git commit diff logs.
 * Entry point: walks the history of the configured repository starting from the
 * given commit and writes each (new,old) commit-hash pair to changeList.txt.
 * @throws Exception
 */
public static void main(String[] args) throws Exception {
// TODO Auto-generated method stub
String versionCommit="66471836f584d5c73be18367e1db4c4783b0cb48";//commit hash to start the walk from (original comment was mojibake)
String path="D:\\workspace\\poi\\";//local path of the repository working copy (original comment was mojibake)
// autoExtraction(versionCommit, path);
getChangeList(versionCommit, path);
}
/**
 * Walks the repository history starting at {@code versionCommit} and writes
 * one "newHash,oldHash" line per adjacent commit pair to changeList.txt in the
 * current working directory.
 *
 * @param versionCommit hash of the commit the walk starts from
 * @param classPath     local path inside the git working copy
 * @throws Exception if the repository cannot be opened or the file written
 */
public static void getChangeList(String versionCommit, String classPath) throws Exception {
	FileRepositoryBuilder builder = new FileRepositoryBuilder();
	builder.setMustExist(true);
	builder.addCeilingDirectory(new File(classPath));
	builder.findGitDir(new File(classPath));
	// try-with-resources: the original never closed the Repository, and leaked
	// the RevWalk/BufferedWriter on exception.
	try (Repository repo = builder.build()) {
		RevCommit currentCommit;
		try (RevWalk walk = new RevWalk(repo)) {
			ObjectId versionId = repo.resolve(versionCommit);
			currentCommit = walk.parseCommit(versionId);
		}
		System.out.println(currentCommit.getName());
		LinkedList<RevCommit> commits = getCommitList(currentCommit, repo);
		ArrayList<ChangePair> changePairs = new ArrayList<ChangePair>();
		for (int i = 0; i < commits.size() - 1; i++) {
			RevCommit newCommit = commits.get(i);
			RevCommit oldCommit = commits.get(i + 1);
			ChangePair cp = getChangPair(newCommit, oldCommit, repo);
			changePairs.add(cp);//all the changePairs in this list
		}
		File list = new File("changeList.txt");
		try (BufferedWriter wr = new BufferedWriter(new FileWriter(list))) {
			for (ChangePair cp : changePairs) {
				wr.append(cp.getNewCommit().getName() + "," + cp.getOldCommit().getName());
				wr.newLine();
			}
			wr.flush();
		}
	}
}
/**
 * Walks the repository history from {@code versionCommit}, builds a ChangePair
 * for every adjacent commit pair, then runs {@link #runExec} on each pair to
 * check out both commits and copy the changed files plus diff logs to disk.
 *
 * @param versionCommit hash of the commit the walk starts from
 * @param classPath     local path of the git working copy
 * @throws Exception on repository, exec, or I/O failure
 */
public static void autoExtraction(String versionCommit, String classPath) throws Exception {
FileRepositoryBuilder builder = new FileRepositoryBuilder();
builder.setMustExist(true);
builder.addCeilingDirectory(new File(classPath));
builder.findGitDir(new File(classPath));
Repository repo;
repo = builder.build();
RevWalk walk = new RevWalk(repo);
ObjectId versionId=repo.resolve(versionCommit);
RevCommit currentCommit=walk.parseCommit(versionId);
walk.close();
System.out.println(currentCommit.getName());
LinkedList<RevCommit> commits = getCommitList(currentCommit, repo);
ArrayList<ChangePair> changePairs = new ArrayList<ChangePair>();
for(int i=0;i<commits.size()-1;i++) {
RevCommit newCommit = commits.get(i);
RevCommit oldCommit = commits.get(i+1);
ChangePair cp = getChangPair(newCommit, oldCommit, repo);
changePairs.add(cp);//all the changePairs in this list
}
// count is the running change-pair directory index ("cpN") used by runExec.
int count = 0;
for(ChangePair cp : changePairs) {
RevCommit newCommit = cp.getNewCommit();
RevCommit oldCommit = cp.getOldCommit();
List<DiffEntry> diffs = cp.getDiffs();
count = runExec(classPath, newCommit, oldCommit, count, diffs, repo);
System.out.println("endExec");
}
}
/**
 * For one commit pair: checks out the new commit, saves per-entry diff logs and
 * the changed files under "J:\Telegram_commit\cpN\", then checks out the old
 * commit and saves the old versions of the same files. Returns the next cpN
 * index (i unchanged when the pair had no useful .java diffs, i+1 otherwise).
 *
 * NOTE(review): relies on fixed Thread.sleep() delays around "git checkout";
 * presumably to let the working copy settle — fragile, verify on slow disks.
 *
 * @param classPath working-copy root the checkout runs in
 * @param i         current change-pair index
 * @return the index to use for the next change pair
 */
private static int runExec(String classPath, RevCommit newCommit, RevCommit oldCommit, int i, List<DiffEntry> diffs, Repository repo) throws Exception {
String diskpath = "J:\\Telegram_commit\\";
String newCommitName = newCommit.getName();
String oldCommitName = oldCommit.getName();
String rootPath = diskpath+"cp"+String.valueOf(i)+"\\"+newCommitName+"\\";
String line = "cmd.exe /C git checkout "+newCommitName;
CommandLine cmdLine = CommandLine.parse(line);
DefaultExecuteResultHandler resultHandler = new DefaultExecuteResultHandler();
DefaultExecutor executor = new DefaultExecutor();
executor.setExitValue(1); //treat exit value 1 as success (original comment was mojibake)
executor.setWorkingDirectory(new File(classPath));//run git inside the working copy (original comment was mojibake)
executor.execute(cmdLine, resultHandler);
Thread.sleep(1000);
ArrayList<DiffEntry> filterDiffs = getUsefulDiffs(diffs);
System.out.println("Diffsize:"+filterDiffs.size());
if(filterDiffs.size()==0) {
return i;// continue the next iter
}
String diffDir = diskpath+"cp"+String.valueOf(i)+"\\diff_logs\\";
File diffDirFile = new File(diffDir);
if (!diffDirFile.exists()) {
diffDirFile.mkdirs();
}
// One formatted diff log file per useful entry.
int count = 0;
for (DiffEntry entry : filterDiffs) {
ByteArrayOutputStream out = new ByteArrayOutputStream();
DiffFormatter df = new DiffFormatter(out);
df.setDiffComparator(RawTextComparator.WS_IGNORE_ALL);
df.setRepository(repo);
String path = diffDir+"diff"+String.valueOf(count)+".txt";
BufferedWriter wr = new BufferedWriter(new FileWriter(new File(path)));
df.format(entry);
String diffText = out.toString("UTF-8");
// System.out.println(diffText);
wr.append(diffText);
wr.close();
df.close();
count++;
}
String diffPath = diskpath+"cp"+String.valueOf(i)+"\\diffs.txt";
File diffFile = new File(diffPath);
if (!diffFile.getParentFile().exists()) {
diffFile.getParentFile().mkdirs();
}
String tagPath = diskpath+"cp"+String.valueOf(i)+"\\tags.txt";
BufferedWriter wr = new BufferedWriter(new FileWriter(diffFile));
BufferedWriter wr1 = new BufferedWriter(new FileWriter(tagPath));
wr.append(oldCommitName+";"+newCommitName);
wr.newLine();
wr.flush();//first line is the commit hash pair (original comment was mojibake)
wr1.append("newCommit:\n"+newCommit.getFullMessage());
wr1.newLine();
wr1.append("oldCommit:\n"+oldCommit.getFullMessage());
wr1.close();
// Record old;new paths and copy the new-side files while the new commit is checked out.
for (DiffEntry entry : filterDiffs) {
wr.append(entry.getOldPath()+";"+entry.getNewPath());
wr.newLine();
wr.flush();
String newFilePath = classPath+entry.getNewPath();
String copyPath = rootPath+entry.getNewPath();
FileOperation.copyFile(new File(newFilePath), new File(copyPath));//copy changeFile
}
resultHandler.waitFor();
Thread.sleep(5000);
String rootPath1 = diskpath+"cp"+String.valueOf(i)+"\\"+oldCommitName+"\\";
String line1 = "cmd.exe /C git checkout "+oldCommitName;
CommandLine cmdLine1 = CommandLine.parse(line1);
DefaultExecuteResultHandler resultHandler1 = new DefaultExecuteResultHandler();
DefaultExecutor executor1 = new DefaultExecutor();
executor1.setExitValue(1); //treat exit value 1 as success (original comment was mojibake)
executor1.setWorkingDirectory(new File(classPath));//run git inside the working copy (original comment was mojibake)
executor1.execute(cmdLine1, resultHandler1);
Thread.sleep(1000);
// Copy the old-side files while the old commit is checked out.
for (DiffEntry entry : filterDiffs) {
String oldFilePath = classPath+entry.getOldPath();
String copyPath = rootPath1+entry.getOldPath();
FileOperation.copyFile(new File(oldFilePath), new File(copyPath));//copy changeFile
}
resultHandler1.waitFor();
Thread.sleep(5000);
i++;
wr.close();
return i;
}//Execute checkout and copy diffs
/**
 * Filters a diff list down to modifications of Java files.
 * Entries where either side is "/dev/null" (file added or deleted) are skipped.
 *
 * @param diffs raw diff entries of one commit pair
 * @return only the entries whose old and new paths both name .java files
 */
public static ArrayList<DiffEntry> getUsefulDiffs(List<DiffEntry> diffs){
	ArrayList<DiffEntry> filterDiffs = new ArrayList<DiffEntry>();
	for (DiffEntry entry : diffs) {
		String oldFilePath = entry.getOldPath();
		String newFilePath = entry.getNewPath();
		if(oldFilePath.contains("/dev/null")||newFilePath.contains("/dev/null")) {
			// Added or deleted file: one side is "/dev/null", not a change pair we want.
			continue;
		}else if(oldFilePath.endsWith(".java")&&newFilePath.endsWith(".java")){
			// endsWith instead of contains: the original contains(".java") also
			// matched paths that merely embed ".java" (e.g. "Foo.java.orig").
			filterDiffs.add(entry);
		}
	}
	return filterDiffs;
}
/**
 * Collects the full commit history reachable from {@code startCommit},
 * in walk order (newest first).
 *
 * @param startCommit commit the walk is seeded with
 * @param repo        repository to walk
 * @return all reachable commits, in the order the RevWalk yields them
 * @throws Exception if the walk cannot be started
 */
public static LinkedList<RevCommit> getCommitList(RevCommit startCommit, Repository repo) throws Exception{
	LinkedList<RevCommit> history = new LinkedList<RevCommit>();
	RevWalk revWalk = new RevWalk(repo);
	revWalk.markStart(startCommit);
	Iterator<RevCommit> it = revWalk.iterator();
	while (it.hasNext()) {
		history.add(it.next());
	}
	revWalk.close();
	return history;
}
/**
 * Computes the diff between two commits and wraps it in a ChangePair.
 * A GitAPIException during the diff is printed and swallowed, in which case the
 * returned pair carries a null diff list.
 *
 * @param commit1 the newer commit
 * @param commit2 the older commit
 * @param repo    repository both commits belong to
 * @return a ChangePair of (commit1, commit2, diffs) — diffs may be null on error
 */
public static ChangePair getChangPair(RevCommit commit1, RevCommit commit2, Repository repo) throws Exception {
List<DiffEntry> returnDiffs = null;
ObjectId head = commit1.getTree().getId();
ObjectId oldHead = commit2.getTree().getId();
// System.out.println("Printing diff between the Revisions: " + commit1.getName() + " and " + commit2.getName());
// prepare two iterators to compute the diffs
try (ObjectReader reader = repo.newObjectReader()) {
CanonicalTreeParser oldTreeIter = new CanonicalTreeParser();
oldTreeIter.reset(reader, oldHead);
CanonicalTreeParser newTreeIter = new CanonicalTreeParser();
newTreeIter.reset(reader, head);
// finally get the list of changed files
try (Git git = new Git(repo)) {
List<DiffEntry> diffs= git.diff()
.setNewTree(newTreeIter)
.setOldTree(oldTreeIter)
.call();
returnDiffs=diffs;
// for (DiffEntry entry : returnDiffs) {
// System.out.println(entry.getNewPath());
// System.out.println("------------------");
// }
} catch (GitAPIException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
ChangePair cp = new ChangePair(commit1, commit2, returnDiffs);
return cp;
}
/**
 * Returns the commit immediately preceding {@code commit} in walk order
 * (the second commit yielded by a RevWalk seeded with it), or null when the
 * walk yields no second commit.
 *
 * @param commit commit whose predecessor is wanted
 * @param repo   repository to walk
 * @return the previous commit, or null if {@code commit} is the last one
 * @throws IOException if the walk cannot be started
 */
public static RevCommit getPrevHash(RevCommit commit, Repository repo) throws IOException {
	RevCommit previous = null;
	try (RevWalk walk = new RevWalk(repo)) {
		// Starting point
		walk.markStart(commit);
		int count = 0;
		for (RevCommit rev : walk) {
			// got the previous commit.
			if (count == 1) {
				previous = rev;
				// Fix: stop here instead of walking the entire remaining history
				// (the original kept iterating to the root for no effect).
				break;
			}
			count++;
		}
	}
	//Reached end and no previous commits.
	return previous;
}
/**
 * Prints a Unix timestamp (seconds) as "yyyy-MM-dd HH:mm:ss" in the default
 * time zone.
 *
 * @param commitTime commit time as a Unix epoch in seconds
 */
static void printTime(int commitTime) {
	SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
	// Widen to long BEFORE multiplying so the seconds->millis conversion cannot
	// overflow int; this also removes the original's pointless
	// String.valueOf / Long.parseLong round-trip.
	long timestamp = commitTime * 1000L;
	String date = formatter.format(new Date(timestamp));
	System.out.println(date);
}
}
| 10,989 | 37.159722 | 153 |
java
|
SeqTrans
|
SeqTrans-master/Migration/src/collect/DiffAnalysis.java
|
package collect;
import java.io.BufferedWriter;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.exec.CommandLine;
import org.apache.commons.exec.DefaultExecuteResultHandler;
import org.apache.commons.exec.DefaultExecutor;
import org.eclipse.jgit.diff.DiffEntry;
import org.eclipse.jgit.diff.DiffFormatter;
import org.eclipse.jgit.diff.RawTextComparator;
import org.eclipse.jgit.lib.ObjectId;
import org.eclipse.jgit.lib.Repository;
import org.eclipse.jgit.revwalk.RevCommit;
import org.eclipse.jgit.revwalk.RevWalk;
import org.eclipse.jgit.storage.file.FileRepositoryBuilder;
import structure.API;
import structure.ChangePair;
import structure.Diff;
import utils.FileOperation;
import utils.ReadAPI;
public class DiffAnalysis {
/**
* Analysing diffs between each commit pair
* @throws Exception
*/
private static LinkedHashSet<API> apis = new LinkedHashSet<API>();
private static LinkedHashSet<Diff> diffs = new LinkedHashSet<Diff>();
private static HashMap<String, ArrayList<Diff>> diffMap = new HashMap<String, ArrayList<Diff>>();
/**
 * Entry point: loads the tracked API list from the "apis" file, then scans the
 * configured repository's history between the two commits and extracts the
 * diffs that touch those APIs.
 */
public static void main(String[] args) throws Exception{
String path = "apis";
apis = ReadAPI.readAPI(path);
String versionCommit="28eb8dfd0ef959fd5ad7d5d22f1d32879707c0a0";//commit hash the walk starts from (original comment was mojibake)
String endCommit = "0f4b89015308ca85c5304dd6e16c0c4b4c3cad3f";//commit at which the walk stops (original comment was mojibake)
String path1="J:\\Telegram\\";//local path of the repository working copy (original comment was mojibake)
autoExtraction(versionCommit, path1, endCommit);
}
/**
 * Walks the repository history from {@code versionCommit} down to
 * {@code endCommit}, analyses every useful .java diff for tracked-API usage
 * (filling the static {@code diffs}/{@code diffMap}), then checks out and
 * copies the affected files per new-commit via {@link #runExec}.
 *
 * @param versionCommit hash the walk starts from
 * @param classPath     local path of the git working copy
 * @param endCommit     hash (or prefix) at which the walk stops
 * @throws Exception on repository, exec, or I/O failure
 */
public static void autoExtraction(String versionCommit, String classPath, String endCommit) throws Exception {
FileRepositoryBuilder builder = new FileRepositoryBuilder();
builder.setMustExist(true);
builder.addCeilingDirectory(new File(classPath));
builder.findGitDir(new File(classPath));
Repository repo;
repo = builder.build();
RevWalk walk = new RevWalk(repo);
ObjectId versionId=repo.resolve(versionCommit);
RevCommit currentCommit=walk.parseCommit(versionId);
walk.close();
System.out.println(currentCommit.getName());
LinkedList<RevCommit> commits = CommitFileList.getCommitList(currentCommit, repo);
ArrayList<ChangePair> changePairs = new ArrayList<ChangePair>();
for(int i=0;i<commits.size()-1;i++) {
RevCommit newCommit = commits.get(i);
RevCommit oldCommit = commits.get(i+1);
ChangePair cp = CommitFileList.getChangPair(newCommit, oldCommit, repo);
changePairs.add(cp);//all the changePairs in this list
// Stop once the walk reaches the configured end commit.
if(oldCommit.getId().toString().contains(endCommit))
break;
}
for(ChangePair cp : changePairs) {
List<DiffEntry> diffs = cp.getDiffs();
RevCommit newCommit = cp.getNewCommit();
RevCommit oldCommit = cp.getOldCommit();
List<DiffEntry> filterDiffs = CommitFileList.getUsefulDiffs(diffs);
for(int i=0;i<filterDiffs.size();i++) {
DiffEntry diffEntry = filterDiffs.get(i);
// Populates the static diffs set and diffMap (keyed by new-commit hash).
analyseDiffText(diffEntry, newCommit, oldCommit, repo);
}
}
System.out.println("size:"+diffs.size());
System.out.println("Mapsize:"+diffMap.size());
int count=0;
for(Map.Entry<String, ArrayList<Diff>> entry : diffMap.entrySet()) {
String newCommitName = entry.getKey();
ArrayList<Diff> diffList = entry.getValue();
String oldCommitName = diffList.get(0).getOldCommitId();
count = runExec(classPath, newCommitName, oldCommitName, count, diffList, repo);
}
}
/**
 * For one commit pair: checks out the new commit, records the old;new file
 * paths in "J:\Telegram_test\cpN\diffs.txt" and copies each changed file once,
 * then checks out the old commit and copies the old versions. Returns the next
 * cpN index (i unchanged when diffList is empty, i+1 otherwise).
 *
 * NOTE(review): relies on fixed Thread.sleep() delays around "git checkout";
 * presumably to let the working copy settle — fragile, verify on slow disks.
 *
 * @param classPath working-copy root the checkout runs in
 * @param i         current change-pair index
 * @return the index to use for the next change pair
 */
private static int runExec(String classPath, String newCommitName, String oldCommitName, int i, ArrayList<Diff> diffList, Repository repo) throws Exception {
String diskpath = "J:\\Telegram_test\\";
String rootPath = diskpath+"cp"+String.valueOf(i)+"\\"+newCommitName+"\\";
String line = "cmd.exe /C git checkout "+newCommitName;
CommandLine cmdLine = CommandLine.parse(line);
System.out.println("commit at:"+newCommitName);
DefaultExecuteResultHandler resultHandler = new DefaultExecuteResultHandler();
DefaultExecutor executor = new DefaultExecutor();
executor.setExitValue(1); //treat exit value 1 as success (original comment was mojibake)
executor.setWorkingDirectory(new File(classPath));//run git inside the working copy (original comment was mojibake)
executor.execute(cmdLine, resultHandler);
Thread.sleep(5000);
System.out.println("Diffsize:"+diffList.size());
if(diffList.size()==0) {
return i;// continue the next iter
}
String diffPath = diskpath+"cp"+String.valueOf(i)+"\\diffs.txt";
File diffFile = new File(diffPath);
if (!diffFile.getParentFile().exists()) {
diffFile.getParentFile().mkdirs();
}
BufferedWriter wr = new BufferedWriter(new FileWriter(diffFile));
wr.append(oldCommitName+";"+newCommitName);
wr.newLine();
wr.flush();//first line is the commit hash pair (original comment was mojibake)
// fileList de-duplicates: each new-side path is recorded and copied only once.
HashSet<String> fileList = new HashSet<String>();
for (Diff diff : diffList) {
String filePath = diff.getNewPath();
if(!fileList.contains(filePath)) {
wr.append(diff.getOldPath()+";"+diff.getNewPath());
wr.newLine();
wr.flush();
String newFilePath = classPath+filePath;
String copyPath = rootPath+filePath;
FileOperation.copyFile(new File(newFilePath), new File(copyPath));//copy changeFile
fileList.add(filePath);
}
}
System.out.println("copy complete");
resultHandler.waitFor();
Thread.sleep(5000);
String rootPath1 = diskpath+"cp"+String.valueOf(i)+"\\"+oldCommitName+"\\";
String line1 = "cmd.exe /C git checkout "+oldCommitName;
CommandLine cmdLine1 = CommandLine.parse(line1);
System.out.println("commit at:"+oldCommitName);
DefaultExecuteResultHandler resultHandler1 = new DefaultExecuteResultHandler();
DefaultExecutor executor1 = new DefaultExecutor();
executor1.setExitValue(1); //treat exit value 1 as success (original comment was mojibake)
executor1.setWorkingDirectory(new File(classPath));//run git inside the working copy (original comment was mojibake)
executor1.execute(cmdLine1, resultHandler1);
Thread.sleep(5000);
// Same de-duplication for the old-side paths.
HashSet<String> fileList2 = new HashSet<String>();
for (Diff diff : diffList) {
String filePath = diff.getOldPath();
if(!fileList2.contains(filePath)) {
String oldFilePath = classPath+diff.getOldPath();
String copyPath = rootPath1+diff.getOldPath();
FileOperation.copyFile(new File(oldFilePath), new File(copyPath));//copy changeFile
fileList2.add(filePath);
}
}
System.out.println("copy complete");
resultHandler1.waitFor();
Thread.sleep(5000);
i++;
wr.close();
return i;
}//Execute checkout and copy diffs
/**
 * Formats one diff entry, parses its unified-diff text hunk by hunk, and for
 * every hunk whose added/removed lines mention a tracked API, records a Diff
 * (with old/new line ranges) into the static {@code diffs} set and
 * {@code diffMap} (keyed by the new commit hash).
 *
 * @param diffEntry the JGit diff entry to analyse
 * @param newCommit newer commit of the pair
 * @param oldCommit older commit of the pair
 * @param repo      repository the commits belong to
 * @throws Exception if the first line does not name a .java file or a hunk
 *                   header holds more than four numbers
 */
public static void analyseDiffText(DiffEntry diffEntry, RevCommit newCommit, RevCommit oldCommit, Repository repo) throws Exception {
ByteArrayOutputStream out = new ByteArrayOutputStream();
DiffFormatter df = new DiffFormatter(out);
df.setDiffComparator(RawTextComparator.WS_IGNORE_ALL);
df.setRepository(repo);
df.format(diffEntry);
String diffText = out.toString("UTF-8");
String[] texts = diffText.split("\n");
String newCommitName = newCommit.getName();
String oldCommitName = oldCommit.getName();
for(int i=0;i<texts.length;i++) {
String text = texts[i];
int oldBeginLine = 0;
int oldEndLine = 0;
int newBeginLine = 0;
int newEndLine = 0;
if(i==0&&!text.contains(".java"))
throw new Exception("error, not contains java file!");
if(text.contains("@@")) {
// Hunk header "@@ -a,b +c,d @@": pull out the four numbers in order.
Pattern p = Pattern.compile("\\d{1,}");//one or more digits (original comment was mojibake)
Matcher m = p.matcher(text);
int n = 0;
while(m.find()) {
if(n==0) {
oldBeginLine = Integer.valueOf(m.group());
}else if(n==1){
// Header stores a count, so end = begin + count.
oldEndLine = oldBeginLine+Integer.valueOf(m.group());
}else if(n==2) {
newBeginLine = Integer.valueOf(m.group());
}else if(n==3) {
newEndLine = newBeginLine+Integer.valueOf(m.group());
}else
throw new Exception("error n!");
n++;
}
i++;
text = texts[i];
String addLine = "";
String deleteLine = "";
// Accumulate the +/- lines of this hunk until the next hunk header.
while(!text.contains("@@")&&i<texts.length-1) {
String first = text.substring(0, 1);
if(first.contains("+")) {
addLine += text;
}else if(first.contains("-")) {
deleteLine += text;
}
i++;
text = texts[i];
}
Boolean containsAPI = containsAPI(deleteLine, addLine);
if (containsAPI) {
String newid = newCommitName;
String oldid = oldCommitName;
String oldPath = diffEntry.getOldPath();
String newPath = diffEntry.getNewPath();
Diff diff = new Diff(oldid, newid, oldPath, newPath, deleteLine, addLine);
diff.setOldBeginLine(oldBeginLine);
diff.setOldEndLine(oldEndLine);
diff.setNewBeginLine(newBeginLine);
diff.setNewEndLine(newEndLine);
diffs.add(diff);
// Group the hunk under its new-commit hash.
if(diffMap.get(newid)==null) {
ArrayList<Diff> diffList = new ArrayList<Diff>();
diffList.add(diff);
diffMap.put(newid, diffList);
}else {
diffMap.get(newid).add(diff);
}
}
}
}
// String path1 = "test//diff"+String.valueOf(count)+".txt";
// BufferedWriter wr = new BufferedWriter(new FileWriter(new File(path1)));
// System.out.println(diffText);
// wr.append("test"+diffText);
// wr.close();
df.close();
}
/**
 * Returns true when either side of a diff hunk mentions any tracked API
 * method name.
 *
 * NOTE(review): matching is by method name only; {@code api.getClassName()} is
 * not consulted, so unrelated methods sharing a name also match.
 *
 * @param src concatenated removed ("-") lines of the hunk
 * @param dst concatenated added ("+") lines of the hunk
 * @return true if any tracked method name occurs in either string
 */
public static Boolean containsAPI(String src, String dst) {
	for(API api : apis) {
		// (removed the unused local for api.getClassName(); it was only
		// referenced by commented-out debug code)
		String mName = api.getMethodName();
		if(src.contains(mName) || dst.contains(mName)) {
			return true;
		}
	}
	return false;
}
}
| 9,748 | 33.817857 | 158 |
java
|
SeqTrans
|
SeqTrans-master/Migration/src/collect/FileFilter.java
|
package collect;
import gumtreediff.gen.srcml.SrcmlJavaTreeGenerator;
import gumtreediff.matchers.MappingStore;
import gumtreediff.matchers.Matcher;
import gumtreediff.matchers.Matchers;
import gumtreediff.tree.TreeContext;
import structure.API;
import structure.Migration;
import utils.FileOperation;
import java.io.*;
import java.util.*;
public class FileFilter {
private static LinkedHashSet<API> apis = new LinkedHashSet<API>();
/**
 * Entry point (currently a no-op): the API loading and filtering calls are
 * commented out, so running this does nothing.
 */
public static void main(String[] args) throws Exception{
String path = "apis";// NOTE(review): unused while the readAPI call below stays commented out
// apis = ReadAPI.readAPI(path);
// for(String api : apis) {
// System.out.println(api);
// }
// FileFilter.Filter("J:\\test1\\");
}
/**
 * Scans every change-pair directory under {@code path}. For each src/tgt file
 * pair listed in that directory's diffs.txt, keeps the pair (one
 * "srcPath;tgtPath" line appended to validFiles.txt) when either file both
 * imports a tracked API class and mentions one of its tracked method names.
 *
 * @param path root directory holding the cpN change-pair directories
 * @throws Exception when a change-pair directory does not contain exactly
 *                   the two commit dirs plus the diff log, or on I/O failure
 */
public static void Filter(String path) throws Exception {
	File validFile = new File("validFiles.txt");
	File rootDir = new File(path);
	File[] dirs = rootDir.listFiles();
	HashMap<String, HashSet<String>> importMap = getImportList();
	// Fully-qualified import names we track (the map's key set).
	HashSet<String> imports = new HashSet<String>(importMap.keySet());
	// try-with-resources: the original leaked readers/writer on exception.
	try (BufferedWriter wr = new BufferedWriter(new FileWriter(validFile))) {
		for (File dir : dirs) {
			System.out.println("Analyse "+dir.getName());
			if (dir.listFiles().length != 3) {//two commit dirs and the diff logs
				throw new Exception("Error!");
			}
			String diffPath = dir.getAbsolutePath()+"\\diffs.txt";
			File diffFile = new File(diffPath);
			try (BufferedReader br = new BufferedReader(new FileReader(diffFile))) {
				String tmpline = br.readLine();
				String oldCommit = tmpline.split(";")[0];
				String newCommit = tmpline.split(";")[1];
				while ((tmpline = br.readLine()) != null) {
					String[] diff = tmpline.split(";");
					String srcPath = dir.getAbsolutePath()+"\\"+oldCommit+"\\"+diff[0];
					String tgtPath = dir.getAbsolutePath()+"\\"+newCommit+"\\"+diff[1];
					// Keep the pair when either side uses a tracked API
					// (the original duplicated this scan inline for both files).
					boolean srcUses = fileUsesTrackedApi(srcPath, importMap, imports);
					boolean tgtUses = fileUsesTrackedApi(tgtPath, importMap, imports);
					if (srcUses || tgtUses) {
						wr.append(srcPath+";"+tgtPath);
						wr.newLine();
						wr.flush();
					}
				}
			}
		}
	}
}
/**
 * Scans one Java file for tracked-API usage: once a tracked import has been
 * seen, every subsequent line is checked for that import's tracked method
 * names.
 *
 * @param filePath  file to scan
 * @param importMap import name -&gt; tracked method names of that import
 * @param imports   the tracked import names (key set of importMap)
 * @return true when the file imports a tracked class and later (or on the same
 *         line) mentions one of its tracked methods
 * @throws IOException if the file cannot be read
 */
private static boolean fileUsesTrackedApi(String filePath,
		HashMap<String, HashSet<String>> importMap, HashSet<String> imports) throws IOException {
	boolean uses = false;
	boolean containsImport = false;
	ArrayList<String> trackedImports = new ArrayList<String>();
	try (BufferedReader br = new BufferedReader(new FileReader(new File(filePath)))) {
		String line;
		while ((line = br.readLine()) != null) {
			if (line.contains("import")) {
				String[] parts = line.split(" ");
				if (parts.length < 2) {
					continue;// degenerate "import" line: skip it entirely, as the original did
				}
				String className = parts[parts.length - 1];
				className = className.substring(0, className.length() - 1);//delete ";"
				if (imports.contains(className)) {
					containsImport = true;
					trackedImports.add(className);
				}
			}
			if (containsImport) {
				for (String imp : trackedImports) {
					HashSet<String> methods = importMap.get(imp);
					for (String m : methods) {
						if (line.contains(m)) {
							uses = true;
							break;
						}
					}
				}
			}
		}
	}
	return uses;
}
/**
 * Builds a map from each tracked API's import name (everything in the long
 * name before the final '.') to the set of its tracked method names.
 *
 * @return import name -&gt; method names, built from the static {@code apis}
 * @throws Exception if the API list has not been loaded yet
 */
private static HashMap<String, HashSet<String>> getImportList() throws Exception {
	if(apis.size()==0)
		throw new Exception("error!");
	HashMap<String, HashSet<String>> importMap = new HashMap<String, HashSet<String>>();
	for(API api : apis) {
		String methodName = api.getMethodName();
		String longName = api.getLongName();
		// package + class part of the fully-qualified method name
		String importName = longName.substring(0, longName.lastIndexOf("."));
		// computeIfAbsent replaces the original containsKey/put branching.
		importMap.computeIfAbsent(importName, k -> new HashSet<String>()).add(methodName);
	}
	return importMap;
}
/**
 * Reads one change-pair directory's diffs.txt (first line: repo name, second
 * line: "srcHash;dstHash", then one "srcPath;dstPath" per line), parses each
 * src/dst Java file into trees, matches them, and returns the resulting
 * Migration objects.
 *
 * @param path   change-pair directory containing diffs.txt and both commit dirs
 * @param filter NOTE(review): currently unused inside this method
 * @return the migrations that could be parsed and matched
 * @throws Exception if diffs.txt or a listed source file is missing
 */
public static ArrayList<Migration> readMigrationList(String path, String filter) throws Exception{
ArrayList<Migration> migrates = new ArrayList<Migration>();
File cpFile = new File(path);
System.err.println("Analyse:"+ cpFile.getName());
String diffPath = cpFile.getAbsolutePath()+"\\diffs.txt";
File diffFile = new File(diffPath);
if(!diffFile.exists())
throw new Exception("file is not existed!");
BufferedReader br = new BufferedReader(new FileReader(diffFile));
String tmpline = br.readLine();
String repoName = tmpline;
tmpline = br.readLine();
String srcHash = tmpline.split(";")[0];
String dstHash = tmpline.split(";")[1];
while((tmpline=br.readLine())!=null) {
String[] path1 = tmpline.split(";");// NOTE(review): original re-split below; kept as-is
String path2 = tmpline.split(";")[1];
path1 = cpFile.getPath()+"//"+srcHash+"//"+path1;
path2 = cpFile.getPath()+"//"+dstHash+"//"+path2;
File srcFile = new File(path1);
if (!srcFile.exists()) {
br.close();
throw new Exception("srcfile is not existed!");
}
System.out.println("Analyse:"+ srcFile.getName());
Calendar calendar = Calendar.getInstance();
Date time = calendar.getTime();
System.out.println(time);
File dstFile = new File(path2);
if (!dstFile.exists()) {
br.close();
throw new Exception("dstfile is not existed!");
}
try {
TreeContext tc1 = new SrcmlJavaTreeGenerator().generateFromFile(srcFile);
//need to be changed by different languages
TreeContext tc2 = new SrcmlJavaTreeGenerator().generateFromFile(dstFile);
Matcher m = Matchers.getInstance().getMatcher(tc1.getRoot(), tc2.getRoot());
m.match();
MappingStore mappings = m.getMappings();
System.out.println("Mapping size: "+mappings.asSet().size());
Migration mi = new Migration(tc1, tc2, mappings, srcFile.getAbsolutePath(), dstFile.getAbsolutePath());
mi.setRepoName(repoName);
mi.setSrcHash(srcHash);
mi.setDstHash(dstHash);
migrates.add(mi);
} catch (Exception e) {
// NOTE(review): parse/match failures are silently skipped by design —
// the pair is dropped and the loop moves to the next file pair.
continue;
// TODO: handle exception
}
}
br.close();
return migrates;
}
	/**
	 * Builds migrations from a Tufano-style change-pair directory, where the
	 * pre-fix files live under {@code P_dir} and post-fix files under
	 * {@code F_dir}. Files are paired by simple file name; duplicates are
	 * disambiguated by the longest shared path prefix.
	 *
	 * @param path change-pair directory (its name is used as the repo id)
	 * @return migrations for every successfully paired and parsed file
	 * @throws Exception on a missing counterpart or unresolvable duplicate
	 */
	public static ArrayList<Migration> readTufanoList(String path) throws Exception{
		ArrayList<Migration> migrates = new ArrayList<Migration>();
		File cpFile = new File(path);
		String hashID = cpFile.getName();
		System.err.println("Analyse:"+ cpFile.getName());
		String srcDiffPath = cpFile.getAbsolutePath()+"\\P_dir\\";
		String dstDiffPath = cpFile.getAbsolutePath()+"\\F_dir\\";
		ArrayList<File> srcList = new ArrayList<File>();
		ArrayList<File> dstList = new ArrayList<File>();
		FileOperation.traverseFolder(srcDiffPath, srcList);
		FileOperation.traverseFolder(dstDiffPath, dstList);
		if(srcList.size()==0||dstList.size()==0) {
			System.err.println("file is not existed!");
			return migrates;
		}
		System.out.println("FileSize:"+srcList.size()+","+dstList.size());
		if(srcList.size()>dstList.size()) {
			System.err.println("file number is not the same!");
			return migrates;
		}
		for(File srcFile : srcList) {
			if(srcFile.length()>1048000)
				continue;// skip the file that bigger than 2MB
			if(srcFile.getName().equals("Run.java")||srcFile.getName().equals("DatabaseVersioningService.java"))
				continue;//This file takes too much time to analyze
			String beforeName = srcFile.getName();
			// collect every post-fix file sharing this simple file name
			int count = 0;
			File targetFile = null;
			List<File> targetList = new ArrayList<File>();
			for(File dstFile : dstList) {
				String afterName = dstFile.getName();
				if(afterName.equals(beforeName)) {
					count++;
					targetList.add(dstFile);
				}
			}
			if(count==0) {
				throw new Exception("dstfile is not existed!");
			}else if(count ==1) {
				targetFile = targetList.get(0);
			}else if(count>1){
				// several candidates: first try the exact P_dir -> F_dir mirror path
				String targetPath = srcFile.getAbsolutePath();
				String[] targetPaths = targetPath.split("\\\\");
				targetPath = targetPath.replace("P_dir", "F_dir");
				Boolean find = false;
				for(File tmpFile : targetList) {
					if(tmpFile.getAbsolutePath().equals(targetPath)) {
						targetFile = tmpFile;
						find = true;
					}
				}
				if(find==false) {//find the file that has the most similar score
					// score = number of matching leading path segments
					int score = 0;
					System.err.println("contain the duplicate!");
					for(File tmpFile : targetList) {
						String[] tmpPaths = tmpFile.getAbsolutePath().split("\\\\");
						int tmpScore = 0;
						for(int i=0;i<tmpPaths.length-1&&i<targetPaths.length-1;i++) {
							if(tmpPaths[i].equals(targetPaths[i]))
								tmpScore++;
						}
						if(tmpScore>score) {
							score = tmpScore;
							targetFile = tmpFile;
							find = true;
						}else if(score!=0&&tmpScore==score) {
							// a tie means we cannot decide which counterpart is right
							System.err.println(targetFile.getAbsolutePath());
							throw new Exception("Duplicate dst file!");
						}
					}
				}
			}
			System.out.println("Analyse:"+ beforeName+", Size:"+srcFile.length());
			Calendar calendar = Calendar.getInstance();
			Date time = calendar.getTime();
			System.out.println(time);
			try {
				TreeContext tc1 = new SrcmlJavaTreeGenerator().generateFromFile(srcFile);
				TreeContext tc2 = new SrcmlJavaTreeGenerator().generateFromFile(targetFile);
				Matcher m = Matchers.getInstance().getMatcher(tc1.getRoot(), tc2.getRoot());
				m.match();
				MappingStore mappings = m.getMappings();
				Migration mi = new Migration(tc1, tc2, mappings, srcFile.getAbsolutePath(), targetFile.getAbsolutePath());
				mi.setRepoName(hashID);
				System.out.println("Mapping size: "+mappings.asSet().size());
				migrates.add(mi);
			} catch (Exception e) {
				// best-effort: skip pairs that fail to parse or match
				continue;
				// TODO: handle exception
			}
		}
		return migrates;
	}
}
| 10,214 | 33.510135 | 110 |
java
|
SeqTrans
|
SeqTrans-master/Migration/src/collect/FilterDefuse.java
|
package collect;
import gumtreediff.actions.ActionGenerator;
import gumtreediff.actions.model.Action;
import gumtreediff.io.TreeIoUtils;
import gumtreediff.matchers.MappingStore;
import gumtreediff.matchers.Matcher;
import gumtreediff.matchers.Matchers;
import gumtreediff.tree.ITree;
import gumtreediff.tree.TreeContext;
import split.Split;
import structure.API;
import structure.Definition;
import structure.Location;
import structure.Migration;
import structure.SubTree;
import utils.Defuse;
import utils.Output;
import utils.Utils;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.*;
/*
* statement-level single line extraction
*/
public class FilterDefuse {
private static LinkedHashSet<API> apis = new LinkedHashSet<API>();
private static HashMap<SubTree, SubTree> treePairs = new HashMap<SubTree, SubTree>();
private static int count = 0;
public static void main (String args[]) throws Exception{
String path = "I:\\20210714-Srqtrans_testcase\\Vulnerability_trainset\\";
String outMode = "lineNum";
String numDir = "data_num\\";
String checkDir = "data_check\\";
String varDir = "data_var\\";
// multiCollect(path, outMode, numDir);
String cpPath = path+"cp329";
FilterDefuse defuse = new FilterDefuse();
defuse.collectDiffwithDefUse(cpPath, outMode, true, true, "");
}
	/**
	 * Runs the def-use extraction over every change-pair directory under
	 * {@code path}, skipping pairs already present in {@code numDir} so an
	 * interrupted run can be resumed.
	 *
	 * @param path    root directory containing cpNNN change-pair folders
	 * @param outMode output mode forwarded to collectDiffwithDefUse
	 *                ("txt", "json" or "lineNum")
	 * @param numDir  directory of per-pair line-number outputs, used as the
	 *                resume marker
	 * @throws Exception if json mode is requested with a non-empty jsons dir
	 */
	public static void multiCollect(String path, String outMode, String numDir) throws Exception {
//		if(outMode.equals("txt"))
//			FileOperation.delAllFile(dataDir);
//		if(outMode.equals("lineNum")) {
//			FileOperation.delAllFile(numDir);
//			FileOperation.delAllFile(varDir);
//			FileOperation.delAllFile(checkDir);
//		}
		if(outMode.equals("json")) {
			// json mode writes numbered pair files, so the dir must start empty
			String jpath = "jsons\\";
			File jFile = new File(jpath);
			if(!jFile.exists())
				jFile.mkdirs();
			if(jFile.listFiles().length!=0&&outMode.equals("json"))
				throw new Exception("pls clean dir!");
		}
		ArrayList<String> existList = checkExist(numDir);
		File rootFile = new File(path);
		File[] fileList = rootFile.listFiles();
		System.out.println(fileList.length);
		for(int i=0;i<fileList.length;i++) {
			File cpFile = fileList[i];
			System.out.println(i+":"+cpFile.getName());
			if(existList.contains(cpFile.getName()))
				continue;
			String cpPath = cpFile.getAbsolutePath();
			FilterDefuse defuse = new FilterDefuse();
			defuse.collectDiffwithDefUse(cpPath, outMode, true, true, "");
		}
		// `count` tracks duplicate tree pairs detected by checkSim in json mode
		System.out.println("DuplicateNum:"+count);
	}
	/**
	 * Core extraction pass: for one change-pair directory, splits both file
	 * versions into statement subtrees, keeps the source subtrees touched by
	 * edit actions, maps each to its target-side counterpart, gathers the
	 * variable definitions each side uses, and emits the pair in the chosen
	 * output mode ("txt", "json" or "lineNum").
	 *
	 * @param path         change-pair directory (diffs.txt plus two snapshots)
	 * @param outMode      "txt" = source text pairs, "json" = serialized trees,
	 *                     "lineNum" = location/def metadata files
	 * @param ifOnlyChange when true, only subtrees containing an edit action
	 * @param ifPrintDef   when true (txt mode), prepend used definitions
	 * @param filter       forwarded to the migration reader
	 * @throws Exception on unrecoverable I/O or parse errors
	 */
	public void collectDiffwithDefUse(String path, String outMode,
			Boolean ifOnlyChange, Boolean ifPrintDef, String filter) throws Exception {// collect def-use information
		Split sp = new Split();
		ArrayList<Migration> migrats = FileFilter.readMigrationList(path, filter);
		String repoName = "";
		if(migrats.size()!=0)
			repoName = migrats.get(0).getRepoName();
		else
			return;
		String txtName = (new File(path)).getName();
		String jpath = "jsons\\";
		File jFile = new File(jpath);
		if(!jFile.exists())
			jFile.mkdirs();
		// output targets for the various modes
		String outPath = "data\\defuse_"+txtName+".txt";
		String outPath1 = "data\\src-val_"+txtName+".txt";
		String outPath2 = "data\\tgt-val_"+txtName+".txt";
		String outPath3 = "data_num\\"+repoName+"_"+txtName+".txt";
		String outPath4 = "data_var\\"+repoName+"_"+txtName+"_defs_src.txt";
		String outPath5 = "data_var\\"+repoName+"_"+txtName+"_defs_dst.txt";
		String outPath6 = "data_check\\"+repoName+"_"+txtName+".txt";
		int errCount = 0;
		for(Migration migrat : migrats) {
			Defuse defuse = new Defuse();
			String miName_src = migrat.getMiName_src();
			String miName_dst = migrat.getMiName_dst();
			TreeContext sTC = migrat.getSrcT();
			TreeContext dTC = migrat.getDstT();
			MappingStore mappings = migrat.getMappings();
			// leaf -> enclosing block maps for scope-aware def lookup
			HashMap<ITree, ITree> leaf2parblock_map_src = defuse.searchBlockMap(sTC);
			HashMap<ITree, ITree> leaf2parblock_map_dst = defuse.searchBlockMap(dTC);
			System.out.println("Analyse:"+miName_src);
			ArrayList<SubTree> changedSTree = new ArrayList<>();
			HashMap<String, LinkedList<Action>> actions = Utils.collectAction(sTC, dTC, mappings);
			ArrayList<Integer> srcActIds = Utils.collectSrcActNodeIds(sTC, dTC, mappings, actions);
//			if(srcActIds.contains(2333)) {
//				System.out.println("indeed");
//			}else {
//				System.out.println("not contains");
//			}
			ArrayList<Definition> defs1 = defuse.getDef(sTC, "src");// compute actions first, then collect defs
			ArrayList<Definition> defs2 = defuse.getDef(dTC, "tgt");
			HashMap<String, ArrayList<Definition>> defMap1 = defuse.transferDefs(defs1);
			HashMap<String, ArrayList<Definition>> defMap2 = defuse.transferDefs(defs2);
			HashMap<ITree, ArrayList<Definition>> blockMap1 = defuse.transferBlockMap(defs1, sTC, "src");
			HashMap<ITree, ArrayList<Definition>> blockMap2 = defuse.transferBlockMap(defs2, dTC, "tgt");
			ArrayList<SubTree> sub1 = sp.splitSubTree(sTC, miName_src);// note: splitting may cut across blocks
			ArrayList<SubTree> sub2 = sp.splitSubTree(dTC, miName_src);// compute actions first, then split subtrees
			HashMap<Integer, HashMap<String, String>> usedDefs2Map = new HashMap<Integer, HashMap<String, String>>();
			System.out.println("def1size:"+defs1.size());
			System.out.println("def2size:"+defs2.size());
			System.out.println("def1Mapsize:"+defMap1.size());
			System.out.println("def2Mapsize:"+defMap2.size());
			System.out.println("block1size:"+blockMap1.size());
			System.out.println("block2size:"+blockMap2.size());
//			for(SubTree st : sub1) {
//				ITree root = st.getRoot();
//				System.err.println("StID:"+root.getId());
//			}
			if(ifOnlyChange==true) {
				for(SubTree st : sub1) {
					ITree t = st.getRoot();
//					System.out.println("StID:"+t.getId());
					List<ITree> nodeList = t.getDescendants();
					nodeList.add(t);
//					for(ITree node : nodeList) {
//						int id = node.getId();
//						System.out.println("nodeid:"+id);
//					}
					for(ITree node : nodeList) {
						int id = node.getId();
						if(srcActIds.contains(id)) {
							changedSTree.add(st);
//							System.out.println("find a action subtree! "+t.getId());
							break;
						}
					}
				}// first find the subtrees that contain an edit action
			}else {
				changedSTree = sub1;
			}
			System.out.println("subSize:"+sub1.size());
			System.out.println("changeSize:"+changedSTree.size());
			for(SubTree srcT : changedSTree) {
				ITree sRoot = srcT.getRoot();
				int id = sRoot.getId();
				System.out.println("===================");
				System.out.println("StID:"+id);
				HashMap<String, String> replaceMap_src = new HashMap<String, String>();
				HashMap<String, String> replaceMap_dst = new HashMap<String, String>();
				HashSet<Definition> usedDefs1 = new HashSet<Definition>();
				HashSet<Definition> usedDefs2 = new HashSet<Definition>();
//				System.out.println("CheckMapping "+sRoot.getId()+":"+srcT.getMiName());
				Boolean same = false;
				ArrayList<ITree> leaves1 = new ArrayList<ITree>();
				Utils.traverse2Leaf(sRoot, leaves1);
				int labelCount = 0;
				// collect the definitions reachable from each labeled leaf (src side)
				for(ITree leaf : leaves1) {
					String label = leaf.getLabel();
//					System.out.println("label:"+label);
					if(!label.equals(""))
						labelCount++;
					String type = sTC.getTypeLabel(leaf);
					if(type.equals("literal")) {
						leaf.setLabel(Output.deleteLiteral(leaf, sTC));
//						if(label.contains("\""))
//							replaceMap_src.put("@@"+label+"@@", "None");
//						else
//							replaceMap_src.put("$$"+label+"$$", "num");//replace Literal
					}
					ArrayList<Definition> stringList = defMap1.get(label);
					if(stringList!=null) {
						ITree parBlock = leaf2parblock_map_src.get(leaf);
						ArrayList<Definition> blockList = blockMap1.get(parBlock);
						for(Definition def1 : stringList) {
							if(blockList!=null) {
								if(blockList.contains(def1)) {
									// only defs that textually precede the use count
									if(leaf.getId()>def1.getDefLabelID()) {
										usedDefs1.add(def1);
										System.out.println("DefTest: "+leaf.getLabel()+","+leaf.getId()+","+def1.getDefLabelID());
//										leaf.setLabel("var");
										replaceMap_src.put(label, label);
									}
								}
							}
							if(def1.getDefLabelID()==leaf.getId()) {
//								leaf.setLabel("var");
								replaceMap_src.put(label, label);
							}
//							System.out.println(leaf.getId()+","+def1.getDefLabelID());
//							System.out.println("Def:"+def1.getType()+","+def1.getVarName());
						}
					}
				}
				if(labelCount==0) {
					System.err.println("labelCount is 0 ID:"+id);
					continue;
				}
				SubTree dstT = defuse.checkMapping(srcT, mappings, dTC, sub2);
				if(dstT==null) {
					System.err.println("no dstT searched ID:"+id);
					continue;// the subtree has no counterpart — it was deleted
				}
				ITree dRoot = dstT.getRoot();
//				System.out.println(sRoot.getId()+"->"+dRoot.getId());
				Location location1 = new Location(srcT);
				Location location2 = new Location(dstT);
				if(usedDefs2Map.get(dRoot.getId())==null) {
					// same def collection pass for the target side
					ArrayList<ITree> leaves2 = new ArrayList<ITree>();
					Utils.traverse2Leaf(dRoot, leaves2);
					for(ITree leaf : leaves2) {
						String label = leaf.getLabel();
						String type = dTC.getTypeLabel(leaf);
						if(type.equals("literal")) {
							leaf.setLabel(Output.deleteLiteral(leaf, dTC));
//							if(label.contains("\""))
//								replaceMap_dst.put("@@"+label+"@@", "None");
//							else
//								replaceMap_dst.put("$$"+label+"$$", "num");//replace Literal
						}
						ArrayList<Definition> stringList = defMap2.get(label);
						if(stringList!=null) {
							ITree parBlock = leaf2parblock_map_dst.get(leaf);
							ArrayList<Definition> blockList = blockMap2.get(parBlock);
							for(Definition def2 : stringList) {
								if(blockList!=null) {
									if(blockList.contains(def2)) {
										if(leaf.getId()>def2.getDefLabelID()) {
											usedDefs2.add(def2);
//											leaf.setLabel("var");
											replaceMap_dst.put(label, label);
										}
//										System.out.println(leaf.getId()+","+def2.getDefLabelID());
//										System.out.println(def2.getType()+","+def2.getVarName());
									}
								}
								if(def2.getDefLabelID()==leaf.getId()) {
//									leaf.setLabel("var");
									replaceMap_dst.put(label, label);
								}
							}
						}
						if(same==false) {
							// the pair is kept only if src and dst share at least one leaf label
							for(ITree leaf1 : leaves1) {
								String label1 = leaf1.getLabel();
								if(label.equals(label1)) {
									same = true;
								}
							}
						}
					}
					usedDefs2Map.put(dRoot.getId(), replaceMap_dst);
				}else {
					same = true;
					replaceMap_dst = usedDefs2Map.get(dRoot.getId());
				}// distinct src subtrees can map to the same dst subtree; the matcher
				// cannot resolve this, so we just reuse the cached replaceMap_dst and move on
//				String src = Output.subtree2src(srcT);
//				String tar = Output.subtree2src(dstT);
//				if(outMode.equals("txt")) {
//					if(tar.contains("error")&&tar.contains("situation")) {
//						errCount++;
//						continue;
//					}
//					if(((float)src.length()/(float)tar.length())<0.25||((float)tar.length()/(float)src.length())<0.25) {
//						continue;
//					}// skip sentence pairs whose lengths differ too much
//					if(ifOnlyChange==true) {
//						if(src.equals(tar))
//							continue;
//					}// drop identical sentences
//				}
				if(same==false) {
					System.err.println("No leaf is the same ID:"+id);
					continue;//no leaf is the same
				}
				if(outMode.equals("txt")) {
					if(ifPrintDef==true) {
						String buffer = getDefTxt(usedDefs1, usedDefs2, sTC, dTC, srcT, dstT);
						printTxt(outPath, outPath1, outPath2, buffer);
					}else {
						String buffer = getText(sTC, dTC, srcT, dstT);
						printTxt(outPath, outPath1, outPath2, buffer);
					}
				}else if(outMode.equals("json")) {
					srcT = absTree(srcT);
					dstT = absTree(dstT);
					TreeContext st = defuse.buildTC(srcT);
					TreeContext dt = defuse.buildTC(dstT);
					if(checkSim(st, dt)==false) {
						printJson(jpath, st, dt);
						treePairs.put(srcT, dstT);
					}
				}else if(outMode.equals("lineNum")) {
//					if(sRoot.getId()==806) {
//						for(Map.Entry<String, String> entry : replaceMap_src.entrySet()) {
//							String varName = entry.getKey();
//							String label = entry.getValue();
//							System.err.println(varName+"->"+label+";");
//						}
//					}
					String diffLine_check = "STID:"+srcT.getRoot().getId()+","
							+location1.getBeginLine()+","+location1.getLastLine()+","+location1.getBeginCol()+","+location1.getLastCol()+"->"
							+location2.getBeginLine()+","+location2.getLastLine()+","+location2.getBeginCol()+","+location2.getLastCol();
					String diffLine = miName_src+";"+miName_dst+";"
							+location1.getBeginLine()+","+location1.getLastLine()+","+location1.getBeginCol()+","+location1.getLastCol()+"->"
							+location2.getBeginLine()+","+location2.getLastLine()+","+location2.getBeginCol()+","+location2.getLastCol();
					printLineNum(outPath3, diffLine);
					printDefs(outPath4, outPath5, replaceMap_src, replaceMap_dst,
							usedDefs1, usedDefs2);
					printLineCheck(outPath6, diffLine_check);
				}
			}
		}
		System.out.println("errCount:"+errCount);
	}
static private void printLineCheck(String outPath6, String diffLine_check) throws IOException {
File output6 = new File(outPath6);
BufferedWriter wr6 = new BufferedWriter(new FileWriter(output6, true));
wr6.append(diffLine_check);
wr6.newLine();
wr6.flush();
wr6.close();
}
static private void printLineNum(String outPath3, String diffLine) throws Exception {
File output3 = new File(outPath3);
BufferedWriter wr3 = new BufferedWriter(new FileWriter(output3, true));
wr3.append(diffLine);
wr3.newLine();
wr3.flush();
// System.out.println("STID:"+srcT.getRoot().getId()+","+dstT.getRoot().getId());
// System.out.println(replaceMap_dst.size());
wr3.close();
}
static private void printDefs(String outPath4, String outPath5,
HashMap<String , String> replaceMap_src, HashMap<String , String> replaceMap_dst,
HashSet<Definition> usedDefs1, HashSet<Definition> usedDefs2) throws IOException {
File output4 = new File(outPath4);
BufferedWriter wr4 = new BufferedWriter(new FileWriter(output4, true));
File output5 = new File(outPath5);
BufferedWriter wr5 = new BufferedWriter(new FileWriter(output5, true));
for(Definition def1 : usedDefs1) {
SubTree st1 = new SubTree(def1.getRoot(), def1.getTc(), 0, "");
Location location1 = new Location(st1);
wr4.append(+location1.getBeginLine()+","+location1.getLastLine()
+","+location1.getBeginCol()+","+location1.getLastCol()+";");
}
for(Definition def2 : usedDefs2) {
SubTree st2 = new SubTree(def2.getRoot(), def2.getTc(), 0, "");
Location location2 = new Location(st2);
wr5.append(+location2.getBeginLine()+","+location2.getLastLine()
+","+location2.getBeginCol()+","+location2.getLastCol()+";");
}
for(Map.Entry<String, String> entry : replaceMap_src.entrySet()) {
String varName = entry.getKey();
String label = entry.getValue();
wr4.append(varName+"->"+label+";");
}
wr4.newLine();
wr4.flush();
for(Map.Entry<String, String> entry : replaceMap_dst.entrySet()) {
String varName = entry.getKey();
String label = entry.getValue();
wr5.append(varName+"->"+label+";");
}
wr5.newLine();
wr5.flush();
wr4.close();
wr5.close();
}
static private String getDefTxt(HashSet<Definition> usedDefs1, HashSet<Definition> usedDefs2,
TreeContext tc1, TreeContext tc2, SubTree srcT, SubTree dstT) throws Exception {
String buffer = "";
for(Definition def : usedDefs1) {
SubTree st = new SubTree(def.getRoot(), tc1, 0, "");
String stat = Output.subtree2src(st);
buffer = buffer +stat+" ; ";
}
String src = Output.subtree2src(srcT);
buffer = buffer + src+"\t";
for(Definition def : usedDefs2) {
SubTree st = new SubTree(def.getRoot(), tc2, 0, "");
String stat = Output.subtree2src(st);
buffer += stat+" ; ";
}
String tar = Output.subtree2src(dstT);
buffer += tar;
if(buffer.contains("error")&&buffer.contains("situation"))
return null;
return buffer;
}
static private SubTree absTree(SubTree st) {
ITree root = st.getRoot();
List<ITree> desList = root.getDescendants();
for(ITree node : desList) {
String label = node.getLabel();
try {
Integer.parseInt(label);
node.setLabel("num");
} catch (Exception e) {
// TODO: handle exception
}
}
return st;
}
static private ArrayList<String> checkExist(String outPath){
ArrayList<String> existList = new ArrayList<String>();
File outDir = new File(outPath);
File[] cpFiles = outDir.listFiles();
System.out.println(cpFiles.length);
for(File cpFile : cpFiles) {
String name = cpFile.getName();
String[] tmp = name.split("\\.")[0].split("_");
String cpNum = tmp[tmp.length-1];
existList.add(cpNum);
}
return existList;
}//断点重新开始任务用
	/**
	 * Duplicate detection for json mode: returns true (and bumps the global
	 * duplicate counter) when some already-emitted tree pair is fully
	 * identical to (tc1, tc2), i.e. GumTree produces zero edit actions on
	 * both sides.
	 *
	 * @param tc1 candidate source-side tree
	 * @param tc2 candidate target-side tree
	 * @return true if an identical pair was already collected
	 */
	static private Boolean checkSim(TreeContext tc1, TreeContext tc2) {
		Boolean full_sim = false;
		Defuse defuse = new Defuse();
		for(Map.Entry<SubTree, SubTree> entry : treePairs.entrySet()) {
			SubTree st1 = entry.getKey();
			SubTree st2 = entry.getValue();
			try {
				// rebuild comparable contexts for the stored pair
				TreeContext tc1_used = defuse.buildTC(st1);
				TreeContext tc2_used = defuse.buildTC(st2);
				Matcher m1 = Matchers.getInstance().getMatcher(tc1.getRoot(), tc1_used.getRoot());
				m1.match();
				MappingStore mappings1 = m1.getMappings();
				ActionGenerator g1 = new ActionGenerator(tc1.getRoot(), tc1_used.getRoot(), mappings1);
				List<Action> actions1 = g1.generate();
				Matcher m2 = Matchers.getInstance().getMatcher(tc2.getRoot(), tc2_used.getRoot());
				m2.match();
				MappingStore mappings2 = m2.getMappings();
				ActionGenerator g2 = new ActionGenerator(tc2.getRoot(), tc2_used.getRoot(), mappings2);
				List<Action> actions2 = g2.generate();
				// no actions on either side => the pairs are exact duplicates
				if(actions1.size()==0&&actions2.size()==0) {
					full_sim = true;
					count++;
					return full_sim;
				}
			} catch (Exception e) {
				// a pair that cannot be diffed is simply not treated as a duplicate
				continue;// TODO: handle exception
			}
		}
		return full_sim;
	}
static private String getText(TreeContext tc1, TreeContext tc2, SubTree srcT, SubTree dstT) throws Exception {
String buffer = "";
String src = Output.subtree2src(srcT);
String tar = Output.subtree2src(dstT);
buffer = src+"\t"+tar;
if(buffer.contains("error")&&buffer.contains("situation"))
return null;
return buffer;
}
static private void printTxt(String outPath, String outPath1, String outPath2, String buffer) throws Exception {
if(buffer==null)
return;
File output = new File(outPath);
BufferedWriter wr = new BufferedWriter(new FileWriter(output, true));
File output1 = new File(outPath1);
File output2 = new File(outPath2);
BufferedWriter wr1 = new BufferedWriter(new FileWriter(output1, true));
BufferedWriter wr2 = new BufferedWriter(new FileWriter(output2, true));
String src = buffer.split("\t")[0];
String dst = buffer.split("\t")[1];
wr.append(buffer);
wr.newLine();
wr.flush();
wr1.append(src);
wr1.newLine();
wr1.flush();
wr2.append(dst);
wr2.newLine();
wr2.flush();
wr.close();
wr1.close();
wr2.close();
}
static private void printJson(String jpath, TreeContext srcT, TreeContext dstT) throws Exception {
File dir = new File(jpath);
if(!dir.exists()) {
dir.mkdirs();
}
File[] files = dir.listFiles();
int fileSize = files.length;
if(srcT!=null) {
String out = jpath+"pair"+String.valueOf(fileSize/2)+"_src.json";
BufferedWriter wr = new BufferedWriter(new FileWriter(new File(out)));
wr.append(TreeIoUtils.toJson(srcT).toString());
wr.flush();
wr.close();
}
if(dstT!=null) {
String out1 = jpath+"pair"+String.valueOf(fileSize/2)+"_tgt.json";
BufferedWriter wr1 = new BufferedWriter(new FileWriter(new File(out1)));
wr1.append(TreeIoUtils.toJson(dstT).toString());
wr1.flush();
wr1.close();
}
}
}
| 20,289 | 35.492806 | 123 |
java
|
SeqTrans
|
SeqTrans-master/Migration/src/collect/testcase/CollectFullProject.java
|
package collect.testcase;
import java.io.File;
import java.io.IOException;
import java.util.List;
import org.apache.commons.exec.CommandLine;
import org.apache.commons.exec.DefaultExecutor;
import org.apache.commons.exec.ExecuteException;
import org.apache.commons.exec.ExecuteWatchdog;
import org.apache.commons.io.FileUtils;
/**
 * Copies the full pre-fix and post-fix project snapshots for each test-set
 * change pair by checking the repository out at both commits listed in the
 * pair's diffs.txt, then copies the manifest itself alongside them.
 * Windows-only: shells out to "cmd.exe /C git checkout".
 */
public class CollectFullProject {
	/**
	 * Collecting the full project for test sets
	 * @throws Exception
	 */
	public static void main(String[] args) throws Exception {
		String repoPath = "J:\\git_repo";
		String testPath = "I:\\20210714-Srqtrans_testcase\\Vulnerability_testcase";
		String output = "I:\\20210714-Srqtrans_testcase\\Vulnerability_testset_full";
//		extractProject(repoPath, testPath, output);
		addDiffFile(testPath, output);
	}
	/**
	 * Copies each pair's diffs.txt manifest into the corresponding output dir.
	 */
	private static void addDiffFile(String testPath, String output) throws IOException {
		File testRoot = new File(testPath);
		File[] testDirs = testRoot.listFiles();
		for(File testDir : testDirs) {
			String cpName = testDir.getName();
			String diffPath = testDir.getAbsolutePath()+"\\diffs.txt";
			String copyRoot = output+"\\"+cpName;
			FileUtils.copyFileToDirectory(new File(diffPath), new File(copyRoot));
		}
	}
	/**
	 * For each change pair: reads repo name (line 1) and "oldHash;newHash"
	 * (line 2) from diffs.txt, checks the repo out at each commit and copies
	 * the whole working tree under output/<cp>/<hash>.
	 */
	public static void extractProject(String repoPath, String testPath, String output) throws Exception {
		File testRoot = new File(testPath);
		File[] testDirs = testRoot.listFiles();
		for(File testDir : testDirs) {
			String cpName = testDir.getName();
			String diffPath = testDir.getAbsolutePath()+"\\diffs.txt";
			File diffFile = new File(diffPath);
			if(!diffFile.exists())
				throw new Exception("file is not existed!");
			List<String> lines = FileUtils.readLines(diffFile, "UTF-8");
			String projectName = lines.get(0);
			String classPath = repoPath +"\\"+projectName;
			String oldcommit = lines.get(1).split(";")[0];
			String copyRoot1 = output+"\\"+cpName+"\\"+oldcommit;
			String newcommit = lines.get(1).split(";")[1];
			String copyRoot2 = output+"\\"+cpName+"\\"+newcommit;
			switchProject(classPath, oldcommit);
			FileUtils.copyDirectoryToDirectory(new File(classPath), new File(copyRoot1));
			switchProject(classPath, newcommit);
			FileUtils.copyDirectoryToDirectory(new File(classPath), new File(copyRoot2));
		}
	}
	/**
	 * Forces a git checkout of the given commit inside classPath, with a 5min
	 * watchdog, then sleeps to let the working tree settle before copying.
	 */
	public static void switchProject(String classPath, String commit) throws Exception{
//		String movePath = diskpath+"cp"+String.valueOf(n)+"\\"+newCommitName+"\\";
		String line = "cmd.exe /C git checkout -f "+commit;
		// exit code 1 is tolerated as well as 0 (git may warn without failing)
		int[] exitvalues = {0, 1};
		System.out.println(line);
		CommandLine cmdLine = CommandLine.parse(line);
		DefaultExecutor executor = new DefaultExecutor();
		ExecuteWatchdog watchdog = new ExecuteWatchdog(300000);//timeout 5min
		executor.setExitValues(exitvalues);
		executor.setWorkingDirectory(new File(classPath));
		executor.setWatchdog(watchdog);
		executor.execute(cmdLine);
		Thread.sleep(6000);
	}
}
| 2,894 | 36.597403 | 102 |
java
|
SeqTrans
|
SeqTrans-master/Migration/src/collect/testcase/CollectTestcase.java
|
package collect.testcase;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileDescriptor;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.net.URL;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Date;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Scanner;
import org.apache.commons.exec.CommandLine;
import org.apache.commons.exec.DefaultExecuteResultHandler;
import org.apache.commons.exec.DefaultExecutor;
import org.apache.commons.exec.ExecuteWatchdog;
import org.apache.commons.io.FileUtils;
import org.eclipse.jgit.api.Git;
import org.eclipse.jgit.api.errors.GitAPIException;
import org.eclipse.jgit.diff.DiffEntry;
import org.eclipse.jgit.diff.DiffFormatter;
import org.eclipse.jgit.diff.RawTextComparator;
import org.eclipse.jgit.lib.CheckoutEntry;
import org.eclipse.jgit.lib.ObjectId;
import org.eclipse.jgit.lib.ObjectReader;
import org.eclipse.jgit.lib.Ref;
import org.eclipse.jgit.lib.ReflogEntry;
import org.eclipse.jgit.lib.Repository;
import org.eclipse.jgit.revwalk.RevCommit;
import org.eclipse.jgit.revwalk.RevObject;
import org.eclipse.jgit.revwalk.RevWalk;
import org.eclipse.jgit.storage.file.FileRepositoryBuilder;
import org.eclipse.jgit.treewalk.CanonicalTreeParser;
import structure.ChangePair;
import utils.FileOperation;
public class CollectTestcase {
/**
* Collecting change pairs from git commit diff logs with traditional test cases
* @throws Exception
*/
public static void main(String[] args) throws Exception {
// TODO Auto-generated method stub
String dataPath="D:\\workspace\\Pycharm\\20191222-Vulnerability-dataset\\dataset.csv";//��Ҫ������Commit Hash
String rootPath="J:\\git_repo\\";
String diskpath = "I:\\20210714-Srqtrans_testcase\\Vulnerability_trainset\\";
String testPath = "I:\\20210714-Srqtrans_testcase\\Vulnerability_testcase\\";
autoExtraction(dataPath, rootPath, diskpath, testPath);
// getChangeList(versionCommit, path);
}
	/**
	 * Drives the extraction: for each dataset row (CVE, repo URL, commit)
	 * resolves the commit in the local clone, builds a ChangePair against its
	 * parent and hands it to runExec. Pairs whose full commit hash already
	 * appears in the test set are skipped; failures are appended to err.txt.
	 *
	 * @param dataPath CSV with "CVE,URL,commit" rows
	 * @param rootPath directory holding the bare/working git clones
	 * @param diskpath output root for cpNNN change-pair folders
	 * @param testPath test-set root used to exclude test commits, may be null
	 * @throws Exception on unrecoverable I/O errors
	 */
	public static void autoExtraction(String dataPath, String rootPath, String diskpath,
			String testPath) throws Exception {
		File csvFile = new File(dataPath);
		BufferedReader br = new BufferedReader(new FileReader(csvFile));
		String tmpline = "";
		ArrayList<String> lines = new ArrayList<String>();
		while((tmpline=br.readLine())!=null) {
			lines.add(tmpline);
		}
		br.close();
		ArrayList<String> testHashs = new ArrayList<String>();
		if(testPath!=null) {
			testHashs = checkExistTests(testPath);
		}//skip
		// resume point: hard-coded here instead of continueProcess(diskpath)
//		int n = continueProcess(diskpath);//continue from the next cpNum
		int n = 344;
		int errorNum = 0;
		BufferedWriter wr = new BufferedWriter(new FileWriter(new File("err.txt"), true));
		for(int i=n;i<lines.size();i++,n++) {
			System.out.println("round:"+i);
			String line = lines.get(i);
			String[] tokens = line.split(",");
			String CVE = tokens[0];
			String URL = tokens[1];
			String commit = tokens[2];
			// repo directory name = last path segment of the URL
			String repoName = URL.split("/")[URL.split("/").length-1];
			System.out.println(repoName+","+commit);
			String classPath = rootPath+repoName+"\\.git";
			System.out.println(classPath);
			File repoDir = new File(classPath);
			if (!repoDir.exists()) {
				System.err.println(repoName+" not exists!");
				continue;
			}
			FileRepositoryBuilder builder = new FileRepositoryBuilder();
			builder.setMustExist(true);
//			builder.addCeilingDirectory(new File(classPath));
			Repository repo = builder.setGitDir(new File(classPath))
					.readEnvironment()
					.findGitDir()
					.build();
			RevWalk walk = new RevWalk(repo);
			ObjectId versionId=repo.resolve(commit);
			ChangePair cp = new ChangePair();
			try {
				String fullCommitId = versionId.getName();
				if(testHashs.contains(fullCommitId)) {
					continue;
				}//skip test set
				RevCommit currentCommit=walk.parseCommit(versionId);
				System.out.println("Commit:"+currentCommit.getName());
				cp = getChangPair(currentCommit, repo);
				cp.setRootPath(rootPath);
				cp.setRepoName(repoName);
			} catch (Exception e) {
				// record the failing row and move on
				errorNum++;
				wr.append(repoName+","+commit);
				wr.newLine();
				e.printStackTrace();
				continue;
			}
			RevCommit newCommit = cp.getNewCommit();
			RevCommit oldCommit = cp.getOldCommit();
			String newCommitName = newCommit.getName();
			String oldCommitName = oldCommit.getName();
			System.out.println("cp"+n+":"+oldCommitName+";"+newCommitName);
			n = runExec(CVE, cp, repo, n, diskpath);
			walk.close();
		}
		wr.close();
		System.out.println("Error number:"+errorNum);
		System.out.println("CPsize:"+n);
	}
	/**
	 * Materializes one change pair on disk: checks the repo out at the new
	 * commit, writes per-file diff logs, the diffs.txt manifest and the commit
	 * messages, copies the changed files from both versions, then checks out
	 * the old commit and copies the pre-fix files.
	 *
	 * @param CVE      CVE id of the row (currently unused in the body — TODO confirm)
	 * @param cp       change pair with old/new commits and diff entries
	 * @param repo     jgit repository used to format the diffs
	 * @param n        change-pair index used for the cpNNN folder name
	 * @param diskpath output root
	 * @return the (unchanged) index n, so the caller can keep counting
	 */
	private static int runExec(String CVE, ChangePair cp, Repository repo, int n, String diskpath) throws Exception {
		RevCommit newCommit = cp.getNewCommit();
		RevCommit oldCommit = cp.getOldCommit();
		String newCommitName = newCommit.getName();
		String oldCommitName = oldCommit.getName();
		String rootPath = cp.getRootPath();
		String repoName = cp.getRepoName();
		String classPath = rootPath+repoName+"\\";
		String movePath = diskpath+"cp"+String.valueOf(n)+"\\"+newCommitName+"\\";
		// checkout the post-fix version first
		String line = "cmd.exe /C git checkout -f "+newCommitName;
		int[] exitvalues = {0, 1};
		System.out.println(line);
		CommandLine cmdLine = CommandLine.parse(line);
		DefaultExecutor executor = new DefaultExecutor();
		ExecuteWatchdog watchdog = new ExecuteWatchdog(300000);//timeout 5min
		executor.setExitValues(exitvalues);
		executor.setWorkingDirectory(new File(classPath));
		executor.setWatchdog(watchdog);
		executor.execute(cmdLine);
		Thread.sleep(6000);
		List<DiffEntry> diffs = cp.getDiffs();
		ArrayList<DiffEntry> filterDiffs = getUsefulDiffs(diffs);
		System.out.println("Diffsize:"+filterDiffs.size());
		if(filterDiffs.size()==0) {
			return n;// continue the next iter
		}
		// one formatted diff log per changed file
		String diffDir = diskpath+"cp"+String.valueOf(n)+"\\diff_logs\\";
		File diffDirFile = new File(diffDir);
		if (!diffDirFile.exists()) {
			diffDirFile.mkdirs();
		}
		int count = 0;
		for (DiffEntry entry : filterDiffs) {
			ByteArrayOutputStream out = new ByteArrayOutputStream();
			DiffFormatter df = new DiffFormatter(out);
			df.setDiffComparator(RawTextComparator.WS_IGNORE_ALL);
			df.setRepository(repo);
			String path = diffDir+"diff"+String.valueOf(count)+".txt";
			BufferedWriter wr = new BufferedWriter(new FileWriter(new File(path)));
			df.format(entry);
			String diffText = out.toString("UTF-8");
//			System.out.println(diffText);
			wr.append(diffText);
			wr.close();
			df.close();
			count++;
		}
		// diffs.txt manifest: repo name, "old;new" hashes, then one path pair per line
		String diffPath = diskpath+"cp"+String.valueOf(n)+"\\diffs.txt";
		File diffFile = new File(diffPath);
		if (!diffFile.getParentFile().exists()) {
			diffFile.getParentFile().mkdirs();
		}
		String tagPath = diskpath+"cp"+String.valueOf(n)+"\\tags.txt";
		BufferedWriter wr = new BufferedWriter(new FileWriter(diffFile));
		BufferedWriter wr1 = new BufferedWriter(new FileWriter(tagPath));
		wr.append(cp.getRepoName());
		wr.newLine();
		wr.append(oldCommitName+";"+newCommitName);
		wr.newLine();
		wr.flush();
		wr1.append("newCommit:\n"+newCommit.getFullMessage());
		wr1.newLine();
		wr1.append("oldCommit:\n"+oldCommit.getFullMessage());
		wr1.close();
		for (DiffEntry entry : filterDiffs) {
			wr.append(entry.getOldPath()+";"+entry.getNewPath());
			wr.newLine();
			wr.flush();
			String newFilePath = classPath+entry.getNewPath();
			String copyPath = movePath+entry.getNewPath();
			FileOperation.copyFile(new File(newFilePath), new File(copyPath));//copy changeFile
		}
		wr.close();
//		Thread.sleep(5000);
		// now switch back to the pre-fix version and copy the old files
		String movePath1 = diskpath+"cp"+String.valueOf(n)+"\\"+oldCommitName+"\\";
		String line1 = "cmd.exe /C git checkout -f "+oldCommitName;
		CommandLine cmdLine1 = CommandLine.parse(line1);
		DefaultExecuteResultHandler resultHandler1 = new DefaultExecuteResultHandler();
		DefaultExecutor executor1 = new DefaultExecutor();
		ExecuteWatchdog watchdog1 = new ExecuteWatchdog(300000);//timeout 10s
		executor1.setExitValues(exitvalues);
		executor1.setWorkingDirectory(new File(classPath));
		executor1.setWatchdog(watchdog1);
		executor1.execute(cmdLine1, resultHandler1);
		Thread.sleep(6000);
		for (DiffEntry entry : filterDiffs) {
			String oldFilePath = classPath+entry.getOldPath();
			String copyPath = movePath1+entry.getOldPath();
			FileOperation.copyFile(new File(oldFilePath), new File(copyPath));//copy changeFile
		}
		resultHandler1.waitFor();
//		Thread.sleep(5000);
		return n;
	}//Execute checkout and copy diffs
public static ArrayList<DiffEntry> getUsefulDiffs(List<DiffEntry> diffs){
Boolean containTestcase = true;
// for (DiffEntry entry : diffs) {
// String oldFilePath = entry.getOldPath();
// String newFilePath = entry.getNewPath();
// if(oldFilePath.contains("Test")&&newFilePath.contains("Test")){
// containTestcase = true;
// break;
// }
// }
ArrayList<DiffEntry> filterDiffs = new ArrayList<DiffEntry>();
if(containTestcase) {
for (DiffEntry entry : diffs) {
String oldFilePath = entry.getOldPath();
System.out.println("old:"+oldFilePath);
String newFilePath = entry.getNewPath();
System.out.println("new:"+newFilePath);
System.out.println("---------");
if(oldFilePath.contains("/dev/null")||newFilePath.contains("/dev/null")) {
continue;//filter changepair
}else if(oldFilePath.contains(".java")&&newFilePath.contains(".java")){
filterDiffs.add(entry);
}
// else if((oldFilePath.contains(".cpp")||oldFilePath.contains(".CPP")||
// oldFilePath.contains(".cc")||oldFilePath.contains(".h")||oldFilePath.contains(".c"))
// &&(newFilePath.contains(".cpp")||newFilePath.contains(".CPP")||
// newFilePath.contains(".cc")||newFilePath.contains(".h")||newFilePath.contains(".c"))){
// filterDiffs.add(entry);
// }
}
}
return filterDiffs;
}
public static Integer continueProcess(String rootPath) {
File rootFile = new File(rootPath);
File[] dirs = rootFile.listFiles();
int n = 0;
if (dirs.length==0)
return n;
for(File dir : dirs) {
String cpName = dir.getName();
int cpNum = Integer.valueOf(cpName.substring(2, cpName.length()));
if(cpNum>=n) {
n = cpNum+1;
}
}
return n;
}//get cpNumber and continue from the next number
	/**
	 * Computes the file-level diff between {@code revCommit} and its first
	 * parent and wraps both commits plus the diff list in a {@link ChangePair}.
	 *
	 * @param revCommit the (fixing) commit to analyse
	 * @param repo      repository both commits live in
	 * @return a ChangePair of (revCommit, parent, diffs); note the diff list
	 *         is left null when the JGit diff failed (errors are only printed)
	 * @throws Exception propagated from resolving the parent commit
	 */
	public static ChangePair getChangPair(RevCommit revCommit, Repository repo) throws Exception {
		List<DiffEntry> returnDiffs = null;
		RevCommit previousCommit=getPrevHash(revCommit, repo);
		System.out.println("PrevCommit:"+previousCommit.getName());
		try {
			// tree ids of both revisions are the inputs to the tree diff
			ObjectId head=revCommit.getTree().getId();
			ObjectId oldHead=previousCommit.getTree().getId();
			System.out.println("Printing diff between the Revisions: " + revCommit.getName() + " and " + previousCommit.getName());
			// prepare the two iterators to compute the diff between
			try (ObjectReader reader = repo.newObjectReader()) {
				CanonicalTreeParser oldTreeIter = new CanonicalTreeParser();
				oldTreeIter.reset(reader, oldHead);
				CanonicalTreeParser newTreeIter = new CanonicalTreeParser();
				newTreeIter.reset(reader, head);
				// finally get the list of changed files
				try (Git git = new Git(repo)) {
					List<DiffEntry> diffs= git.diff()
							.setNewTree(newTreeIter)
							.setOldTree(oldTreeIter)
							.call();
					returnDiffs=diffs;
				} catch (GitAPIException e) {
					// NOTE(review): failure is deliberately swallowed; the
					// caller then receives a ChangePair with a null diff list
					e.printStackTrace();
				}
			}
		} catch (IOException e) {
			// NOTE(review): likewise swallowed -- diff list stays null
			e.printStackTrace();
		}
		ChangePair cp = new ChangePair(revCommit, previousCommit, returnDiffs);
		return cp;
	}
private static ArrayList<String> checkExistTests(String testPath) throws IOException{
File[] testCPs = (new File(testPath)).listFiles();
ArrayList<String> testHashs = new ArrayList<String>();
for(File testCP : testCPs) {
String diffPath = testCP.getAbsolutePath()+"\\diffs.txt";
List<String> lines = FileUtils.readLines(new File(diffPath), "UTF-8");
// String srcHash = lines.get(1).split(";")[0];
String dstHash = lines.get(1).split(";")[1];
testHashs.add(dstHash);
}
return testHashs;
}
public static RevCommit getPrevHash(RevCommit commit, Repository repo) throws IOException {
RevWalk revWalk = new RevWalk(repo);
RevCommit previous = revWalk.parseCommit(commit.getParent(0).getId());
//Reached end and no previous commits.
revWalk.close();
return previous;
}
static void printTime(int commitTime) {
SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
String timestampString=String.valueOf(commitTime);
Long timestamp = Long.parseLong(timestampString) * 1000;
String date = formatter.format(new Date(timestamp));
System.out.println(date);
}
}
| 13,201 | 35.980392 | 122 |
java
|
SeqTrans
|
SeqTrans-master/Migration/src/collect/testcase/CountBugNum.java
|
package collect.testcase;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.tuple.Pair;
import java.io.File;
import java.io.IOException;
import java.util.*;
/**
 * Counts vulnerability-fix change pairs per repository and, using the Ponta
 * CVE dataset, counts distinct CWE kinds per repository; both rankings are
 * printed in descending frequency order.
 */
public class CountBugNum {
	// repo name -> number of change-pair directories seen for that repo
	private static HashMap<String, Integer> repoFreq = new HashMap<String, Integer>();
	// all fixing-commit hashes listed in the Ponta dataset
	private static ArrayList<String> fullHashs = new ArrayList<>();
	// fixing-commit hash -> CVE id
	private static HashMap<String, String> cveMap= new HashMap<>();
	// CVE id -> CWE id
	private static HashMap<String, String> cweMap= new HashMap<>();
	public static void main(String[] args) throws Exception {
		String rootPath = "I:\\20210714-Srqtrans_testcase\\Vulnerability_testset_full";
		CountBugNum cb = new CountBugNum();
		cb.countRepos(rootPath);
	}
	/**
	 * Scans every change-pair directory under {@code rootPath}, tallies bugs
	 * per repository and distinct CWE kinds per repository, prints both
	 * rankings and returns the per-repo bug ranking.
	 *
	 * @param rootPath directory of cpN change-pair folders
	 * @return repo/frequency entries sorted by frequency, descending
	 * @throws Exception when a change pair is missing its diffs.txt
	 */
	public List<Map.Entry<String, Integer>> countRepos(String rootPath) throws Exception {
		File rootFile = new File(rootPath);
		File[] fileList = rootFile.listFiles();
		// was "new ArrayList()" -- raw type; use the parameterized form
		ArrayList<Pair<String, String>> pairList = new ArrayList<>();
		for (File cpFile : fileList) {
			pairList.add(searchRepo(cpFile.getAbsolutePath()));
		}
		String pontaPath = "D:\\workspace\\Pycharm\\20191222-Vulnerability-dataset\\dataset.csv";
		readPontaCsv(pontaPath);
		String cwePath = "D:\\workspace\\Pycharm\\20191222-Vulnerability-dataset\\CVE2CWE-map.csv";
		readCWE(cwePath);
		HashMap<String, Integer> freqCWE = new HashMap<>();
		// repo name -> CWE kinds already counted for it
		HashMap<String, ArrayList<String>> cweKinds = new HashMap<>();
		for(String hash1 : fullHashs){
			for(Pair<String, String> pair : pairList){
				String repoName = pair.getLeft();
				String hash2 = pair.getRight();
				if(hash2.contains(hash1)){
					String cve = cveMap.get(hash1);
					String cwe = cweMap.get(cve);
					System.out.println(cve);
					System.out.println(cwe);
					ArrayList<String> cweList = cweKinds.get(repoName);
					// count each distinct CWE kind only once per repository;
					// the original duplicated the increment in two branches
					if(cweList==null||!cweList.contains(cwe)){
						Integer prev = freqCWE.get(repoName);
						freqCWE.put(repoName, prev == null ? 1 : prev + 1);
						if(cweList==null)
							cweList = new ArrayList<String>();
						cweList.add(cwe);
						cweKinds.put(repoName, cweList);
					}
					break;
				}
			}
		}
		List<Map.Entry<String, Integer>> list = sortByValueDesc(repoFreq);
		for(Map.Entry<String, Integer> mapping: list){
			System.out.println(mapping.getKey()+":"+mapping.getValue());
		}
		System.out.println("-------------------------");
		for(Map.Entry<String, Integer> mapping: sortByValueDesc(freqCWE)){
			System.out.println(mapping.getKey()+":"+mapping.getValue());
		}
		return list;
	}
	/**
	 * Returns the map's entries sorted by value, descending. Extracted from
	 * two duplicated anonymous comparators; Integer.compare also removes the
	 * (theoretical) overflow of the original "o2 - o1" subtraction.
	 */
	private static List<Map.Entry<String, Integer>> sortByValueDesc(HashMap<String, Integer> freq) {
		List<Map.Entry<String, Integer>> list = new ArrayList<Map.Entry<String, Integer>>(freq.entrySet());
		Collections.sort(list, new Comparator<Map.Entry<String, Integer>>() {
			public int compare(Map.Entry<String, Integer> o1, Map.Entry<String, Integer> o2) {
				return Integer.compare(o2.getValue(), o1.getValue());
			}
		});
		return list;
	}
	/**
	 * Reads one change-pair directory's diffs.txt (line 1: repo name,
	 * line 2: "srcHash;dstHash"), bumps the repo's bug counter and returns
	 * (repoName, dstHash).
	 */
	private static Pair<String, String> searchRepo(String cpPath) throws Exception {
		File cpFile = new File(cpPath);
		String diffPath = cpFile.getAbsolutePath()+"\\diffs.txt";
		File diffFile = new File(diffPath);
		if(!diffFile.exists())
			throw new Exception("file is not existed!");
		List<String> lines = FileUtils.readLines(diffFile, "UTF-8");
		String repoName = lines.get(0);
		String hash = lines.get(1).split(";")[1];
		Integer prev = repoFreq.get(repoName);
		repoFreq.put(repoName, prev == null ? 1 : prev + 1);
		return Pair.of(repoName, hash);
	}
	/** Loads the Ponta dataset CSV: column 0 = CVE id, column 2 = fixing-commit hash. */
	private static void readPontaCsv(String path) throws IOException {
		List<String> lines = FileUtils.readLines(new File(path), "utf-8");
		for(String line : lines){
			String[] tmps = line.split(",");
			String hash = tmps[2];
			String cve = tmps[0];
			fullHashs.add(hash);
			cveMap.put(hash, cve);
		}
	}
	/** Loads the CVE-to-CWE mapping CSV: column 0 = CVE id, column 1 = CWE id. */
	private static void readCWE(String path) throws IOException {
		List<String> lines = FileUtils.readLines(new File(path), "utf-8");
		for(String line : lines){
			String[] tmps = line.split(",");
			String cve = tmps[0];
			String cwe = tmps[1];
			cweMap.put(cve, cwe);
		}
	}
}
| 4,887 | 32.710345 | 105 |
java
|
SeqTrans
|
SeqTrans-master/Migration/src/collect/testcase/FilterTestcase.java
|
package collect.testcase;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import org.apache.commons.io.FileUtils;
/**
 * Partitions collected vulnerability change pairs into a test set (pairs whose
 * changed files have same-named test classes) and a training set (all other
 * pairs), copying the selected directories to configurable targets.
 */
public class FilterTestcase {
	public static void main(String[] args) throws Exception{
		// machine-specific Windows paths: all change pairs, the chosen test
		// set, and the destination for the remaining training set
		String cpPath = "J:\\Vulnerability_commit";
		String rootPath = "I:\\20210714-Srqtrans_testcase\\Vulnerability_testcase";
		String copyPath = "I:\\20210714-Srqtrans_testcase\\Vulnerability_trainset";
//		filterTestcases(rootPath, copyPath);
//		copyTop5(rootPath, copyPath);
		filterTrainset(cpPath, rootPath, copyPath);
	}
	/**
	 * Copies to {@code copyPath} every change pair whose repository is among
	 * the five most frequent repositories under {@code rootPath}.
	 */
	private static void copyTop5(String rootPath, String copyPath) throws Exception {
		CountBugNum cb = new CountBugNum();
		File copyRoot = new File(copyPath);
		// repo ranking sorted by frequency, descending
		List<Map.Entry<String, Integer>> repoList = cb.countRepos(rootPath);
		int count = 0;
		ArrayList<String> topList = new ArrayList<String>();
		for(Map.Entry<String, Integer> mapping: repoList){
			String repoName = mapping.getKey();
			int frequency = mapping.getValue();
			System.out.println(repoName+":"+frequency);
			// keep the first (= most frequent) five repositories
			if(count<5) {
				topList.add(repoName);
				count++;
			}
		}
		File rootFile = new File(rootPath);
		File[] fileList = rootFile.listFiles();
		for(String repo : topList) {
			for(File cpFile : fileList) {
				// line 1 of diffs.txt names the repository of the change pair
				String diffPath = cpFile.getAbsolutePath()+"\\diffs.txt";
				File diffFile = new File(diffPath);
				if(!diffFile.exists())
					throw new Exception("file is not existed!");
				List<String> lines = FileUtils.readLines(diffFile, "UTF-8");
				String repoName = lines.get(0);
				if(repoName.equals(repo)) {
					FileUtils.copyDirectoryToDirectory(cpFile, copyRoot);
				}
			}
		}
	}
	/**
	 * Copies every change pair from {@code cpPath} that is NOT part of the
	 * test set under {@code testPath} into {@code copyPath} (the training
	 * set). Membership is decided by directory name only.
	 */
	private static void filterTrainset(String cpPath, String testPath, String copyPath) throws IOException {
		File testRoot = new File(testPath);
		File[] testList = testRoot.listFiles();
		ArrayList<String> testNames = new ArrayList<String>();
		for(File testFile : testList) {
			testNames.add(testFile.getName());
		}
		ArrayList<File> copyList = new ArrayList<File>();
		File rootFile = new File(cpPath);
		File[] cpList = rootFile.listFiles();
		for(File cpFile : cpList) {
			String cpName = cpFile.getName();
			if(testNames.contains(cpName)) {
				continue;
			}else {
				copyList.add(cpFile);
			}
		}
		// copyPath == null means "dry run": nothing is copied
		if(copyPath!=null) {
			for(File cpFile : copyList) {
				File copyDir = new File(copyPath+"\\"+cpFile.getName());
				FileUtils.copyDirectory(cpFile, copyDir);
			}
		}
	}
	/**
	 * Selects every change pair under {@code rootPath} that contains at least
	 * one changed file with a same-named test class, optionally copying the
	 * selected pairs to {@code copyPath}.
	 *
	 * @return the selected change-pair directories
	 * @throws Exception when a per-pair match list is not a multiple of four
	 *                   (each match is a src/test/dst/test quadruple)
	 */
	private static ArrayList<File> filterTestcases(String rootPath, String copyPath) throws Exception{
		ArrayList<File> cpList = new ArrayList<File>();
		File rootFile = new File(rootPath);
		File[] fileList = rootFile.listFiles();
		for(int i=0;i<fileList.length;i++) {
			File cpFile = fileList[i];
			System.out.println(i+":"+cpFile.getName());
			String cpPath = cpFile.getAbsolutePath();
			ArrayList<String> matchedList = filterTestcase(cpPath);
			int matchSize = matchedList.size();
			// sanity check: matches always come in quadruples
			if(matchSize%4!=0)
				throw new Exception("check the matchedList!");
			if (matchedList.size()!=0) {
				cpList.add(cpFile);
			}
		}
		System.out.println("cpSize:"+cpList.size());
		if(copyPath!=null) {
			for(File cpFile : cpList) {
				File copyDir = new File(copyPath+"\\"+cpFile.getName());
				FileUtils.copyDirectory(cpFile, copyDir);
			}
		}
		return cpList;
	}//Filter and copy cpFiles that contain test cases.
	/**
	 * For one change pair, pairs every changed non-test file with a changed
	 * test file whose name contains the file's name (e.g. Foo / FooTest) and
	 * returns flat quadruples: srcFile, srcTest, dstFile, dstTest.
	 */
	private static ArrayList<String> filterTestcase(String path) throws Exception{
		ArrayList<String> matchedList = new ArrayList<String>();
		File cpFile = new File(path);
		System.out.println("Analyse:"+ cpFile.getName());
		String diffPath = cpFile.getAbsolutePath()+"\\diffs.txt";
		File diffFile = new File(diffPath);
		if(!diffFile.exists())
			throw new Exception("file is not existed!");
		List<String> lines = FileUtils.readLines(diffFile, "UTF-8");
		String repoName = lines.get(0);
		System.out.println(repoName);
//		String srcHash = lines.get(1).split(";")[0];
//		String dstHash = lines.get(1).split(";")[1];
		lines.remove(0);
		lines.remove(0);//remove two lines of the diff file
		for(int i=0;i<lines.size();i++) {
			// each remaining line is "oldPath;newPath"
			String tmpString = lines.get(i);
//			System.out.println(tmpString);
			String path1 = tmpString.split(";")[0];
			String path2 = tmpString.split(";")[1];
			String[] names1 = path1.split("/");
			String src_filename = names1[names1.length-1];
			src_filename = src_filename.substring(0, src_filename.length()-5);//remove ".java"
			String[] names2 = path2.split("/");
			String dst_filename = names2[names2.length-1];
			dst_filename = dst_filename.substring(0, dst_filename.length()-5);//remove ".java"
			// skip lines that are themselves test classes on both sides
			if(containTest(src_filename)&&containTest(dst_filename)) {
				continue;
			}else{
				// look for a test class whose name embeds this file's name
				for(int j=0;j<lines.size();j++) {
					String tmpString1 = lines.get(j);
					String path3 = tmpString1.split(";")[0];
					String path4 = tmpString1.split(";")[1];
					String[] names3 = path3.split("/");
					String src_testname = names3[names3.length-1];
					String[] names4 = path4.split("/");
					String dst_testname = names4[names4.length-1];
					if(src_testname.contains(src_filename)&&containTest(src_testname)
							&&dst_testname.contains(dst_filename)&&containTest(dst_testname)) {
						matchedList.add(src_filename);
						matchedList.add(src_testname);
						matchedList.add(dst_filename);
						matchedList.add(dst_testname);
						break;
					}
				}
			}
		}
		System.out.println("matchSize:"+matchedList.size());
		return matchedList;
	}//filter files that with the same named testcases
	/** Name heuristic: anything containing "Test" (or "TestCase") is a test class. */
	private static Boolean containTest(String name) {
		if((name.contains("TestCase")||name.contains("Test"))){
			return true;
		}else
			return false;
	}
}
| 5,837 | 32.745665 | 105 |
java
|
SeqTrans
|
SeqTrans-master/Migration/src/collect/testcase/ModifyGradleBuild.java
|
package collect.testcase;
import java.io.BufferedReader;
import java.io.CharArrayWriter;
import java.io.File;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.filefilter.DirectoryFileFilter;
import org.apache.commons.io.filefilter.FileFilterUtils;
/**
 * Rewrites gradle build files of collected change pairs so that well-known
 * repository URLs point at Aliyun mirrors (for building behind the firewall),
 * and injects the common Aliyun repositories after each "repositories {".
 */
public class ModifyGradleBuild {
	public static void main(String[] args) throws Exception {
		String cpRoot = "I:\\20210714-Srqtrans_testcase\\Vulnerability_testset_full";
		searchURL(cpRoot);
	}
	/**
	 * Walks every change-pair directory under {@code cpRoot}, rewrites every
	 * build.gradle / build_properties.gradle found recursively, and finally
	 * prints the change pairs that contained no gradle build at all.
	 *
	 * @param cpRoot root directory holding cpN/commit-hash/project trees
	 */
	public static void searchURL(String cpRoot) throws Exception {
		File[] cpList = (new File(cpRoot)).listFiles();
		ArrayList<String> noGradleList = new ArrayList<String>();
		for(File cpFile : cpList) {
			File[] cpDirs = cpFile.listFiles();
			for(File cpDir : cpDirs) {
				// only the checked-out commit directories, not diffs.txt etc.
				if(cpDir.isDirectory()) {
					System.out.println(cpDir.getName());
					Collection<File> collections = FileUtils.listFiles(cpDir,
							FileFilterUtils.suffixFileFilter("build.gradle"), DirectoryFileFilter.INSTANCE);
					ArrayList<File> filters = new ArrayList<>(collections);
					Collection<File> collections1 = FileUtils.listFiles(cpDir,
							FileFilterUtils.suffixFileFilter("build_properties.gradle"), DirectoryFileFilter.INSTANCE);
					ArrayList<File> filters1 = new ArrayList<>(collections1);
					if(filters.size()==0) {
						noGradleList.add(cpFile.getName());
					}
					System.out.println(cpFile.getName()+": "+filters.size());
					for(File gradleBuild : filters) {
						replaceTextContent(gradleBuild);
					}
					for(File gradleBuild : filters1) {
						replaceTextContent(gradleBuild);
					}
				}
			}
		}
		for(String no : noGradleList) {
			System.out.println(no);
		}
	}
	/**
	 * Replaces well-known repository URLs with their Aliyun mirror and injects
	 * the common Aliyun repositories right after each "repositories {" line
	 * (unless they are already present on the next line).
	 *
	 * @param replaceFile gradle build file rewritten in place
	 * @throws IOException on read/write failure
	 */
	public static void replaceTextContent(File replaceFile) throws IOException{
		List<String> old_contents = FileUtils.readLines(replaceFile, "utf-8");
		ArrayList<String> new_contents = new ArrayList<String>();
		// BUG FIX: the original patterns were written as "http?://...". As a
		// regex the '?' binds to the preceding 'p', so https:// URLs were
		// never matched; "https?://" matches both http:// and https://.
		// (The unescaped dots match any character; harmless for these hosts.)
		String srcStr1 = "https?://repo.spring.io/plugins-release";
		String replaceStr1 = "https://maven.aliyun.com/repository/spring-plugin";
		String srcStr3 = "https?://repo.springsource.org/plugins-release";
		String replaceStr3 = "https://maven.aliyun.com/repository/spring-plugin";
		String srcStr4 = "https?://maven.google.com/";
		String replaceStr4 = "https://maven.aliyun.com/repository/google";
		String srcStr5 = "https?://plugins.gradle.org/m2/";
		String replaceStr5 = "https://maven.aliyun.com/repository/gradle-plugin";
		String srcStr6 = "https?://repo.grails.org/grails/core";
		String replaceStr6 = "https://maven.aliyun.com/repository/grails-core";
		String srcStr7 = "https?://repository.apache.org/snapshots/";
		String replaceStr7 = "https://maven.aliyun.com/repository/apache-snapshots";
		// Aliyun repositories injected right after each "repositories {" line
		String addString1 = "        maven {url 'https://maven.aliyun.com/repository/spring'}";
		String addString2 = "        maven {url 'https://maven.aliyun.com/repository/public'}";
		String addString3 = "        maven {url 'https://maven.aliyun.com/repository/jcenter'}";
		String addString4 = "        maven {url 'https://maven.aliyun.com/repository/central'}";
		for(int i=0; i<old_contents.size();i++) {
			String line = old_contents.get(i);
			line = line.replaceAll(srcStr1, replaceStr1);
			line = line.replaceAll(srcStr3, replaceStr3);
			line = line.replaceAll(srcStr4, replaceStr4);
			line = line.replaceAll(srcStr5, replaceStr5);
			line = line.replaceAll(srcStr6, replaceStr6);
			line = line.replaceAll(srcStr7, replaceStr7);
			new_contents.add(line);
			if(line.contains("repositories {")) {
				// bounds-guarded: "repositories {" on the very last line
				// previously threw IndexOutOfBoundsException
				String nextLine = (i+1 < old_contents.size()) ? old_contents.get(i+1) : "";
				// avoid re-inserting the mirrors on repeated runs
				if(!nextLine.contains("https://maven.aliyun.com/repository/spring")) {
					new_contents.add(addString1);
					new_contents.add(addString2);
					new_contents.add(addString3);
					new_contents.add(addString4);
				}
			}
		}
		FileUtils.writeLines(replaceFile, new_contents);
	}
}
| 4,654 | 37.471074 | 98 |
java
|
SeqTrans
|
SeqTrans-master/Migration/src/collect/testcase/ModifyMavenBuild.java
|
package collect.testcase;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.filefilter.DirectoryFileFilter;
import org.apache.commons.io.filefilter.FileFilterUtils;
import org.dom4j.Document;
import org.dom4j.DocumentException;
import org.dom4j.Element;
import org.dom4j.io.SAXReader;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.Writer;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
/**
 * Adds {@code <testFailureIgnore>true</testFailureIgnore>} to the
 * maven-surefire-plugin configuration of every pom.xml found under a root
 * directory, so that test failures do not abort the batch builds.
 *
 * @author chi
 * @version add ignore failure
 */
public class ModifyMavenBuild {
	public static void main(String[] args) throws Exception {
		String cpRoot = "D:\\Program Files\\languagetool-master";
		searchURL(cpRoot);
	}
	/**
	 * Walks every change-pair directory under {@code cpRoot}, patches each
	 * pom.xml found recursively, and prints the change pairs without any
	 * maven build.
	 *
	 * @param cpRoot root directory holding cpN/commit-hash/project trees
	 */
	public static void searchURL(String cpRoot) throws Exception {
		File[] cpList = (new File(cpRoot)).listFiles();
		ArrayList<String> noMavenList = new ArrayList<String>();
		for(File cpFile : cpList) {
			File[] cpDirs = cpFile.listFiles();
			for(File cpDir : cpDirs) {
				// only the checked-out commit directories, not diffs.txt etc.
				if(cpDir.isDirectory()) {
//					if(!cpFile.getName().equals("cp102"))
//						continue;
					System.out.println(cpDir.getName());
					Collection<File> collections = FileUtils.listFiles(cpDir,
							FileFilterUtils.suffixFileFilter("pom.xml"), DirectoryFileFilter.INSTANCE);
					ArrayList<File> filters = new ArrayList<>(collections);
					if(filters.size()==0) {
						noMavenList.add(cpFile.getName());
					}
					System.out.println(cpFile.getName()+": "+filters.size());
					for(File mavenBuild : filters) {
						addTextContent(mavenBuild);
//						replaceTextContent(mavenBuild);
					}
				}
			}
		}
		for(String no : noMavenList) {
			System.out.println(no);
		}
	}
	/**
	 * Inserts {@code <testFailureIgnore>true</testFailureIgnore>} into every
	 * {@code <configuration>} sibling of a "maven-surefire-plugin" element and
	 * writes the document back in place.
	 *
	 * @param replaceFile pom.xml rewritten in place
	 * @throws IOException on write failure (parse failures are only printed)
	 */
	public static void addTextContent(File replaceFile) throws IOException{
		SAXReader reader = new SAXReader();
		try {
			Document read = reader.read(replaceFile);
			// get the root element and flatten the whole tree
			Element root = read.getRootElement();
			ArrayList<Element> nodeList = new ArrayList<Element>();
			nodeList = getNodes(root, nodeList);
			for(Element e : nodeList) {
				if(e.getText().equals("maven-surefire-plugin")) {
					System.out.println("find test");
					Element par = e.getParent();
					Iterator eles = par.elementIterator();
					while(eles.hasNext()) {
						Element child = (Element)eles.next();
						if(child.getName().equals("configuration")) {
							Element addElement = child.addElement("testFailureIgnore");
							addElement.addText("true");
						}
					}
				}
			}
			// write the document back to the .xml file.
			// NOTE(review): FileWriter uses the platform charset -- presumably
			// fine for these ASCII poms, but a UTF-8 pom with non-ASCII
			// content could be corrupted here; verify if that ever occurs.
			Writer writer=new FileWriter(replaceFile);
			read.write(writer);
			// the writer was opened here, so close it ourselves
			writer.close();
		} catch (DocumentException e) {
			// parse failure: log and leave the pom untouched
			e.printStackTrace();
		}
	}
	/**
	 * Recursively collects every descendant element of {@code node} into
	 * {@code nodeList} (the node itself is not added) and returns the list.
	 */
	public static ArrayList<Element> getNodes(Element node, ArrayList<Element> nodeList){
		// (debug dump of the current node's name/text/attributes removed)
		// recursively visit all child elements
		List<Element> listElement=node.elements();//list of all direct children
		nodeList.addAll(listElement);
		for(Element e:listElement){//walk every direct child
			getNodes(e, nodeList);//recurse
		}
		return nodeList;
	}
}
| 3,819 | 29.806452 | 89 |
java
|
SeqTrans
|
SeqTrans-master/Migration/src/collect/testcase/MultiSpotbugsTestRun.java
|
package collect.testcase;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.exec.CommandLine;
import org.apache.commons.exec.DefaultExecutor;
import org.apache.commons.exec.ExecuteWatchdog;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.tuple.Pair;
import org.dom4j.Document;
import org.dom4j.DocumentHelper;
import org.dom4j.Element;
import org.dom4j.io.OutputFormat;
import org.dom4j.io.XMLWriter;
/**
 * Runs SpotBugs (with the FindSecBugs plugin) over every collected change
 * pair, restricting the analysis via a generated filter.xml to just the files
 * changed by the fixing commit.
 */
public class MultiSpotbugsTestRun {
	public static void main(String[] args) throws Exception {
		String cpRoot = "I:\\20210714-Srqtrans_testcase\\Vulnerability_testset_full\\";
		multiRun(cpRoot);
//		spotbugsTest(new File(cpRoot+"cp12"));
	}
	/** Runs {@link #spotbugsTest(File)} for every change-pair directory under {@code cpRoot}. */
	public static void multiRun(String cpRoot) throws IOException {
		File[] cpList = (new File(cpRoot)).listFiles();
		System.out.println(cpList.length);
		for(File cpDir : cpList) {
			System.out.println(cpDir.getName());
			spotbugsTest(cpDir);
		}
	}
	/**
	 * Reads the change pair's diffs.txt, collects the (src, dst) base names of
	 * every changed file, writes them into a SpotBugs filter.xml and launches
	 * the analysis. Launch errors are only printed.
	 */
	public static void spotbugsTest(File cpDir) throws IOException {
		String cpPath = cpDir.getAbsolutePath()+"\\";
		ArrayList<Pair<String, String>> diffFiles = new ArrayList<Pair<String,String>>();
		File diffFile = new File(cpDir.getAbsoluteFile()+"\\diffs.txt");
		List<String> lines = FileUtils.readLines(diffFile, "utf-8");
		// drop the repo-name and hash-pair header lines
		lines.remove(0);
		lines.remove(0);
		for(String line : lines) {
			// each line is "old/path/Foo.java;new/path/Bar.java"
			String[] srcLines = line.split(";")[0].split("/");
			String[] dstLines = line.split(";")[1].split("/");
			String srcName = srcLines[srcLines.length-1];
			srcName = srcName.split("\\.")[0];
			String dstName = dstLines[dstLines.length-1];
			dstName = dstName.split("\\.")[0];
//			System.out.println(srcName);
			Pair<String, String> pair = Pair.of(srcName, dstName);
			diffFiles.add(pair);
		}
		String xmlPath = cpPath + "filter.xml";
		try {
			creatXML(diffFiles, xmlPath);
			runSpotTest(cpDir.getAbsolutePath());
		} catch (Exception e) {
			// best-effort batch run: log the failure and continue
			e.printStackTrace();
		}
	}
	/**
	 * Launches SpotBugs through cmd.exe in {@code classPath}; stdout is
	 * redirected by the shell into sec_output.txt in that directory.
	 */
	private static void runSpotTest(String classPath) throws Exception {
		String plugin = "D:\\Program Files\\spotbugs-4.4.0\\plugin\\findsecbugs-plugin-1.11.0.jar";
		String line = "cmd.exe /C spotbugs -textui -effort:max -pluginList "+plugin+" -sourcePath "+classPath+" > sec_output.txt";
		CommandLine cmdLine = CommandLine.parse(line);
		DefaultExecutor executor = new DefaultExecutor();
		ExecuteWatchdog watchdog = new ExecuteWatchdog(1800000);//timeout 30min
		executor.setWatchdog(watchdog);
		executor.setWorkingDirectory(new File(classPath));
		executor.setExitValue(0);
		executor.execute(cmdLine);
	}
	/**
	 * Writes a SpotBugs FindBugsFilter document with one {@code <Source>}
	 * entry per changed file name (src and dst names when they differ).
	 */
	private static void creatXML(ArrayList<Pair<String, String>> diffFiles, String xmlPath) throws IOException {
		// build the DOM tree
		Document doc = DocumentHelper.createDocument();
		Element root = doc.addElement("FindBugsFilter", "https://github.com/spotbugs/filter/3.0.0");
		root.addAttribute("xmlns:xsi", "http://www.w3.org/2001/XMLSchema-instance");
		root.addAttribute("xsi:schemaLocation",
				"https://github.com/spotbugs/filter/3.0.0 https://raw.githubusercontent.com/spotbugs/spotbugs/3.1.0/spotbugs/etc/findbugsfilter.xsd");
		Element match = root.addElement("Match");
		for(Pair<String, String> pair : diffFiles) {
			String srcName = pair.getLeft();
			String dstName = pair.getRight();
			if(srcName.equals(dstName)) {
				Element source = match.addElement("Source");
				source.addAttribute("name", srcName);
			}else {
				// file was renamed: include both sides
				Element source1 = match.addElement("Source");
				source1.addAttribute("name", srcName);
				Element source2 = match.addElement("Source");
				source2.addAttribute("name", dstName);
			}
		}
		// stream the DOM tree out as XML
		FileOutputStream out = new FileOutputStream(xmlPath);
		// pretty-printed, UTF-8
		OutputFormat fmt = OutputFormat.createPrettyPrint();
		fmt.setEncoding("UTF-8");
		// XML writer over the stream
		XMLWriter writer = new XMLWriter(out,fmt);
		writer.write(doc);
		writer.flush();
		writer.close();// also closes the underlying stream
	}
}
| 4,154 | 31.209302 | 138 |
java
|
SeqTrans
|
SeqTrans-master/Migration/src/collect/testcase/MultiTestRun.java
|
package collect.testcase;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import org.apache.commons.exec.CommandLine;
import org.apache.commons.exec.DefaultExecuteResultHandler;
import org.apache.commons.exec.DefaultExecutor;
import org.apache.commons.exec.ExecuteException;
import org.apache.commons.exec.ExecuteWatchdog;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.filefilter.DirectoryFileFilter;
import org.apache.commons.io.filefilter.FileFilterUtils;
/**
 * Batch-runs the test suite of every checked-out change-pair project,
 * auto-detecting the build system (gradle / maven / ant) from the build file
 * present in the project root.
 */
public class MultiTestRun {
	public static void main(String[] args) throws Exception {
		String cpRoot = "I:\\20210714-Srqtrans_testcase\\Vulnerability_testset_full";
//		countRepos(cpRoot);
		autoTest(cpRoot);
	}
	/**
	 * Runs the tests of every project under {@code cpRoot}. Resumes after a
	 * hard-coded continue-point change pair; a failing or ambiguous build is
	 * reported on stderr and skipped.
	 *
	 * @param cpRoot root directory holding cpN/commit-hash/project trees
	 */
	public static void autoTest(String cpRoot) throws Exception {
		File[] cpList = (new File(cpRoot)).listFiles();
		System.out.println(cpList.length);
		int count = 0;
		// resume support: skip everything up to and including this change pair
		String continue_point = "cp1118";
		Boolean afterPoint = false;
		for(File cpFile : cpList) {
			if(cpFile.getName().equals(continue_point)) {
				count++;
				afterPoint = true;
				continue;
			}
			if(!afterPoint) {
				count++;
				continue;
			}
			File[] cpDirs = cpFile.listFiles();
			System.out.println("test");
			for(File cpDir : cpDirs) {
				if(cpDir.isDirectory()) {
					// the single checked-out project inside the commit directory
					String classPath = cpDir.listFiles()[0].getAbsolutePath();
					File directory = cpDir.listFiles()[0];
					System.err.println(count+" "+cpFile.getName()+": "+cpDir.getName());
					// detect the build system from the top-level build file
					Collection<File> collections = FileUtils.listFiles(directory,
							FileFilterUtils.suffixFileFilter("build.gradle"), null);
					ArrayList<File> gradleFilters = new ArrayList<>(collections);
					collections.clear();
					collections = FileUtils.listFiles(directory,
							FileFilterUtils.suffixFileFilter("pom.xml"), null);
					ArrayList<File> mavenFilters = new ArrayList<>(collections);
					collections.clear();
					collections = FileUtils.listFiles(directory,
							FileFilterUtils.suffixFileFilter("build.xml"), null);
					ArrayList<File> antFilters = new ArrayList<>(collections);
					try {
						if(gradleFilters.size()!=0&&mavenFilters.size()==0
								&&antFilters.size()==0) {
							runGradleTest(classPath);
						}else if(mavenFilters.size()!=0&&gradleFilters.size()==0
								&&antFilters.size()==0) {
							runMavenTest(classPath);
						}else if(antFilters.size()!=0&&gradleFilters.size()==0
								&&mavenFilters.size()==0) {
							runAntTest(classPath);
						}else {
							// zero or several build systems: cannot decide
							throw new Exception("check it! "+classPath);
						}
						Thread.sleep(3000);
					} catch (Exception e) {
						// best-effort batch: report the failing pair and move on
						System.err.println(count+" "+cpFile.getName()+": "+cpDir.getName());
					}
				}
			}
			count++;
		}
	}
	/** Prints the set of distinct repository names found in the change pairs. */
	public static void countRepos(String cpRoot) throws IOException {
		File[] cpList = (new File(cpRoot)).listFiles();
		HashSet<String> repoNames = new HashSet<String>();
		for(File cpFile : cpList) {
			String diffPath = cpFile.getAbsolutePath()+"\\diffs.txt";
			List<String> lines = FileUtils.readLines(new File(diffPath), "UTF-8");
			String repoName = lines.get(0);
			repoNames.add(repoName);
		}
		for(String name : repoNames) {
			System.out.println(name);
		}
	}
	/**
	 * Runs one build command with a 30-minute watchdog in the given working
	 * directory. Extracted from three byte-identical private runners that
	 * differed only in the command line.
	 */
	private static void runBuildCommand(String classPath, String line) throws Exception {
		CommandLine cmdLine = CommandLine.parse(line);
		DefaultExecutor executor = new DefaultExecutor();
		ExecuteWatchdog watchdog = new ExecuteWatchdog(1800000);//timeout 30min
		executor.setWatchdog(watchdog);
		executor.setWorkingDirectory(new File(classPath));
		executor.setExitValue(1);
		executor.execute(cmdLine);
	}
	private static void runGradleTest(String classPath) throws Exception {
		runBuildCommand(classPath, "cmd.exe /C gradlew.bat check --continue");
	}
	private static void runMavenTest(String classPath) throws Exception {
		runBuildCommand(classPath, "cmd.exe /C mvn test -DtestFailureIgnore=true");
	}
	private static void runAntTest(String classPath) throws Exception {
		runBuildCommand(classPath, "cmd.exe /C ant test -keep-going");
	}
}
| 5,098 | 34.165517 | 137 |
java
|
SeqTrans
|
SeqTrans-master/Migration/src/collect/testcase/SearchLocation.java
|
package collect.testcase;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.filefilter.RegexFileFilter;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.tuple.Pair;
import java.io.File;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
public class SearchLocation {
private static int total_match = 0;
	public static void main(String[] args) throws Exception {
		// machine-specific Windows paths: the collected change pairs and the
		// directory of per-pair GumTree diff reports
		String cpRoot = "I:\\20210714-Srqtrans_testcase\\Vulnerability_testset_full\\";
		String diffPath = "D:\\workspace\\eclipse2018\\Migration\\data_num";
//		searchLocation(new File(cpRoot+"cp12"));
//		multiSearch(cpRoot);
		compareLocation(cpRoot, diffPath);
	}
public static void compareLocation(String cpRoot, String diff_location) throws Exception {
File[] cpList = (new File(cpRoot)).listFiles();
System.out.println(cpList.length);
ArrayList<String> skipList = new ArrayList<String>();
skipList.add("cp19");
skipList.add("cp233");
skipList.add("cp241");
skipList.add("cp433");
skipList.add("cp870");
int totalNum = 0;
for(int i=0;i<cpList.length;i++) {
File cpDir = cpList[i];
if(skipList.contains((cpDir.getName())))
continue;
System.out.println("Analyse: "+cpDir.getName());
File matchFile = new File(cpDir.getAbsoluteFile()+"\\matchLines1.txt");
List<String> matchLines = FileUtils.readLines(matchFile, "utf-8");
System.out.println(matchLines.size());
Collection<File> collections = FileUtils.listFiles(new File(diff_location),
new RegexFileFilter(".*"+cpDir.getName()+".txt"), null);
ArrayList<File> filters = new ArrayList<>(collections);
// System.out.println(filters.get(0).getName());
List<String> diffLines = FileUtils.readLines(filters.get(0), "utf-8");
HashMap<String, ArrayList<Integer>> matchMap = analyseSpotsResults(matchLines);
HashMap<String, ArrayList<ArrayList<Integer>>> diffMap = analyseGumtreeResults(diffLines);
totalNum += compareMap(matchMap, diffMap);
}
System.out.println(totalNum);
System.out.println(total_match);
}
private static int compareMap(HashMap<String, ArrayList<Integer>> matchMap,
HashMap<String, ArrayList<ArrayList<Integer>>> diffMap) {
int DiffNum = 0;
int matchNum = 0;
for(Map.Entry<String, ArrayList<ArrayList<Integer>>> entry : diffMap.entrySet()) {
String fileName = entry.getKey();
ArrayList<ArrayList<Integer>> lineList = entry.getValue();
if(matchMap.containsKey(fileName)) {
for(ArrayList<Integer> lineNums : lineList) {
ArrayList<Integer> matchList = matchMap.get(fileName);
for(int n : matchList){
if(lineNums.contains(n)){
matchNum++;
total_match++;
}
}
}
}
DiffNum += lineList.size();
}
System.out.println("Total:"+DiffNum);
System.out.println("Match:"+matchNum);
return DiffNum;
}
public static HashMap<String, ArrayList<ArrayList<Integer>>> analyseGumtreeResults(List<String> diffLines) throws Exception {
HashMap<String, ArrayList<ArrayList<Integer>>> diffMap = new HashMap<String, ArrayList<ArrayList<Integer>>>();
for(String line : diffLines) {
String[] splits = line.split(";");
String fileName = splits[0].split("\\\\")[splits[0].split("\\\\").length-1]
.split("\\.")[0];
String lineNum = splits[2];
int begin = Integer.valueOf(lineNum.split("->")[0].split(",")[0]);
int end = Integer.valueOf(lineNum.split("->")[1].split(",")[0]);
ArrayList tmpList = new ArrayList();
for(int i=begin;i<=end;i++) {
tmpList.add(i);
}
if(fileName!=null&&lineNum!=null) {
if(diffMap.containsKey(fileName)) {
diffMap.get(fileName).add(tmpList);
}else {
ArrayList<ArrayList<Integer>> lineList = new ArrayList();
lineList.add(tmpList);
diffMap.put(fileName, lineList);
}
}else
throw new Exception("Check location!");
}
return diffMap;
}
public static HashMap<String, ArrayList<Integer>> analyseSpotsResults(List<String> matchLines) throws Exception {
String regex = "[aA]t\\b.*.java:\\[line\\s\\d+\\]";
Pattern p = Pattern.compile(regex);
HashMap<String, ArrayList<Integer>> matchMap = new HashMap<String, ArrayList<Integer>>();
for(String line : matchLines) {
// System.out.println(line);
Matcher m = p.matcher(line);
if (m.find()) {
String location = m.group();
// System.out.println("Found value: " + location);
if(location.contains("at")&&location.contains("At")) {
if(StringUtils.countMatches(location, "[")==2&&StringUtils.countMatches(location, "]")==2) {
String location1 = location.split("]")[0]+"]";
addMap(matchMap, location1);
}
Pattern p1 = Pattern.compile("At\\b.*.java:\\[line\\s\\d+\\]");
Matcher m1 = p1.matcher(location);
m1.find();
String location2 = m1.group();
addMap(matchMap, location2);
}else {
addMap(matchMap, location);
}
}else
continue;
}
return matchMap;
}
private static HashMap<String, ArrayList<Integer>> addMap(
HashMap<String, ArrayList<Integer>> map, String location) throws Exception {
String fileName = location.split("\\.java")[0];
fileName = fileName.substring(3, fileName.length());
String lineNum = location.split("\\.java")[1];
lineNum = lineNum.substring(7, lineNum.length()-1);
if(fileName!=null&&lineNum!=null) {
// System.out.println("Name:"+fileName);
// System.out.println(lineNum);
if(map.containsKey(fileName)) {
map.get(fileName).add(Integer.valueOf(lineNum));
}else {
ArrayList<Integer> lineList = new ArrayList<Integer>();
lineList.add(Integer.valueOf(lineNum));
map.put(fileName, lineList);
}
}else {
System.out.println(location);
System.out.println(fileName);
System.out.println(lineNum);
throw new Exception("Check location!");
}
return map;
}
public static void multiSearch(String cpRoot) throws Exception {
File[] cpList = (new File(cpRoot)).listFiles();
System.out.println(cpList.length);
for(File cpDir : cpList) {
System.out.println(cpDir.getName());
searchLocation(cpDir);
}
}
public static void searchLocation(File cpDir) throws Exception {
ArrayList<Pair<String, String>> diffFiles = new ArrayList<Pair<String,String>>();
ArrayList<String> matchList = new ArrayList<String>();
File diffFile = new File(cpDir.getAbsoluteFile()+"\\diffs.txt");
List<String> lines = FileUtils.readLines(diffFile, "utf-8");
lines.remove(0);
lines.remove(0);
for(String line : lines) {
String[] srcLines = line.split(";")[0].split("/");
String[] dstLines = line.split(";")[1].split("/");
String srcName = srcLines[srcLines.length-1];
srcName = srcName.split("\\.")[0];
String dstName = dstLines[dstLines.length-1];
dstName = dstName.split("\\.")[0];
Pair<String, String> pair = Pair.of(srcName, dstName);
diffFiles.add(pair);
}
File diffFile1 = new File(cpDir.getAbsoluteFile()+"\\sec_output.txt");
List<String> lines1 = FileUtils.readLines(diffFile1, "utf-8");
for(Pair<String, String> pair : diffFiles) {
String srcName = pair.getLeft();
System.out.println(srcName);
for(String line : lines1) {
// System.out.println(line);
if(line.contains(srcName)) {
System.out.println("Find: "+line);
matchList.add(line);
}
}
}
File writeFile = new File(cpDir.getAbsoluteFile()+"\\matchLines1.txt");
FileUtils.writeLines(writeFile, matchList);
}
}
| 7,753 | 33.15859 | 126 |
java
|
SeqTrans
|
SeqTrans-master/Migration/src/collect/testcase/SearchUncompiledProject.java
|
package collect.testcase;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.filefilter.DirectoryFileFilter;
import org.apache.commons.io.filefilter.FileFilterUtils;
import org.apache.commons.lang3.tuple.Pair;
/**
* @author chi
* 搜索跑完自动化测试用例的项目,是否有未编译成功的
*/
/**
 * Scans automated-test checkpoint directories and reports projects whose
 * diffed source files have no corresponding compiled .class files, i.e.
 * projects that did not compile successfully.
 */
public class SearchUncompiledProject {

    public static void main(String[] args) throws Exception {
        String cpRoot = "I:\\20210714-Srqtrans_testcase\\Vulnerability_testset_full\\";
        ArrayList<String> uncompileList = startMultiSearch(cpRoot);
        for (String name : uncompileList) {
            File cpDir = new File(cpRoot + name);
            ArrayList<Pair<String, String>> missFiles = searchSingleProject(cpDir);
            System.out.println(name + "," + getRepoName(cpDir));
            for (Pair<String, String> missing : missFiles) {
                System.out.println(missing.getLeft() + "," + missing.getRight());
            }
        }
    }

    /**
     * Checks every checkpoint under {@code cpRoot} and returns the names of
     * those with at least one missing compiled class.
     */
    public static ArrayList<String> startMultiSearch(String cpRoot) throws IOException {
        ArrayList<String> brokenProjects = new ArrayList<String>();
        for (File candidate : (new File(cpRoot)).listFiles()) {
            if (!searchSingleProject(candidate).isEmpty()) {
                brokenProjects.add(candidate.getName());
            }
        }
        return brokenProjects;
    }

    /**
     * Reads the checkpoint's diffs.txt, derives the expected .class file name
     * for each diffed source pair, and returns the pairs whose class files
     * cannot be found under the checkpoint directory.
     */
    public static ArrayList<Pair<String, String>> searchSingleProject(File cpDir) throws IOException {
        List<String> lines = FileUtils.readLines(new File(cpDir.getAbsoluteFile() + "\\diffs.txt"), "utf-8");
        // Skip the first two lines: they are not diff entries (line 1 holds
        // the repository name, see getRepoName).
        lines.remove(0);
        lines.remove(0);
        ArrayList<Pair<String, String>> diffFiles = new ArrayList<Pair<String, String>>();
        for (String line : lines) {
            String src = classFileName(line.split(";")[0]);
            String dst = classFileName(line.split(";")[1]);
            diffFiles.add(Pair.of(src, dst));
        }
        ArrayList<Pair<String, String>> missFiles = new ArrayList<Pair<String, String>>();
        for (Pair<String, String> pair : diffFiles) {
            String srcName = pair.getLeft();
            String dstName = pair.getRight();
            if (srcName.equals(dstName)) {
                // Same file on both sides: expect at least two compiled
                // copies, one per project version.
                if (findClassFiles(cpDir, srcName).size() < 2) {
                    missFiles.add(pair);
                }
            } else {
                ArrayList<File> srcHits = findClassFiles(cpDir, srcName);
                ArrayList<File> dstHits = findClassFiles(cpDir, dstName);
                if (srcHits.size() < 1 || dstHits.size() < 1) {
                    missFiles.add(pair);
                }
            }
        }
        return missFiles;
    }

    /** Maps a diff path like "a/b/Foo.java" to the class file name "Foo.class". */
    private static String classFileName(String diffPath) {
        String[] segments = diffPath.split("/");
        return segments[segments.length - 1].split("\\.")[0] + ".class";
    }

    /** Recursively lists files under {@code cpDir} ending with {@code className}. */
    private static ArrayList<File> findClassFiles(File cpDir, String className) {
        Collection<File> hits = FileUtils.listFiles(cpDir,
                FileFilterUtils.suffixFileFilter(className), DirectoryFileFilter.INSTANCE);
        return new ArrayList<>(hits);
    }

    /** Returns the repository name recorded on the first line of diffs.txt. */
    private static String getRepoName(File cpDir) throws IOException {
        List<String> lines = FileUtils.readLines(new File(cpDir.getAbsoluteFile() + "\\diffs.txt"), "utf-8");
        return lines.get(0);
    }
}
| 3,588 | 35.252525 | 101 |
java
|
SeqTrans
|
SeqTrans-master/Migration/src/collect/testcase/TestMissingUnkonw.java
|
package collect.testcase;
import java.io.File;
import org.eclipse.jgit.lib.ObjectId;
import org.eclipse.jgit.lib.Repository;
import org.eclipse.jgit.revwalk.RevCommit;
import org.eclipse.jgit.revwalk.RevWalk;
import org.eclipse.jgit.storage.file.FileRepositoryBuilder;
/**
 * Manual smoke test: resolves a specific commit in a local Tomcat 7 clone
 * to verify that the repository can be opened and the commit parsed.
 */
public class TestMissingUnkonw {

    public static void main(String[] args) throws Exception {
        String classPath = "J:\\git_repo\\tomcat70\\";
        String commit = "a4bfa01d4e6fd677f6831ab7b3e513c8b94c6185";
        FileRepositoryBuilder builder = new FileRepositoryBuilder();
        builder.setMustExist(true);
        builder.addCeilingDirectory(new File(classPath));
        builder.findGitDir(new File(classPath));
        Repository repo = builder.build();
        // Close the RevWalk even when parsing fails (the original leaked it),
        // releasing the object-reader resources it holds.
        try (RevWalk walk = new RevWalk(repo)) {
            ObjectId versionId = repo.resolve(commit);
            walk.parseCommit(versionId);
        }
    }
}
| 885 | 29.551724 | 62 |
java
|
SeqTrans
|
SeqTrans-master/Migration/src/structure/API.java
|
package structure;
import java.util.ArrayList;
public class API {
private String longName;
private String className;
private String methodName;
private ArrayList<String> params;
public API(String longName, String className, String methodName, ArrayList<String> params) {
this.longName = longName;
this.className = className;
this.methodName = methodName;
this.params = params;
}
public String getLongName() {
return longName;
}
public String getClassName() {
return className;
}
public String getMethodName() {
return methodName;
}
public ArrayList<String> getParams() {
return params;
}
public void setLongName(String longName) {
this.longName = longName;
}
public void setClassName(String className) {
this.className = className;
}
public void setMethodName(String methodName) {
this.methodName = methodName;
}
public void setParams(ArrayList<String> params) {
this.params = params;
}
@Override
public int hashCode() {
// TODO Auto-generated method stub
return super.hashCode();
}
@Override
public boolean equals(Object obj) {
if(obj instanceof API) {
API targetAPI = (API) obj;
String targetLongName = targetAPI.getLongName();
ArrayList<String> params = targetAPI.getParams();
if(targetLongName.equals(this.longName)) {
if(params==null&&this.params==null) {
return true;
}else {
if(this.params.size()!=params.size()) {
return false;
}else {
for(int i=0;i<params.size();i++) {
String param1 = params.get(i);
String param2 = this.params.get(i);
if(!param1.equals(param2)) {
return false;
}
}
}
}
}else {
return false;
}
return true;
}
return false;
}
}
| 1,769 | 16.878788 | 93 |
java
|
SeqTrans
|
SeqTrans-master/Migration/src/structure/ChangePair.java
|
package structure;
import java.util.List;
import org.eclipse.jgit.diff.DiffEntry;
import org.eclipse.jgit.revwalk.RevCommit;
/**
 * Holds one pair of adjacent commits (new/old) together with the list of
 * file-level diffs between them, plus bookkeeping about the repository they
 * came from (root path and name).
 */
public class ChangePair {

    private RevCommit newCommit;
    private RevCommit oldCommit;
    private String rootPath;
    private String repoName;
    private List<DiffEntry> diffs;

    /** Creates a populated pair; rootPath/repoName are assigned via setters. */
    public ChangePair(RevCommit newCommit, RevCommit oldCommit, List<DiffEntry> diffs) {
        this.diffs = diffs;
        this.oldCommit = oldCommit;
        this.newCommit = newCommit;
    }

    /** Creates an empty pair; callers fill every field through the setters. */
    public ChangePair() {
    }

    public RevCommit getNewCommit() {
        return this.newCommit;
    }

    public void setNewCommit(RevCommit newCommit) {
        this.newCommit = newCommit;
    }

    public RevCommit getOldCommit() {
        return this.oldCommit;
    }

    public void setOldCommit(RevCommit oldCommit) {
        this.oldCommit = oldCommit;
    }

    public String getRootPath() {
        return this.rootPath;
    }

    public void setRootPath(String rootPath) {
        this.rootPath = rootPath;
    }

    public String getRepoName() {
        return this.repoName;
    }

    public void setRepoName(String repoName) {
        this.repoName = repoName;
    }

    public List<DiffEntry> getDiffs() {
        return this.diffs;
    }

    public void setDiffs(List<DiffEntry> diffs) {
        this.diffs = diffs;
    }
}
| 1,176 | 17.983871 | 85 |
java
|
SeqTrans
|
SeqTrans-master/Migration/src/structure/Diff.java
|
package structure;
/**
 * Value object describing one file-level diff between two commits: the two
 * commit ids, the old/new file paths, the raw deleted/added text, and the
 * (mutable) line ranges the change spans in each version.
 */
public class Diff {

    private String oldCommitId;
    private String newCommitId;
    private String oldPath;
    private String newPath;
    private String addLine;
    private String deleteLine;
    // Line ranges are filled in later via the setters; 0 means "not set yet".
    private int oldBeginLine;
    private int oldEndLine = 0;
    private int newBeginLine = 0;
    private int newEndLine = 0;

    public Diff(String oldCommitId, String newCommitId, String oldPath,
            String newPath, String deleteLine, String addLine) {
        this.addLine = addLine;
        this.deleteLine = deleteLine;
        this.newPath = newPath;
        this.oldPath = oldPath;
        this.newCommitId = newCommitId;
        this.oldCommitId = oldCommitId;
    }

    public String getOldCommitId() {
        return this.oldCommitId;
    }

    public String getNewCommitId() {
        return this.newCommitId;
    }

    public String getOldPath() {
        return this.oldPath;
    }

    public String getNewPath() {
        return this.newPath;
    }

    public int getOldBeginLine() {
        return this.oldBeginLine;
    }

    public void setOldBeginLine(int oldBeginLine) {
        this.oldBeginLine = oldBeginLine;
    }

    public int getOldEndLine() {
        return this.oldEndLine;
    }

    public void setOldEndLine(int oldEndLine) {
        this.oldEndLine = oldEndLine;
    }

    public int getNewBeginLine() {
        return this.newBeginLine;
    }

    public void setNewBeginLine(int newBeginLine) {
        this.newBeginLine = newBeginLine;
    }

    public int getNewEndLine() {
        return this.newEndLine;
    }

    public void setNewEndLine(int newEndLine) {
        this.newEndLine = newEndLine;
    }
}
| 1,425 | 17.763158 | 69 |
java
|
SeqTrans
|
SeqTrans-master/Migration/src/structure/Location.java
|
package structure;
import java.util.List;
import gumtreediff.tree.ITree;
import gumtreediff.tree.TreeContext;
/**
 * Computes the source span (begin/last line and column) covered by a
 * {@link SubTree} by scanning every node of the subtree.
 */
public class Location {
	private int BeginLine;
	private int LastLine;
	private int BeginCol;
	private int LastCol;
	/**
	 * Scans all nodes of {@code st} (root included) and records the smallest
	 * non-zero begin line/column and the largest last line/column seen.
	 * "block" nodes are skipped because their last line would extend the
	 * span to the closing brace.
	 */
	public Location(SubTree st) {
		TreeContext tc = st.getTC();
		ITree stRoot = st.getRoot();
		List<ITree> nodesList = stRoot.getDescendants();
		nodesList.add(stRoot);// include the subtree root among the scanned nodes
		int BeginLine = 0;
		int LastLine = 0;
		int BeginCol = 0;
		int LastCol = 0;
		for(ITree node : nodesList) {
			int line = node.getLine();
			int col = node.getColumn();
			int lastLine = node.getLastLine();
			int lastCol = node.getLastColumn();
			String type = tc.getTypeLabel(node);
			if(!type.equals("block")) {// skip "block" nodes: their last line is the closing-brace line
				if(BeginLine==0&&line!=0) {
					BeginLine = line;
				}else if(line < BeginLine&&line!=0) {
					BeginLine = line;
				}//begin line: smallest non-zero value seen
				if(BeginCol==0&&col!=0) {
					BeginCol = col;
				}else if(col < BeginCol&&col!=0) {
					BeginCol = col;
				}//begin column: smallest non-zero value seen
				if(lastLine > LastLine) {
					LastLine = lastLine;
				}//last line: largest value seen
				if(lastCol > LastCol) {
					LastCol = lastCol;
				}//last column: largest value seen
			}else if(type.equals("empty_stmt"))// NOTE(review): unreachable — an "empty_stmt" node is not a "block" node, so it always takes the branch above; this special case is dead code. TODO confirm intended handling of empty_stmt.
				continue;
		}
		this.BeginLine = BeginLine;
		this.BeginCol = BeginCol;
		this.LastLine = LastLine;
		this.LastCol = LastCol;
		System.out.println("location:"+BeginLine+","+LastLine+","+BeginCol+","+LastCol);
	}
	public int getBeginLine() {
		return BeginLine;
	}
	public int getLastLine() {
		return LastLine;
	}
	public int getBeginCol() {
		return BeginCol;
	}
	public int getLastCol() {
		return LastCol;
	}
}
| 1,653 | 20.763158 | 82 |
java
|
SeqTrans
|
SeqTrans-master/Migration/src/test/Debug.java
|
package test;
import java.io.BufferedWriter;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.List;
import org.eclipse.jgit.api.Git;
import org.eclipse.jgit.api.errors.GitAPIException;
import org.eclipse.jgit.diff.DiffEntry;
import org.eclipse.jgit.diff.DiffFormatter;
import org.eclipse.jgit.diff.RawTextComparator;
import org.eclipse.jgit.lib.ObjectId;
import org.eclipse.jgit.lib.ObjectReader;
import org.eclipse.jgit.lib.Repository;
import org.eclipse.jgit.revwalk.RevCommit;
import org.eclipse.jgit.revwalk.RevWalk;
import org.eclipse.jgit.storage.file.FileRepositoryBuilder;
import org.eclipse.jgit.treewalk.CanonicalTreeParser;
/**
 * Manual debugging entry point: resolves one commit in a local repository,
 * diffs it against its predecessor, and dumps each file diff into its own
 * file under test//diffN.txt.
 */
public class Debug {
	public static void main(String[] args) throws IOException {
		// TODO Auto-generated method stub
		String versionCommit="d72bd78c19dfb7b57395a66ae8d9269d59a87bd2";// commit hash to inspect
		String path="J:\\git_repo\\cxf\\";// local clone of the target repository
		FileRepositoryBuilder builder = new FileRepositoryBuilder();
		builder.setMustExist(true);
		builder.addCeilingDirectory(new File(path));
		builder.findGitDir(new File(path));
		Repository repo;
		repo = builder.build();
		RevWalk walk = new RevWalk(repo);
		ObjectId versionId=repo.resolve(versionCommit);
		RevCommit currentCommit=walk.parseCommit(versionId);
		System.out.println(currentCommit.getName());
		List<DiffEntry> diffFix=getChangedFileList(currentCommit, repo);
		int count = 0;
		System.out.println("size:"+diffFix.size());
		// Format each diff entry in memory, then write it to a numbered file.
		for (DiffEntry entry : diffFix) {
			ByteArrayOutputStream out = new ByteArrayOutputStream();
			DiffFormatter df = new DiffFormatter(out);
			df.setDiffComparator(RawTextComparator.WS_IGNORE_ALL);
			df.setRepository(repo);
			String path1 = "test//diff"+String.valueOf(count)+".txt";
			BufferedWriter wr = new BufferedWriter(new FileWriter(new File(path1)));
			df.format(entry);
			String diffText = out.toString("UTF-8");
//			System.out.println(diffText);
			wr.append("test"+diffText);
			wr.close();
			df.close();
			count++;
//			System.out.println(entry.getOldPath());
		}
		walk.close();
//		RevWalk walk2 = new RevWalk(repo);
//		ObjectId versionId2=repo.resolve(versionCommit);
//		RevCommit verCommit2=walk2.parseCommit(versionId2);
//		List<DiffEntry> diffFix2=RunJGit.getChangedFileList(verCommit2,repo);
//		for (DiffEntry entry : diffFix2) {
//			System.out.println(entry.getNewPath());
//		}
	}
	/**
	 * Returns the diffs between {@code revCommit} and its immediate
	 * predecessor, or null when there is no predecessor.  I/O and git
	 * failures are printed and swallowed, also yielding null.
	 */
	public static List<DiffEntry> getChangedFileList(RevCommit revCommit, Repository repo) {
		List<DiffEntry> returnDiffs = null;
		try {
			RevCommit previousCommit=getPrevHash(revCommit,repo);
			if(previousCommit==null)
				return null;
			ObjectId head=revCommit.getTree().getId();
			ObjectId oldHead=previousCommit.getTree().getId();
			System.out.println("Printing diff between the Revisions: " + revCommit.getName() + " and " + previousCommit.getName());
			// prepare the two iterators to compute the diff between
			// NOTE(review): this ObjectReader is never closed; compare the
			// try-with-resources variant in RunJGit.getChangedFileList.
			ObjectReader reader = repo.newObjectReader();
			CanonicalTreeParser oldTreeIter = new CanonicalTreeParser();
			oldTreeIter.reset(reader, oldHead);
			CanonicalTreeParser newTreeIter = new CanonicalTreeParser();
			newTreeIter.reset(reader, head);
			// finally get the list of changed files
			try (Git git = new Git(repo)) {
				List<DiffEntry> diffs= git.diff()
						.setNewTree(newTreeIter)
						.setOldTree(oldTreeIter)
						.call();
				for (DiffEntry entry : diffs) {
//					System.out.println("Entry: " + entry);
				}
				returnDiffs=diffs;
			} catch (GitAPIException e) {
				// TODO Auto-generated catch block
				e.printStackTrace();
			}
		} catch (IOException e) {
			// TODO Auto-generated catch block
			e.printStackTrace();
		}
		return returnDiffs;
	}
	/**
	 * Walks backwards from {@code commit} and returns its immediate
	 * predecessor, or null when the commit is the first one reachable.
	 */
	public static RevCommit getPrevHash(RevCommit commit, Repository repo) throws IOException {
		try(RevWalk walk = new RevWalk(repo)){
			// Starting point
			walk.markStart(commit);
			int count = 0;
			for (RevCommit rev : walk) {
				// got the previous commit.
				if (count == 1) {
					return rev;
				}
				count++;
			}
			walk.dispose();
		}
		//Reached end and no previous commits.
		return null;
	}
	/** Prints a git epoch-seconds commit time formatted as "yyyy-MM-dd HH:mm:ss". */
	static void printTime(int commitTime) {
		SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
		String timestampString=String.valueOf(commitTime);
		Long timestamp = Long.parseLong(timestampString) * 1000;
		String date = formatter.format(new Date(timestamp));
		System.out.println(date);
	}
}
| 4,861 | 33.978417 | 122 |
java
|
SeqTrans
|
SeqTrans-master/Migration/src/test/MavenConfig.java
|
package test;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.filefilter.DirectoryFileFilter;
import org.apache.commons.io.filefilter.FileFilterUtils;
import org.dom4j.Document;
import org.dom4j.DocumentException;
import org.dom4j.Element;
import org.dom4j.io.SAXReader;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.Writer;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
/**
 * Walks a project tree, finds every pom.xml, and injects
 * {@code <testFailureIgnore>true</testFailureIgnore>} into the
 * maven-surefire-plugin configuration so test failures do not abort builds.
 */
public class MavenConfig {
	/**
	 * @author chi
	 * @version add ignore failure
	 */
	public static void main(String[] args) throws Exception {
		String cpRoot = "D:\\Program Files\\languagetool-master";
		searchURL(cpRoot);
	}
	/**
	 * Finds every pom.xml under {@code cpRoot} (recursively) and patches it
	 * via {@link #addTextContent(File)}.
	 */
	public static void searchURL(String cpRoot) throws Exception {
		File cpDir = new File(cpRoot);
		System.out.println(cpDir.getName());
		Collection<File> collections = FileUtils.listFiles(cpDir,
				FileFilterUtils.suffixFileFilter("pom.xml"), DirectoryFileFilter.INSTANCE);
		ArrayList<File> filters = new ArrayList<>(collections);
		for(File mavenBuild : filters) {
			addTextContent(mavenBuild);
//			replaceTextContent(mavenBuild);
		}
	}
	/**
	 * Adds {@code <testFailureIgnore>true</testFailureIgnore>} to the
	 * {@code <configuration>} element next to every
	 * "maven-surefire-plugin" artifactId in the given pom.xml, then writes
	 * the document back over the original file.
	 *
	 * @throws IOException if the patched document cannot be written back
	 */
	public static void addTextContent(File replaceFile) throws IOException{
		SAXReader reader = new SAXReader();
		try {
			Document read = reader.read(replaceFile);
			// Collect every element of the document, then locate the
			// surefire plugin by its text content.
			Element root = read.getRootElement();
			ArrayList<Element> nodeList = new ArrayList<Element>();
			nodeList = getNodes(root, nodeList);
			for(Element e : nodeList) {
				if(e.getText().equals("maven-surefire-plugin")) {
					System.out.println("find test");
					Element par = e.getParent();
					Iterator eles = par.elementIterator();
					while(eles.hasNext()) {
						Element child = (Element)eles.next();
						if(child.getName().equals("configuration")) {
							Element addElement = child.addElement("testFailureIgnore");
							addElement.addText("true");
						}
					}
				}
			}
			// Write the modified document back in place of the original pom.
			Writer writer=new FileWriter(replaceFile);
			read.write(writer);
			// Close explicitly so the content is flushed to disk.
			writer.close();
		} catch (DocumentException e) {
			e.printStackTrace();
		}
	}
	/**
	 * Recursively collects every descendant element of {@code node} into
	 * {@code nodeList} (depth-first) and returns the same list.
	 */
	public static ArrayList<Element> getNodes(Element node, ArrayList<Element> nodeList){
		List<Element> listElement=node.elements();// direct children of this node
		nodeList.addAll(listElement);
		for(Element e:listElement){// recurse into each child
			getNodes(e, nodeList);
		}
		return nodeList;
	}
}
| 3,382 | 30.915094 | 91 |
java
|
SeqTrans
|
SeqTrans-master/Migration/src/test/RunJGit.java
|
package test;
import java.io.File;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.io.PrintWriter;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Date;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import org.eclipse.jgit.storage.file.FileRepositoryBuilder;
import org.eclipse.jgit.treewalk.CanonicalTreeParser;
import org.eclipse.jgit.api.Git;
import org.eclipse.jgit.api.errors.GitAPIException;
import org.eclipse.jgit.diff.DiffEntry;
import org.eclipse.jgit.diff.DiffEntry.ChangeType;
import org.eclipse.jgit.lib.ObjectId;
import org.eclipse.jgit.lib.ObjectReader;
import org.eclipse.jgit.lib.Repository;
import org.eclipse.jgit.revwalk.RevCommit;
import org.eclipse.jgit.revwalk.RevWalk;
import org.json.simple.JSONArray;
import org.json.simple.JSONObject;
import org.json.simple.parser.JSONParser;
import org.json.simple.parser.ParseException;
/**
 * Bug-introduction analysis driver: for one fixed project version, cross
 * references an issue list (issue_list.json) with fix/introducer commit
 * pairs (fix_and_introducers_pairs.json), keeps issues whose introducing
 * commit predates the version while the fix postdates it, and appends the
 * files touched by both commits to Bug-Analysis-Results.csv.
 */
public class RunJGit {
	/** Prints a git epoch-seconds commit time formatted as "yyyy-MM-dd HH:mm:ss". */
	static void printTime(int commitTime) {
		SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
		String timestampString=String.valueOf(commitTime);
		Long timestamp = Long.parseLong(timestampString) * 1000;
		String date = formatter.format(new Date(timestamp));
		System.out.println(date);
	}
	/**
	 * Returns the diffs between {@code revCommit} and its immediate
	 * predecessor, or null when there is no predecessor.  I/O and git
	 * failures are printed and swallowed, also yielding null.
	 */
	static List<DiffEntry> getChangedFileList(RevCommit revCommit, Repository repo) {
		List<DiffEntry> returnDiffs = null;
		try {
			RevCommit previsouCommit=getPrevHash(revCommit,repo);
			if(previsouCommit==null)
				return null;
			ObjectId head=revCommit.getTree().getId();
			ObjectId oldHead=previsouCommit.getTree().getId();
			System.out.println("Printing diff between the Revisions: " + revCommit.getName() + " and " + previsouCommit.getName());
			// prepare the two iterators to compute the diff between
			try (ObjectReader reader = repo.newObjectReader()) {
				CanonicalTreeParser oldTreeIter = new CanonicalTreeParser();
				oldTreeIter.reset(reader, oldHead);
				CanonicalTreeParser newTreeIter = new CanonicalTreeParser();
				newTreeIter.reset(reader, head);
				// finally get the list of changed files
				try (Git git = new Git(repo)) {
					List<DiffEntry> diffs= git.diff()
							.setNewTree(newTreeIter)
							.setOldTree(oldTreeIter)
							.call();
					for (DiffEntry entry : diffs) {
//						System.out.println("Entry: " + entry);
					}
					returnDiffs=diffs;
				} catch (GitAPIException e) {
					// TODO Auto-generated catch block
					e.printStackTrace();
				}
			}
		} catch (IOException e) {
			// TODO Auto-generated catch block
			e.printStackTrace();
		}
		return returnDiffs;
	}
	/**
	 * Walks backwards from {@code commit} and returns its immediate
	 * predecessor, or null when the commit is the first one reachable.
	 */
	public static RevCommit getPrevHash(RevCommit commit, Repository repo) throws IOException {
		try (RevWalk walk = new RevWalk(repo)) {
			// Starting point
			walk.markStart(commit);
			int count = 0;
			for (RevCommit rev : walk) {
				// got the previous commit.
				if (count == 1) {
					return rev;
				}
				count++;
			}
			walk.dispose();
		}
		//Reached end and no previous commits.
		return null;
	}
	public static void main(String[] args) throws IOException {
		// TODO Auto-generated method stub
		// Alternate version commits used in earlier runs:
//		String versionCommit="d0a64168441a08aa323a6881db444b935c186ee3";
//		String versionCommit="d1453fda74bff3d908218207229be1c72fe65166";//Xalan
//		String versionCommit="c79cb6f1a7bbdbba11dd130f54bb6b69a90c4777";//Abdera
//		String versionCommit="c13c4c115f6ceaa1a2fd8786ed638c30b0e4f505";//Camel-2.1.0
		String versionCommit="d1453fda74bff3d908218207229be1c72fe65166";//Xalan 2.4.0
		// NOTE(review): fw/out are never closed; each CSV row is flushed
		// immediately after being written instead.
		File perforFile = new File("Bug-Analysis-Results.csv");
		FileWriter fw = new FileWriter(perforFile, true);
		PrintWriter out = new PrintWriter(fw);
		ArrayList<Object[]> commitPairArray=new ArrayList<Object[]> ();
//		String path="D:\\Projects\\Subject-Versions\\abdera";
//		String path="D:\\Projects\\Subject-Versions\\camel";
		String path="D:\\Projects\\Subject-Versions\\xalan-j";
		FileRepositoryBuilder builder = new FileRepositoryBuilder();
		builder.setMustExist(true);
		builder.addCeilingDirectory(new File(path));
		builder.findGitDir(new File(path));
		JSONParser parser = new JSONParser();
		try {
			// Each JSON pair is [fixCommitHash, introducingCommitHash].
			Object obj2 = parser.parse(new FileReader("Xalan-2.4-Severe/fix_and_introducers_pairs.json"));
			JSONArray jsonObject2 = (JSONArray) obj2;
			Iterator pairs = jsonObject2.iterator();
			while(pairs.hasNext()) {
				JSONArray eachPair=(JSONArray) pairs.next();
				Object[] eachArrayPair=eachPair.toArray();
				commitPairArray.add(eachArrayPair);
			}
			Repository repo=builder.build();
			RevWalk walk = new RevWalk(repo);
			ObjectId versionId=repo.resolve(versionCommit);
			RevCommit verCommit=walk.parseCommit(versionId);
			int verCommitTime=verCommit.getCommitTime();
			System.out.println("The sepecific version's commit time: ");
			printTime(verCommitTime);
//			String issuePath="issue_list.json";
//			String input = FileUtils.readFileToString(new File(issuePath), "UTF-8");
			Object obj = parser.parse(new FileReader("Xalan-2.4-Severe/issue_list.json"));
			JSONObject jsonObject = (JSONObject) obj;
			Set issues=jsonObject.keySet();
			for(Object issue:issues) {
				JSONObject content=(JSONObject) jsonObject.get(issue);
				String commitHash=(String) content.get("hash");
				ObjectId objId = repo.resolve(commitHash);
				RevWalk walk2 = new RevWalk(repo);
				RevCommit revCommit = walk2.parseCommit(objId);
				ArrayList<String> fixFiles=new ArrayList<String>();
				ArrayList<String> thisIssueFile=new ArrayList<String>();
				System.out.println("The issue: "+issue);
				System.out.println("Analyzing the commit: "+commitHash);
				int fixTime=revCommit.getCommitTime();
				// Only consider fixes committed after the analysed version.
				if(verCommitTime>fixTime)
					continue;
				List<DiffEntry> diffFix=getChangedFileList(revCommit,repo);
				if(diffFix==null)
					continue;
				for (DiffEntry entry : diffFix) {
					fixFiles.add(entry.getNewPath());
//					if(entry.getChangeType().equals(ChangeType.MODIFY)) {
//						System.out.println(entry.getNewPath());
//						fixFiles.add(entry.getNewPath());
//					}
				}
				// Match this fix against every recorded fix/introducer pair.
				for (int i = 0; i < commitPairArray.size(); i++) {
					Object[] eachArrayPair=commitPairArray.get(i);
					String fixCommit=(String)eachArrayPair[0];
					if(fixCommit.equals(commitHash)) {
//						System.out.println(fixCommit+","+(String)eachArrayPair[1]);
						String introHash=(String)eachArrayPair[1];
//						String introHash="ecf89a60bfd8089d1b1de5666bd2e9d5938abe8e";
						ObjectId introId = repo.resolve(introHash);
						RevWalk walk3 = new RevWalk(repo);
						RevCommit introCommit = walk3.parseCommit(introId);
						int introTime=introCommit.getCommitTime();
						// The bug is present in the analysed version only if
						// it was introduced before it and fixed after it.
						if(introTime<=verCommitTime && verCommitTime<=fixTime) {
							System.out.println("Find!!!");
							System.out.println("The fix's time: ");
							printTime(fixTime);
							System.out.println(commitHash);
							System.out.println(fixFiles);
							System.out.println("------------------");
							System.out.println("The introduce time: ");
							printTime(introTime);
							System.out.println(introHash);
							ArrayList<String> introFiles=new ArrayList<String>();
							List<DiffEntry> diffIntro=getChangedFileList(introCommit,repo);
							if(diffIntro==null)
								continue;
							for (DiffEntry entry : diffIntro) {
								introFiles.add(entry.getNewPath());
//								if(entry.getChangeType().equals(ChangeType.MODIFY)||entry.getChangeType().equals(ChangeType.ADD)) {
//									System.out.println(entry.getNewPath());
//									introFiles.add(entry.getNewPath());
//								}
							}
							System.out.println(introFiles);
							System.out.println("------------------");
							// Keep only files touched by BOTH the fix and
							// the introducing commit.
							introFiles.retainAll(fixFiles);
							System.out.println("The remained list:");
							System.out.println(introFiles);
							if(introFiles.size()!=0)
								for(String eachFile:introFiles) {
									if(!thisIssueFile.contains(eachFile))
										thisIssueFile.add(eachFile);
								}
							System.out.println("------------------");
						}
					}
				}
				if(thisIssueFile.size()!=0) {
					out.write(issue+","+String.join(",", thisIssueFile)+"\n");
					out.flush();
				}
			}
		} catch (IOException | ParseException e) {
			// TODO Auto-generated catch block
			e.printStackTrace();
		}
	}
}
| 9,266 | 37.936975 | 124 |
java
|
SeqTrans
|
SeqTrans-master/Migration/src/test/TestGeneration.java
|
package test;
import gumtreediff.actions.ActionGenerator;
import gumtreediff.actions.model.*;
import gumtreediff.gen.srcml.SrcmlJavaTreeGenerator;
import gumtreediff.io.ActionsIoUtils;
import gumtreediff.io.TreeIoUtils;
import gumtreediff.matchers.Mapping;
import gumtreediff.matchers.MappingStore;
import gumtreediff.matchers.Matcher;
import gumtreediff.matchers.Matchers;
import gumtreediff.tree.ITree;
import gumtreediff.tree.TreeContext;
import utils.Utils;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
public class TestGeneration {

	/**
	 * Ad-hoc driver: diffs two Java files with GumTree (srcML backend), writes the
	 * mapping and dot graphs to text files, and prints a summary of the edit script.
	 *
	 * Fixes over the original: the identical node mapping was computed twice from
	 * the same MappingStore; the three writers were leaked when a write failed;
	 * unused {@code dst} locals in the action loop are removed. All console output
	 * (including the duplicated "mapSize:" line) is preserved.
	 */
	public static void main(String[] args) throws Exception {
		String path = "DefaultActionInvocation2.java";
		File cppfile = new File(path);
		TreeContext tc = new SrcmlJavaTreeGenerator().generateFromFile(cppfile);
		ITree root = tc.getRoot();
		System.out.println(root.getId() + "," + tc.getTypeLabel(root));

		String path2 = "DefaultActionInvocation.java";
		File cppfile2 = new File(path2);
		TreeContext tc2 = new SrcmlJavaTreeGenerator().generateFromFile(cppfile2);
		ITree root2 = tc2.getRoot();
		System.out.println(root2.getId() + "," + tc2.getTypeLabel(root2));

		System.out.println(tc.getRoot().getId());
		System.out.println(tc2.getRoot().getId());

		Matcher m = Matchers.getInstance().getMatcher(tc.getRoot(), tc2.getRoot());
		m.match();
		MappingStore mappings = m.getMappings();
		HashMap<Integer, Integer> mapping = new HashMap<>();
		for (Mapping map : mappings) {
			mapping.put(map.getFirst().getId(), map.getSecond().getId());
		}
		System.out.println("mapSize:" + mapping.size());
		// The original rebuilt the exact same map from the same MappingStore and
		// printed the same size again; keep the duplicate print (output-compatible)
		// without the redundant recomputation.
		System.out.println("mapSize:" + mapping.size());

		ActionGenerator g = new ActionGenerator(tc.getRoot(), tc2.getRoot(), m.getMappings());
		List<Action> actions = g.generate();

		// try-with-resources closes each writer even if append() throws
		// (the original leaked them on an I/O error).
		try (BufferedWriter wr1 = new BufferedWriter(new FileWriter("testMapping.txt"))) {
			wr1.append(ActionsIoUtils.toXml(tc, g.getActions(), m.getMappings()).toString());
			wr1.flush();
		}

		Utils.checkTCRoot(tc);
		Utils.checkTCRoot(tc2);

		try (BufferedWriter wr = new BufferedWriter(new FileWriter("testGraph.txt"))) {
			wr.append(TreeIoUtils.toDot(tc, mappings, actions, true).toString());
			wr.flush();
		}
		try (BufferedWriter wr2 = new BufferedWriter(new FileWriter("testGraph2.txt"))) {
			wr2.append(TreeIoUtils.toDot(tc2, mappings, actions, false).toString());
			wr2.flush();
		}

		System.out.println("ActionSize:" + actions.size());
		for (Action a : actions) {
			// The original also looked up the mapped destination node per action
			// but never used it; only the action itself is printed.
			if (a instanceof Move) {
				System.out.println(((Move) a).toString());
			} else if (a instanceof Update) {
				System.out.println(((Update) a).toString());
			} else if (a instanceof Insert) {
				System.out.println(((Insert) a).toString());
			} else if (a instanceof Delete) {
				System.out.println(((Delete) a).toString());
			}
		}

		List<ITree> nodes = new ArrayList<>();
		nodes = Utils.collectNode(tc2.getRoot(), nodes);
		System.out.println(nodes.size());
	}
}
| 5,460 | 39.451852 | 100 |
java
|
SeqTrans
|
SeqTrans-master/Migration/src/test/test.java
|
package test;
import java.io.File;
import java.io.IOException;
import java.util.Map;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.exec.CommandLine;
import org.apache.commons.exec.DefaultExecuteResultHandler;
import org.apache.commons.exec.DefaultExecutor;
import org.apache.commons.exec.ExecuteException;
import org.eclipse.jgit.api.Git;
import org.eclipse.jgit.lib.Ref;
import org.eclipse.jgit.lib.Repository;
import org.eclipse.jgit.storage.file.FileRepositoryBuilder;
public class test {

	/**
	 * Prints the short name of every ref in a local repository.
	 *
	 * Fix over the original: {@link Repository} is AutoCloseable and was never
	 * closed; try-with-resources releases its pack-file handles.
	 */
	public static void main(String[] args) throws Exception {
		String classPath = "J:\\git_repo\\okhttp";
		FileRepositoryBuilder builder = new FileRepositoryBuilder();
		builder.setMustExist(true);
		builder.addCeilingDirectory(new File(classPath));
		builder.findGitDir(new File(classPath));
		try (Repository repo = builder.build()) {
			Map<String, Ref> maps = repo.getAllRefs();
			for (Map.Entry<String, Ref> entry : maps.entrySet()) {
				String key = entry.getKey();
				System.out.println(repo.shortenRefName(key));
			}
		}
	}

	/**
	 * Checks out a fixed commit by shelling out to git via commons-exec.
	 * Unused scratch code; the original's mojibake (mis-encoded Chinese)
	 * comments are replaced with English equivalents.
	 */
	private void test1() throws Exception, IOException {
		System.out.println("RunExec");
		String versionCommit = "78351302b0761178581d92612b528f6eea529618"; // commit hash to check out
		String path = "D:\\workspace\\eclipse2018\\Migration\\OpenNMT-py\\"; // local repo working directory
		String line = "cmd.exe /C git checkout " + versionCommit;
		CommandLine cmdLine = CommandLine.parse(line);
		DefaultExecuteResultHandler resultHandler = new DefaultExecuteResultHandler();
		DefaultExecutor executor = new DefaultExecutor();
		executor.setWorkingDirectory(new File(path));
		executor.setExitValue(1); // treats exit code 1 as success -- TODO confirm this is intended
		executor.execute(cmdLine, resultHandler);
		resultHandler.waitFor();
	}
}
| 1,861 | 34.132075 | 82 |
java
|
SeqTrans
|
SeqTrans-master/Migration/src/test/test1.java
|
//package test;
//
//import gumtreediff.tree.hash.StaticHashGenerator;
//
//public class test1 {
//
// private static final ClassLoader var1 =
// ClassLoadingAwareObjectInputStream.calss.getClassLoader();
// private static String[] var2;
//
// public static boolean isAllAllowed() {
// return getSerialziablePackages().length == num1 &&
// getSerialziablePackages()[num2].equals("liter1");
// }
//
// private static final String [ ] var1;
// private static final ClientRootCertificate [ ] var1 = values ( );
// private static final Map [ ] var1;
// private static final Set < String > var1;
// private static final Java7Support var1;
//
// return var1 . length == num && var1 [ num ] . equals ( "" );
// return var1 . length == num && var1 [ num ] . equals ( "" ) );
// return var1 . length == num && var1 [ num ] . equals ( "" ] );
// return var1 . length == num && var1 [ num ];
// return var1 . length ( );
//}
| 920 | 31.892857 | 68 |
java
|
SeqTrans
|
SeqTrans-master/Migration/src/test/test2.java
|
package test;
import java.io.*;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.dom4j.Attribute;
import org.dom4j.Document;
import org.dom4j.DocumentException;
import org.dom4j.Element;
import org.dom4j.io.SAXReader;
/**
 * @author liu
 * @version created 2018-03-26 11:22:29
 * Parses an XML file using DOM4J.
 */
public class test2 {

	/**
	 * Loads a pom.xml, finds the "maven-surefire-plugin" element, adds a
	 * {@code <testFailureIgnore>true</testFailureIgnore>} child to its sibling
	 * {@code <configuration>} element, and writes the result to pom1.xml.
	 *
	 * Fixes over the original: the raw {@code Iterator} is parameterized, and
	 * the output {@link Writer} is closed via try-with-resources even when
	 * {@code write()} throws (it was leaked on failure before).
	 */
	public static void main(String[] args) throws IOException {
		SAXReader reader = new SAXReader();
		try {
			Document read = reader.read("I:\\20210714-Srqtrans_testcase\\Vulnerability_testset_full"
					+ "\\cp1069\\4ed66e5838476e575a83c3cd13fffb37eefa2f48\\jenkins\\core\\pom.xml");
			// Collect every element in the document, depth-first.
			Element root = read.getRootElement();
			ArrayList<Element> nodeList = new ArrayList<Element>();
			nodeList = getNodes(root, nodeList);
			for (Element e : nodeList) {
				if (e.getText().equals("maven-surefire-plugin")) {
					System.out.println("find it");
					Element par = e.getParent();
					Iterator<Element> eles = par.elementIterator();
					while (eles.hasNext()) {
						Element child = eles.next();
						if (child.getName().equals("configuration")) {
							Element addElement = child.addElement("testFailureIgnore");
							addElement.addText("true");
						}
					}
				}
			}
			// Write the modified document; try-with-resources guarantees close.
			try (Writer writer = new FileWriter(new File("I:\\20210714-Srqtrans_testcase\\Vulnerability_testset_full"
					+ "\\cp1069\\4ed66e5838476e575a83c3cd13fffb37eefa2f48\\jenkins\\core\\pom1.xml"))) {
				read.write(writer);
			}
		} catch (DocumentException e) {
			e.printStackTrace();
		}
	}

	/**
	 * Recursively collects all descendant elements of {@code node} into
	 * {@code nodeList} (the node itself is not added).
	 *
	 * @return the same {@code nodeList}, for chaining
	 */
	public static ArrayList<Element> getNodes(Element node, ArrayList<Element> nodeList) {
		List<Element> listElement = node.elements();
		nodeList.addAll(listElement);
		for (Element e : listElement) {
			getNodes(e, nodeList);
		}
		return nodeList;
	}
}
| 2,680 | 33.818182 | 110 |
java
|
SeqTrans
|
SeqTrans-master/Migration/src/test/testIO.java
|
package test;
import java.util.ArrayList;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
public class testIO {

	/**
	 * Tiny sanity check: prints "True" because "abc" contains "ab".
	 * Fix over the original: the {@code ArrayList} raw type is parameterized.
	 */
	public static void main(String[] args) throws Exception {
		ArrayList<String> tests = new ArrayList<String>();
		tests.add("abc");
		if (tests.get(0).contains("ab"))
			System.out.println("True");
		else
			System.out.println("False");
	}

	/**
	 * Regex scratch code: extracts the "At Foo.java:[line N]" location from a
	 * FindBugs-style message and prints the pieces. Never called from main.
	 */
	private static void test() {
		String test = "M B JUA: Assertion of type org.springframework.messaging.converter.CompositeMessageConverter in org.springframework.web.socket.config.MessageBrokerBeanDefinitionParserTests.annotationMethodMessageHandler() at MessageBrokerBeanDefinitionParserTests.java:[line 337]"
				+ " may hide useful information about why a cast may have failed. At MessageBrokerBeanDefinitionParserTests.java:[line 337]";
		String pattern = "[aA]t\\b.*.java:\\[line\\s\\d+\\]";
		Pattern p = Pattern.compile(pattern);
		Matcher m = p.matcher(test);
		m.find();
		System.out.println(m.group());
		String location1 = m.group().split("]")[0] + "]";
		System.out.println(location1);
		Pattern p1 = Pattern.compile("At\\b.*.java:\\[line\\s\\d+\\]");
		Matcher m1 = p1.matcher(m.group());
		m1.find();
		String location2 = m1.group();
		System.out.println(location2);
		// NOTE(review): this is the second find() on m, so it continues from the
		// first match -- presumably intentional for this experiment.
		if (m.find()) {
			String name = m.group().split("\\.java")[0];
			System.out.println(name);
			name = name.substring(3, name.length());
			System.out.println("Found value: " + name);
		} else {
			System.out.println("NO MATCH");
		}
		String test1 = "I:\\20210714-Srqtrans_testcase\\Vulnerability_testcase\\cp12\\8748ba4c4b96a8a82c20c88373b2fcc77c30011f\\spring-messaging\\src\\main\\java\\org\\springframework\\messaging\\simp\\broker\\SimpleBrokerMessageHandler.java";
		// Last path segment without its extension.
		String fileName = test1.split("\\\\")[test1.split("\\\\").length - 1].split("\\.")[0];
		System.out.println(fileName);
	}
}
| 1,852 | 33.962264 | 282 |
java
|
SeqTrans
|
SeqTrans-master/Migration/src/test/testJGit.java
|
package test;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.eclipse.jgit.errors.IncorrectObjectTypeException;
import org.eclipse.jgit.lib.ObjectId;
import org.eclipse.jgit.lib.Ref;
import org.eclipse.jgit.lib.Repository;
import org.eclipse.jgit.revwalk.RevCommit;
import org.eclipse.jgit.revwalk.RevSort;
import org.eclipse.jgit.revwalk.RevWalk;
import org.eclipse.jgit.storage.file.FileRepositoryBuilder;
public class testJGit {

	/**
	 * Ad-hoc comparison: counts the commits reachable from all branch heads
	 * versus the commits reachable from one fixed commit, printing both counts.
	 * NOTE(review): neither RevWalk created here is closed -- they hold
	 * repository resources until GC; confirm whether that matters for this tool.
	 */
	public static void main(String[] args) throws Exception {
		String classPath = "F:\\tmp\\activemq\\";
		String commit = "943158555356e8caa1068f6bde02596569b1d391";
		FileRepositoryBuilder builder = new FileRepositoryBuilder();
		builder.setMustExist(true);
		builder.addCeilingDirectory(new File(classPath));
		builder.findGitDir(new File(classPath));
		Repository repo;
		repo = builder.build();
		testJGit test = new testJGit();
		// 1) commits reachable from every branch head
		List<ObjectId> branchesIds = test.getAllBranchObjectId(null, repo);
		System.out.println(branchesIds.size());
		RevWalk revWalk = test.getAllRevWalk(branchesIds, repo);
		Iterator<RevCommit> iter = revWalk.iterator();
		ArrayList<RevCommit> commits1 = new ArrayList<>();
		while (iter.hasNext()) {
			RevCommit tmp = iter.next();
			commits1.add(tmp);
		}
		System.out.println(commits1.size());
		// 2) commits reachable from the fixed commit above
		RevWalk walk = new RevWalk(repo);
		ObjectId versionId=repo.resolve(commit);
		RevCommit currentCommit = walk.parseCommit(versionId);
		ArrayList<RevCommit> commits2 = test.getCommits(currentCommit, repo);
		System.out.println(commits2.size());
	}

	/**
	 * Get object ids of all the branches named like %branch% in a repository.
	 * If branch == null, retrieve all refs.
	 *
	 * @param branch branch-name suffix to match, or null for every ref
	 * @param repo   the repository to inspect
	 * @return object ids of the matching refs
	 */
	public List<ObjectId> getAllBranchObjectId(String branch, Repository repo) throws Exception{
		List<ObjectId> currentRemoteRefs = new ArrayList<ObjectId>();
		for (Ref ref: repo.getRefDatabase().getRefs()){
			String refName = ref.getName();
			if (branch == null || refName.endsWith("/" + branch)) {
				currentRemoteRefs.add(ref.getObjectId());
			}
		}
		return currentRemoteRefs;
	}

	/**
	 * Builds a reverse (oldest-first) walk started from every given ref id.
	 * Ids that do not point at commits (e.g. annotated tag objects) are skipped.
	 * The caller is responsible for closing the returned walk.
	 */
	public RevWalk getAllRevWalk(List<ObjectId> remoteRefs, Repository repo) throws Exception{
		RevWalk walk = createReverseRevWalk(repo);
		for (ObjectId refId: remoteRefs){
			RevCommit start;
			try {
				start = walk.parseCommit(refId);
			} catch (IncorrectObjectTypeException e){
				// ref does not point at a commit; skip it
				continue;
			}
			walk.markStart(start);
		}
		// Filter all merge changes
		//walk.setRevFilter(RevFilter.NO_MERGES);
		return walk;
	}

	// Oldest-first walk: sort by commit time descending, then reverse.
	private RevWalk createReverseRevWalk(Repository repo){
		RevWalk walk = new RevWalk(repo);
		walk.sort(RevSort.COMMIT_TIME_DESC, true);
		walk.sort(RevSort.REVERSE, true);
		return walk;
	}

	/**
	 * Collects every commit reachable from {@code commit} (itself included)
	 * by walking its history; the walk is closed before returning.
	 */
	public ArrayList<RevCommit> getCommits(RevCommit commit, Repository repo) throws IOException {
		ArrayList<RevCommit> commits = new ArrayList<>();
		try (RevWalk walk = new RevWalk(repo)) {
			// Starting point
			walk.markStart(commit);
			int count = 0;
			for (RevCommit rev : walk) {
				// got the previous commit.
				commits.add(rev);
				count++;
			}
			walk.dispose();
		}
		//Reached end and no previous commits.
		return commits;
	}
}
| 3,557 | 32.885714 | 97 |
java
|
SeqTrans
|
SeqTrans-master/Migration/src/utils/CountCPs.java
|
package utils;
import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.util.ArrayList;
import java.util.List;

import org.apache.commons.io.FileUtils;
public class CountCPs {

	public static void main(String[] args) throws Exception {
		String path = "J:\\Vulnerability_commit";
		List<String> cpList = countCPs(path);
		System.out.println(cpList.size());
	}

	/**
	 * Collects every change-pair entry listed in the {@code diffs.txt} of each
	 * sub-directory of {@code path}, skipping the first two (header) lines of
	 * each file.
	 *
	 * Fixes over the original: platform-independent path construction instead
	 * of a hard-coded {@code "\\"} separator; stdlib {@code Files.readAllLines}
	 * instead of commons-io; no IndexOutOfBoundsException on files with fewer
	 * than two lines; no NPE when {@code path} is not a directory.
	 *
	 * @param path root directory whose children each contain a diffs.txt
	 * @return the concatenated entries from all diffs.txt files
	 * @throws IOException if a diffs.txt cannot be read
	 */
	public static List<String> countCPs(String path) throws IOException {
		List<String> cpList = new ArrayList<String>();
		File[] cpRoots = new File(path).listFiles();
		if (cpRoots == null) {
			// path does not exist or is not a directory
			return cpList;
		}
		for (File cpRoot : cpRoots) {
			File diffFile = new File(cpRoot, "diffs.txt");
			List<String> lines = Files.readAllLines(diffFile.toPath(), StandardCharsets.UTF_8);
			// Skip the two header lines, guarding against short files.
			cpList.addAll(lines.subList(Math.min(2, lines.size()), lines.size()));
		}
		return cpList;
	}
}
| 808 | 24.28125 | 69 |
java
|
SeqTrans
|
SeqTrans-master/Migration/src/utils/FileOperation.java
|
package utils;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.ArrayList;
public class FileOperation {

	/**
	 * Copies {@code sourcefile} to {@code targetFile}, creating the target's
	 * parent directories if needed.
	 *
	 * Fixes over the original: streams are closed via try-with-resources even
	 * when read/write throws (they were leaked on an I/O error), and a target
	 * with no parent path no longer NPEs.
	 *
	 * @throws IOException if either file cannot be opened or copied
	 */
	public static void copyFile(File sourcefile, File targetFile) throws IOException {
		File parent = targetFile.getParentFile();
		if (parent != null && !parent.exists()) {
			parent.mkdirs();
		}
		try (BufferedInputStream inbuff = new BufferedInputStream(new FileInputStream(sourcefile));
				BufferedOutputStream outbuff = new BufferedOutputStream(new FileOutputStream(targetFile))) {
			byte[] b = new byte[1024 * 5];
			int len;
			while ((len = inbuff.read(b)) != -1) {
				outbuff.write(b, 0, len);
			}
			outbuff.flush();
		}
	}

	/**
	 * Recursively copies the contents of {@code sourceDir} into {@code targetDir},
	 * creating {@code targetDir} if needed.
	 *
	 * Fixes over the original: platform-independent {@code File(parent, child)}
	 * instead of hard-coded {@code "\\"} separators, and no NPE when
	 * {@code sourceDir} is not a readable directory.
	 */
	public static void copyDirectiory(String sourceDir, String targetDir) throws IOException {
		File source = new File(sourceDir);
		File target = new File(targetDir);
		if (!target.exists()) {
			target.mkdirs();
		}
		File[] file = source.listFiles();
		if (file == null) {
			// source is not a directory or is unreadable
			return;
		}
		for (File child : file) {
			if (child.isFile()) {
				copyFile(child, new File(target, child.getName()));
			} else if (child.isDirectory()) {
				copyDirectiory(new File(sourceDir, child.getName()).getPath(),
						new File(targetDir, child.getName()).getPath());
			}
		}
	}

	/** Deletes the directory at {@code folderPath} and everything inside it. */
	public static void delFolder(String folderPath) {
		try {
			delAllFile(folderPath); // delete all contents first
			new File(folderPath).delete(); // then the now-empty directory itself
		} catch (Exception e) {
			e.printStackTrace();
		}
	}

	/**
	 * Deletes every file and sub-directory under {@code path}.
	 *
	 * @return true if at least one sub-directory was deleted (original contract)
	 */
	public static boolean delAllFile(String path) {
		boolean flag = false;
		File file = new File(path);
		if (!file.exists() || !file.isDirectory()) {
			return flag;
		}
		String[] tempList = file.list();
		if (tempList == null) {
			return flag;
		}
		for (String name : tempList) {
			File temp = new File(file, name);
			if (temp.isFile()) {
				temp.delete();
			}
			if (temp.isDirectory()) {
				delAllFile(temp.getPath()); // delete the directory's contents
				delFolder(temp.getPath()); // then the empty directory
				flag = true;
			}
		}
		return flag;
	}

	/**
	 * Recursively collects all files whose name contains ".java" under
	 * {@code path} into {@code fileList}. Prints diagnostics for missing or
	 * empty directories, matching the original behavior.
	 */
	public static void traverseFolder(String path, ArrayList<File> fileList) {
		File dir = new File(path);
		if (dir.exists()) {
			File[] files = dir.listFiles();
			if (files == null || files.length == 0) {
				System.out.println("error length!");
			} else {
				for (File f : files) {
					if (f.isDirectory()) {
						traverseFolder(f.getAbsolutePath(), fileList);
					} else if (f.getName().contains(".java")) {
						fileList.add(f);
					}
				}
			}
		} else {
			System.out.println("dir not exists");
		}
	}
}
| 3,999 | 30.25 | 136 |
java
|
SeqTrans
|
SeqTrans-master/Migration/src/utils/MergeData.java
|
package utils;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileReader;
import java.io.FileWriter;
import java.util.ArrayList;
public class MergeData {
/**
* Merge training files into the final training set
* @throws Exception
*/
public static void main(String[] args) throws Exception {
String path = "jsons";
// mergeTrainingSet(path);
mergeJsonSet(path);
}
public static void mergeJsonSet(String dir) throws Exception {
String path = "Json_merge\\";
File outputDir = new File(path);
if (!outputDir.exists()) {
outputDir.mkdirs();
}
if(outputDir.listFiles().length!=0)
throw new Exception("plz clean the dir!");
File rootFile = new File(dir);
File[] dirs = rootFile.listFiles();
int count = 0;
for(File cpDir : dirs) {
System.out.println("ļ: "+cpDir.getName());
File[] files = cpDir.listFiles();
if(files.length==0)
continue;
for(File cpFile : files) {
String name = cpFile.getName();
String newPath = path+"pair"+String.valueOf(count)+"_src.json";
if(name.contains("src")) {
String index = name.split("_")[0];
for(int i=0;i<files.length;i++) {
File cpFile1 = files[i];
String name1 = cpFile1.getName();
// System.out.println(name1);
if(name1.contains("tgt")&&name1.contains(index)) {
String newPath1 = path+"pair"+String.valueOf(count)+"_tgt.json";
File targetFile = new File(newPath);
File targetFile1 = new File(newPath1);
FileOperation.copyFile(cpFile, targetFile);
FileOperation.copyFile(cpFile1, targetFile1);
count++;
break;
}else if(i==files.length-1)
throw new Exception("error case, not find tgt json!");
}
}
}
}
}
public static void mergeTrainingSet(String dir) throws Exception {
File dirFile = new File(dir);
File[] files = dirFile.listFiles();
ArrayList<String> defuses = new ArrayList<String>();
ArrayList<String> srcs = new ArrayList<String>();
ArrayList<String> tgts = new ArrayList<String>();
for(File file : files) {
String name = file.getName();
BufferedReader br = new BufferedReader(new FileReader(file));
String tmpline = "";
ArrayList<String> lines = new ArrayList<String>();
while((tmpline = br.readLine())!=null) {
lines.add(tmpline);
}
if(name.contains("defuse")) {
for(String line : lines)
defuses.add(line);
}else if(name.contains("src-val")) {
for(String line : lines)
srcs.add(line);
}else if (name.contains("tgt-val")) {
for(String line : lines)
tgts.add(line);
}
br.close();
}
BufferedWriter wr = new BufferedWriter(new FileWriter(new File("defuse.txt")));
BufferedWriter wr1 = new BufferedWriter(new FileWriter(new File("src-val.txt")));
BufferedWriter wr2 = new BufferedWriter(new FileWriter(new File("tgt-val.txt")));
for(String line : defuses) {
wr.append(line);
wr.newLine();
wr.flush();
}
for(String line : srcs) {
wr1.append(line);
wr1.newLine();
wr1.flush();
}
for(String line : tgts) {
wr2.append(line);
wr2.newLine();
wr2.flush();
}
wr.close();
wr1.close();
wr2.close();
}
}
| 3,269 | 27.938053 | 83 |
java
|
SeqTrans
|
SeqTrans-master/Migration/src/utils/ReadAPI.java
|
package utils;
import java.io.*;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.LinkedHashSet;
import structure.API;
public class ReadAPI {

	public static void main(String[] args) throws Exception {
		String path = "apis_test";
		LinkedHashSet<API> apis = readAPI(path);
		System.out.println(apis.size());
	}

	/**
	 * Parses every file under {@code path} into {@link API} descriptors. Each
	 * line has the form {@code pkg.Class.method[;param1,param2,...]}. Lines
	 * containing {@code "Anon_"} (anonymous inner classes) are skipped.
	 *
	 * Fix over the original: the reader is closed via try-with-resources even
	 * when parsing throws; the mojibake comments are replaced with English.
	 *
	 * @param path directory containing api listing files
	 * @return the parsed APIs, in file/line order
	 */
	public static LinkedHashSet<API> readAPI(String path) throws Exception {
		LinkedHashSet<API> apis = new LinkedHashSet<API>();
		File dir = new File(path);
		File[] files = dir.listFiles();
		for (File file : files) {
			try (BufferedReader br = new BufferedReader(new FileReader(file))) {
				String tmpline;
				while ((tmpline = br.readLine()) != null) {
					String className = "";
					String methodName = "";
					ArrayList<String> params = new ArrayList<String>();
					String longName = tmpline.split(";")[0];
					if (tmpline.split(";").length > 1) {
						for (String tmp : tmpline.split(";")[1].split(",")) {
							params.add(tmp);
						}
					}
					if (longName.contains("Anon_")) {
						// anonymous inner-class entry: skip it
						continue;
					}
					className = longName.substring(0, longName.lastIndexOf("."));
					methodName = longName.substring(longName.lastIndexOf(".") + 1, longName.length());
					longName = className + "." + methodName;
					API api = new API(longName, className, methodName, params);
					apis.add(api);
				}
			}
		}
		return apis;
	}

	/**
	 * Extracts the class-name portion of each api line under {@code path}.
	 * NOTE(review): assumes every line contains '(' -- a line without one would
	 * throw StringIndexOutOfBoundsException; confirm the input format.
	 */
	public static HashSet<String> readClass(String path) throws Exception {
		HashSet<String> classes = new HashSet<String>();
		File dir = new File(path);
		File[] files = dir.listFiles();
		for (File file : files) {
			try (BufferedReader br = new BufferedReader(new FileReader(file))) {
				String tmpline;
				while ((tmpline = br.readLine()) != null) {
					String api = tmpline;
					String className = "";
					if (api.contains("Anon_")) {
						// anonymous entry: keep everything before '('
						className = api.substring(0, api.indexOf("(") - 1);
					} else {
						className = api.substring(0, api.indexOf("(") - 1);
						className = className.substring(0, className.lastIndexOf("."));
					}
					classes.add(className);
				}
			}
		}
		return classes;
	}
}
| 2,565 | 29.188235 | 85 |
java
|
SeqTrans
|
SeqTrans-master/Migration/src/utils/SearchTag.java
|
package utils;
import java.io.File;
import java.io.IOException;
import java.util.Collection;
import java.util.LinkedList;
import java.util.List;
import org.eclipse.jgit.api.Git;
import org.eclipse.jgit.api.LogCommand;
import org.eclipse.jgit.api.errors.GitAPIException;
import org.eclipse.jgit.lib.ObjectId;
import org.eclipse.jgit.lib.Ref;
import org.eclipse.jgit.lib.Repository;
import org.eclipse.jgit.revwalk.RevCommit;
import org.eclipse.jgit.revwalk.RevTag;
import org.eclipse.jgit.storage.file.FileRepositoryBuilder;
public class SearchTag {

	public static void main(String[] args) throws Exception {
		String path = "I:\\uaa";
		String commit = "5dc5ca9176ed5baa870680d99f37e7e559dddc5d";
		searchTag(path, commit);
	}

	/**
	 * Prints the name of every tag in the repository at {@code classPath} whose
	 * history contains {@code commit}.
	 *
	 * Fixes over the original: a new {@link Git} instance was created (and
	 * leaked) for every tag -- one instance is now reused for all log commands;
	 * {@link Repository} and {@link Git} are closed via try-with-resources.
	 *
	 * @param classPath path inside the local git working tree
	 * @param commit    full SHA-1 of the commit to look for
	 */
	public static void searchTag(String classPath, String commit) throws Exception {
		FileRepositoryBuilder builder = new FileRepositoryBuilder();
		builder.setMustExist(true);
		builder.addCeilingDirectory(new File(classPath));
		builder.findGitDir(new File(classPath));
		try (Repository repo = builder.build(); Git git = new Git(repo)) {
			List<Ref> list = git.tagList().call();
			ObjectId commitId = ObjectId.fromString(commit);
			for (Ref testTag : list) {
				// Each call to git.log() produces a fresh LogCommand.
				LogCommand log = git.log();
				// Annotated tags must be peeled to the commit they point at.
				Ref peeledRef = repo.getRefDatabase().peel(testTag);
				if (peeledRef.getPeeledObjectId() != null) {
					log.add(peeledRef.getPeeledObjectId());
				} else {
					log.add(testTag.getObjectId());
				}
				Iterable<RevCommit> logs = log.call();
				for (RevCommit rev : logs) {
					if (rev.getId().equals(commitId)) {
						System.out.println(testTag.getName());
						break;
					}
				}
			}
		}
	}
}
| 2,359 | 31.777778 | 125 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/AbstractBuild1.java
|
/*
* The MIT License
*
* Copyright (c) 2004-2010, Sun Microsystems, Inc., Kohsuke Kawaguchi, Yahoo! Inc., CloudBees, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package hudson.model;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSortedSet;
import hudson.AbortException;
import hudson.EnvVars;
import hudson.FilePath;
import hudson.Functions;
import hudson.Launcher;
import jenkins.util.SystemProperties;
import hudson.console.ModelHyperlinkNote;
import hudson.model.Fingerprint.BuildPtr;
import hudson.model.Fingerprint.RangeSet;
import hudson.model.labels.LabelAtom;
import hudson.model.listeners.RunListener;
import hudson.model.listeners.SCMListener;
import hudson.remoting.ChannelClosedException;
import hudson.remoting.RequestAbortedException;
import hudson.scm.ChangeLogParser;
import hudson.scm.ChangeLogSet;
import hudson.scm.ChangeLogSet.Entry;
import hudson.scm.NullChangeLogParser;
import hudson.scm.SCM;
import hudson.scm.SCMRevisionState;
import hudson.slaves.NodeProperty;
import hudson.slaves.WorkspaceList;
import hudson.slaves.WorkspaceList.Lease;
import hudson.slaves.OfflineCause;
import hudson.tasks.BuildStep;
import hudson.tasks.BuildStepMonitor;
import hudson.tasks.BuildTrigger;
import hudson.tasks.BuildWrapper;
import hudson.tasks.Builder;
import hudson.tasks.Fingerprinter.FingerprintAction;
import hudson.tasks.Publisher;
import hudson.util.*;
import jenkins.model.Jenkins;
import org.kohsuke.stapler.HttpResponse;
import org.kohsuke.stapler.Stapler;
import org.kohsuke.stapler.StaplerRequest;
import org.kohsuke.stapler.StaplerResponse;
import org.kohsuke.stapler.export.Exported;
import org.xml.sax.SAXException;
import javax.servlet.ServletException;
import java.io.File;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.lang.ref.WeakReference;
import java.util.AbstractSet;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.annotation.CheckForNull;
import javax.annotation.Nonnull;
import org.kohsuke.stapler.interceptor.RequirePOST;
import static java.util.logging.Level.WARNING;
import jenkins.model.lazy.BuildReference;
import jenkins.model.lazy.LazyBuildMixIn;
import org.kohsuke.accmod.Restricted;
import org.kohsuke.accmod.restrictions.DoNotUse;
/**
* Base implementation of {@link Run}s that build software.
*
* For now this is primarily the common part of {@link Build} and MavenBuild.
*
* @author Kohsuke Kawaguchi
* @see AbstractProject
*/
public abstract class AbstractBuild<P extends AbstractProject<P,R>,R extends AbstractBuild<P,R>> extends Run<P,R> implements Queue.Executable, LazyBuildMixIn.LazyLoadingRun<P,R> {
/**
* Set if we want the blame information to flow from upstream to downstream build.
*/
private static final boolean upstreamCulprits = SystemProperties.getBoolean("hudson.upstreamCulprits");
/**
* Name of the agent this project was built on.
* Null or "" if built by the master. (null happens when we read old record that didn't have this information.)
*/
private String builtOn;
/**
* The file path on the node that performed a build. Kept as a string since {@link FilePath} is not serializable into XML.
* @since 1.319
*/
private String workspace;
/**
* Version of Hudson that built this.
*/
private String hudsonVersion;
/**
* SCM used for this build.
*/
private ChangeLogParser scm;
/**
* Changes in this build.
*/
private volatile transient WeakReference<ChangeLogSet<? extends Entry>> changeSet;
/**
* Cumulative list of people who contributed to the build problem.
*
* <p>
* This is a list of {@link User#getId() user ids} who made a change
* since the last non-broken build. Can be null (which should be
* treated like empty set), because of the compatibility.
*
* <p>
* This field is semi-final --- once set the value will never be modified.
*
* @since 1.137
*/
private volatile Set<String> culprits;
/**
* During the build this field remembers {@link hudson.tasks.BuildWrapper.Environment}s created by
* {@link BuildWrapper}. This design is bit ugly but forced due to compatibility.
*/
protected transient List<Environment> buildEnvironments;
private transient final LazyBuildMixIn.RunMixIn<P,R> runMixIn = new LazyBuildMixIn.RunMixIn<P,R>() {
@Override protected R asRun() {
return _this();
}
};
    /** Creates a new build of {@code job}; delegates to {@link Run#Run(Job)}. */
    protected AbstractBuild(P job) throws IOException {
        super(job);
    }
    /** Creates a new build of {@code job} with the given timestamp; delegates to {@link Run#Run(Job, Calendar)}. */
    protected AbstractBuild(P job, Calendar timestamp) {
        super(job, timestamp);
    }
    /** Creates a build backed by the given build directory; delegates to {@link Run#Run(Job, File)}. */
    protected AbstractBuild(P project, File buildDir) throws IOException {
        super(project, buildDir);
    }
    /** The project this build belongs to; alias for {@link #getParent()}. */
    public final P getProject() {
        return getParent();
    }
    /** Mix-in backing the lazy previous/next build navigation (see {@link #getPreviousBuild()}). */
    @Override public final LazyBuildMixIn.RunMixIn<P,R> getRunMixIn() {
        return runMixIn;
    }
    /** Delegates to the run mix-in; see {@link LazyBuildMixIn.RunMixIn#createReference()}. */
    @Override protected final BuildReference<R> createReference() {
        return getRunMixIn().createReference();
    }
    /** Delegates to the run mix-in; see {@link LazyBuildMixIn.RunMixIn#dropLinks()}. */
    @Override protected final void dropLinks() {
        getRunMixIn().dropLinks();
    }
    /** {@inheritDoc} Resolved lazily through the run mix-in. */
    @Override
    public R getPreviousBuild() {
        return getRunMixIn().getPreviousBuild();
    }
    /** {@inheritDoc} Resolved lazily through the run mix-in. */
    @Override
    public R getNextBuild() {
        return getRunMixIn().getNextBuild();
    }
/**
* Returns a {@link Slave} on which this build was done.
*
* @return
* null, for example if the agent that this build run no longer exists.
*/
public @CheckForNull Node getBuiltOn() {
if (builtOn==null || builtOn.equals(""))
return Jenkins.getInstance();
else
return Jenkins.getInstance().getNode(builtOn);
}
    /**
     * Returns the name of the agent it was built on; null or "" if built by the master.
     * (null happens when we read old record that didn't have this information.)
     */
    @Exported(name="builtOn")
    public String getBuiltOnStr() {
        return builtOn;
    }
    /**
     * Allows subtypes to set the value of {@link #builtOn}.
     * This is used for those implementations where an {@link AbstractBuild} is made 'built' without
     * actually running its {@link #run()} method.
     *
     * @param builtOn node name; null or "" denotes the master.
     * @since 1.429
     */
    protected void setBuiltOnStr( String builtOn ) {
        this.builtOn = builtOn;
    }
    /**
     * Gets the nearest ancestor {@link AbstractBuild} that belongs to
     * {@linkplain AbstractProject#getRootProject() the root project of getProject()} that
     * dominates/governs/encompasses this build.
     *
     * <p>
     * Some projects (such as matrix projects, Maven projects, or promotion processes) form a tree of jobs,
     * and still in some of them, builds of child projects are related/tied to that of the parent project.
     * In such a case, this method returns the governing build.
     *
     * @return never null. In the worst case the build dominates itself.
     * @since 1.421
     * @see AbstractProject#getRootProject()
     */
    public AbstractBuild<?,?> getRootBuild() {
        // Default: this build is its own root; subtypes (e.g. matrix runs) override.
        return this;
    }
    /**
     * Used to render the side panel "Back to project" link.
     *
     * <p>
     * In a rare situation where a build can be reached from multiple paths,
     * returning different URLs from this method based on situations might
     * be desirable.
     *
     * <p>
     * If you override this method, you'll most likely also want to override
     * {@link #getDisplayName()}.
     */
    public String getUpUrl() {
        // Resolve the URL relative to however the current HTTP request reached this build.
        return Functions.getNearestAncestorUrl(Stapler.getCurrentRequest(),getParent())+'/';
    }
    /**
     * Gets the directory where this build is being built.
     *
     * <p>
     * Note to implementors: to control where the workspace is created, override
     * {@link AbstractBuildExecution#decideWorkspace(Node,WorkspaceList)}.
     *
     * @return
     *      null if the workspace is on an agent that's not connected. Note that once the build is completed,
     *      the workspace may be used to build something else, so the value returned from this method may
     *      no longer show a workspace as it was used for this build.
     * @since 1.319
     */
    public final @CheckForNull FilePath getWorkspace() {
        if (workspace==null) return null; // workspace not yet decided (or record predates 1.319)
        Node n = getBuiltOn();
        if (n==null) return null; // node is gone; the remembered path cannot be resolved
        return n.createPath(workspace);
    }
    /**
     * Normally, a workspace is assigned by {@link hudson.model.Run.RunExecution}, but this lets you set the workspace in case
     * {@link AbstractBuild} is created without a build.
     *
     * @param ws workspace path on the build node; stored as a string since {@link FilePath} is not serializable.
     */
    protected void setWorkspace(@Nonnull FilePath ws) {
        this.workspace = ws.getRemote();
    }
    /**
     * Returns the root directory of the checked-out module.
     * <p>
     * This is usually where <tt>pom.xml</tt>, <tt>build.xml</tt>
     * and so on exists.
     *
     * @return null if the workspace is currently unavailable (see {@link #getWorkspace()}).
     */
    public final FilePath getModuleRoot() {
        FilePath ws = getWorkspace();
        if (ws==null) return null;
        // The SCM decides where inside the workspace the module lives.
        return getParent().getScm().getModuleRoot(ws, this);
    }
    /**
     * Returns the root directories of all checked-out modules.
     * <p>
     * Some SCMs support checking out multiple modules into the same workspace.
     * In these cases, the returned array will have a length greater than one.
     * @return The roots of all modules checked out from the SCM,
     *      or null if the workspace is currently unavailable.
     */
    public FilePath[] getModuleRoots() {
        FilePath ws = getWorkspace();
        if (ws==null) return null;
        return getParent().getScm().getModuleRoots(ws, this);
    }
    /**
     * List of users who committed a change since the last non-broken build till now.
     *
     * <p>
     * This list at least always include people who made changes in this build, but
     * if the previous build was a failure it also includes the culprit list from there.
     *
     * <p>
     * While the build is in progress (or for old records), the set is computed on the
     * fly from the changelog and the previous build; once {@link #culprits} has been
     * persisted, a lazy view over the stored user ids is returned instead.
     *
     * @return
     *      can be empty but never null.
     */
    @Exported
    public Set<User> getCulprits() {
        if (culprits==null) {
            Set<User> r = new HashSet<User>();
            R p = getPreviousCompletedBuild();
            if (p !=null && isBuilding()) {
                Result pr = p.getResult();
                if (pr!=null && pr.isWorseThan(Result.SUCCESS)) {
                    // we are still building, so this is just the current latest information,
                    // but we seems to be failing so far, so inherit culprits from the previous build.
                    // isBuilding() check is to avoid recursion when loading data from old Hudson, which doesn't record
                    // this information
                    r.addAll(p.getCulprits());
                }
            }
            // Everyone who committed to this build is always a culprit.
            for (Entry e : getChangeSet())
                r.add(e.getAuthor());
            if (upstreamCulprits) {
                // If we have dependencies since the last successful build, add their authors to our list
                if (getPreviousNotFailedBuild() != null) {
                    Map <AbstractProject,DependencyChange> depmap = getDependencyChanges(getPreviousSuccessfulBuild());
                    for (DependencyChange dep : depmap.values()) {
                        for (AbstractBuild<?,?> b : dep.getBuilds()) {
                            for (Entry entry : b.getChangeSet()) {
                                r.add(entry.getAuthor());
                            }
                        }
                    }
                }
            }
            return r;
        }
        // culprits was persisted: adapt the stored user ids to User objects lazily.
        return new AbstractSet<User>() {
            public Iterator<User> iterator() {
                return new AdaptedIterator<String,User>(culprits.iterator()) {
                    protected User adapt(String id) {
                        return User.get(id);
                    }
                };
            }
            public int size() {
                return culprits.size();
            }
        };
    }
/**
* Returns true if this user has made a commit to this build.
*
* @since 1.191
*/
public boolean hasParticipant(User user) {
for (ChangeLogSet.Entry e : getChangeSet())
try{
if (e.getAuthor()==user)
return true;
} catch (RuntimeException re) {
LOGGER.log(Level.INFO, "Failed to determine author of changelog " + e.getCommitId() + "for " + getParent().getDisplayName() + ", " + getDisplayName(), re);
}
return false;
}
    /**
     * Gets the version of Hudson that was used to build this job.
     *
     * @since 1.246
     */
    public String getHudsonVersion() {
        return hudsonVersion;
    }
    /**
     * @deprecated as of 1.467
     *      Please use {@link hudson.model.Run.RunExecution}
     */
    @Deprecated
    public abstract class AbstractRunner extends AbstractBuildExecution {
        // Retained only so existing subclasses keep compiling; all behavior lives in AbstractBuildExecution.
    }
    public abstract class AbstractBuildExecution extends Runner {
        /*
            Some plugins might depend on this instance castable to Runner, so we need to use
            deprecated class here.
         */
        /**
         * Since configuration can be changed while a build is in progress,
         * create a launcher once and stick to it for the entire build duration.
         */
        protected Launcher launcher;
        /**
         * Output/progress of this build goes here.
         */
        protected BuildListener listener;
        /**
         * Lease of the workspace.
         */
        private Lease lease;
        /**
         * Returns the current {@link Node} on which we are building.
         * @return Returns the current {@link Node}
         * @throws IllegalStateException if that cannot be determined
         */
        protected final @Nonnull Node getCurrentNode() throws IllegalStateException {
            Executor exec = Executor.currentExecutor();
            if (exec == null) {
                throw new IllegalStateException("not being called from an executor thread");
            }
            Computer c = exec.getOwner();
            Node node = c.getNode();
            if (node == null) {
                // the node was removed from configuration while we were running
                throw new IllegalStateException("no longer a configured node for " + c.getName());
            }
            return node;
        }
        /** Returns the launcher created for the duration of this build. */
        public Launcher getLauncher() {
            return launcher;
        }
        /** Returns the listener receiving this build's output/progress. */
        public BuildListener getListener() {
            return listener;
        }
        /**
         * Allocates the workspace from {@link WorkspaceList}.
         *
         * @param n
         *      Passed in for the convenience. The node where the build is running.
         * @param wsl
         *      Passed in for the convenience. The returned path must be registered to this object.
         */
        protected Lease decideWorkspace(@Nonnull Node n, WorkspaceList wsl) throws InterruptedException, IOException {
            String customWorkspace = getProject().getCustomWorkspace();
            if (customWorkspace != null) {
                // we allow custom workspaces to be concurrently used between jobs.
                return Lease.createDummyLease(n.getRootPath().child(getEnvironment(listener).expand(customWorkspace)));
            }
            // TODO: this cast is indicative of abstraction problem
            return wsl.allocate(n.getWorkspaceFor((TopLevelItem)getProject()), getBuild());
        }
        /**
         * Main build sequence: records the node and version, announces where the
         * build runs, leases a workspace, performs checkout, then delegates to
         * {@link #doRun(BuildListener)}.
         */
        public Result run(@Nonnull BuildListener listener) throws Exception {
            final Node node = getCurrentNode();
            assert builtOn==null;
            builtOn = node.getNodeName();
            hudsonVersion = Jenkins.VERSION;
            this.listener = listener;
            launcher = createLauncher(listener);
            // Announce where the build is running (only interesting when there are agents configured).
            if (!Jenkins.getInstance().getNodes().isEmpty()) {
                if (node instanceof Jenkins) {
                    listener.getLogger().print(Messages.AbstractBuild_BuildingOnMaster());
                } else {
                    listener.getLogger().print(Messages.AbstractBuild_BuildingRemotely(ModelHyperlinkNote.encodeTo("/computer/" + builtOn, builtOn)));
                    // Also print the node's labels (excluding its self-label) in parentheses.
                    Set<LabelAtom> assignedLabels = new HashSet<LabelAtom>(node.getAssignedLabels());
                    assignedLabels.remove(node.getSelfLabel());
                    if (!assignedLabels.isEmpty()) {
                        boolean first = true;
                        for (LabelAtom label : assignedLabels) {
                            if (first) {
                                listener.getLogger().print(" (");
                                first = false;
                            } else {
                                listener.getLogger().print(' ');
                            }
                            listener.getLogger().print(label.getName());
                        }
                        listener.getLogger().print(')');
                    }
                }
            } else {
                listener.getLogger().print(Messages.AbstractBuild_Building());
            }
            lease = decideWorkspace(node, Computer.currentComputer().getWorkspaceList());
            workspace = lease.path.getRemote();
            listener.getLogger().println(Messages.AbstractBuild_BuildingInWorkspace(workspace));
            node.getFileSystemProvisioner().prepareWorkspace(AbstractBuild.this,lease.path,listener);
            for (WorkspaceListener wl : WorkspaceListener.all()) {
                wl.beforeUse(AbstractBuild.this, lease.path, listener);
            }
            getProject().getScmCheckoutStrategy().preCheckout(AbstractBuild.this, launcher, this.listener);
            getProject().getScmCheckoutStrategy().checkout(this);
            if (!preBuild(listener,project.getProperties()))
                return Result.FAILURE;
            Result result = doRun(listener);
            if (node.getChannel() != null) {
                // kill run-away processes that are left
                // use multiple environment variables so that people can escape this massacre by overriding an environment
                // variable for some processes
                launcher.kill(getCharacteristicEnvVars());
            }
            // this is ugly, but for historical reason, if non-null value is returned
            // it should become the final result.
            if (result==null) result = getResult();
            if (result==null) result = Result.SUCCESS;
            return result;
        }
        /**
         * Creates a {@link Launcher} that this build will use. This can be overridden by derived types
         * to decorate the resulting {@link Launcher}.
         *
         * <p>
         * Also sets up {@link #buildEnvironments} by consulting build wrappers,
         * run listeners, global node properties and the current node's properties.
         *
         * @param listener
         *      Always non-null. Connected to the main build output.
         */
        @Nonnull
        protected Launcher createLauncher(@Nonnull BuildListener listener) throws IOException, InterruptedException {
            final Node currentNode = getCurrentNode();
            Launcher l = currentNode.createLauncher(listener);
            if (project instanceof BuildableItemWithBuildWrappers) {
                BuildableItemWithBuildWrappers biwbw = (BuildableItemWithBuildWrappers) project;
                for (BuildWrapper bw : biwbw.getBuildWrappersList())
                    l = bw.decorateLauncher(AbstractBuild.this,l,listener);
            }
            buildEnvironments = new ArrayList<Environment>();
            for (RunListener rl: RunListener.all()) {
                Environment environment = rl.setUpEnvironment(AbstractBuild.this, l, listener);
                if (environment != null) {
                    buildEnvironments.add(environment);
                }
            }
            for (NodeProperty nodeProperty: Jenkins.getInstance().getGlobalNodeProperties()) {
                Environment environment = nodeProperty.setUp(AbstractBuild.this, l, listener);
                if (environment != null) {
                    buildEnvironments.add(environment);
                }
            }
            for (NodeProperty nodeProperty: currentNode.getNodeProperties()) {
                Environment environment = nodeProperty.setUp(AbstractBuild.this, l, listener);
                if (environment != null) {
                    buildEnvironments.add(environment);
                }
            }
            return l;
        }
        /**
         * Default SCM checkout: retries up to the project's configured retry count,
         * parses the changelog on success, and notifies {@link SCMListener}s.
         */
        public void defaultCheckout() throws IOException, InterruptedException {
            AbstractBuild<?,?> build = AbstractBuild.this;
            AbstractProject<?, ?> project = build.getProject();
            for (int retryCount=project.getScmCheckoutRetryCount(); ; retryCount--) {
                // Reset to a no-op parser so a failed checkout leaves a consistent state.
                build.scm = NullChangeLogParser.INSTANCE;
                try {
                    File changeLogFile = new File(build.getRootDir(), "changelog.xml");
                    if (project.checkout(build, launcher,listener, changeLogFile)) {
                        // check out succeeded
                        SCM scm = project.getScm();
                        for (SCMListener l : SCMListener.all()) {
                            try {
                                l.onCheckout(build, scm, build.getWorkspace(), listener, changeLogFile, build.getAction(SCMRevisionState.class));
                            } catch (Exception e) {
                                throw new IOException(e);
                            }
                        }
                        build.scm = scm.createChangeLogParser();
                        build.changeSet = new WeakReference<ChangeLogSet<? extends Entry>>(build.calcChangeSet());
                        for (SCMListener l : SCMListener.all())
                            try {
                                l.onChangeLogParsed(build,listener,build.getChangeSet());
                            } catch (Exception e) {
                                throw new IOException("Failed to parse changelog",e);
                            }
                        // Get a chance to do something after checkout and changelog is done
                        scm.postCheckout( build, launcher, build.getWorkspace(), listener );
                        return;
                    }
                } catch (AbortException e) {
                    listener.error(e.getMessage());
                } catch (InterruptedIOException e) {
                    throw (InterruptedException)new InterruptedException().initCause(e);
                } catch (IOException e) {
                    // checkout error not yet reported
                    Functions.printStackTrace(e, listener.getLogger());
                }
                if (retryCount == 0)   // all attempts failed
                    throw new RunnerAbortedException();
                listener.getLogger().println("Retrying after 10 seconds");
                Thread.sleep(10000);
            }
        }
        /**
         * The portion of a build that is specific to a subclass of {@link AbstractBuild}
         * goes here.
         *
         * @return
         *      null to continue the build normally (that means the doRun method
         *      itself run successfully)
         *      Return a non-null value to abort the build right there with the specified result code.
         */
        protected abstract Result doRun(BuildListener listener) throws Exception, RunnerAbortedException;
        /**
         * @see #post(BuildListener)
         */
        protected abstract void post2(BuildListener listener) throws Exception;
        /**
         * Runs the subclass post-processing and — even if it throws — freezes the
         * culprit list into {@link #culprits} and reports the checkpoint.
         */
        public final void post(BuildListener listener) throws Exception {
            try {
                post2(listener);
            } finally {
                // update the culprit list
                HashSet<String> r = new HashSet<String>();
                for (User u : getCulprits())
                    r.add(u.getId());
                culprits = ImmutableSortedSet.copyOf(r);
                CheckPoint.CULPRITS_DETERMINED.report();
            }
        }
        /** Releases the workspace lease and triggers downstream builds. */
        public void cleanUp(BuildListener listener) throws Exception {
            if (lease!=null) {
                lease.release();
                lease = null;
            }
            BuildTrigger.execute(AbstractBuild.this, listener);
            buildEnvironments = null;
        }
        /**
         * @deprecated as of 1.356
         *      Use {@link #performAllBuildSteps(BuildListener, Map, boolean)}
         */
        @Deprecated
        protected final void performAllBuildStep(BuildListener listener, Map<?,? extends BuildStep> buildSteps, boolean phase) throws InterruptedException, IOException {
            performAllBuildSteps(listener,buildSteps.values(),phase);
        }
        /** Convenience overload of {@link #performAllBuildSteps(BuildListener, Iterable, boolean)} for maps. */
        protected final boolean performAllBuildSteps(BuildListener listener, Map<?,? extends BuildStep> buildSteps, boolean phase) throws InterruptedException, IOException {
            return performAllBuildSteps(listener,buildSteps.values(),phase);
        }
        /**
         * @deprecated as of 1.356
         *      Use {@link #performAllBuildSteps(BuildListener, Iterable, boolean)}
         */
        @Deprecated
        protected final void performAllBuildStep(BuildListener listener, Iterable<? extends BuildStep> buildSteps, boolean phase) throws InterruptedException, IOException {
            performAllBuildSteps(listener,buildSteps,phase);
        }
        /**
         * Runs all the given build steps, even if one of them fail.
         *
         * @param phase
         *      true for the post build processing, and false for the final "run after finished" execution.
         *
         * @return false if any build step failed
         */
        protected final boolean performAllBuildSteps(BuildListener listener, Iterable<? extends BuildStep> buildSteps, boolean phase) throws InterruptedException, IOException {
            boolean r = true;
            for (BuildStep bs : buildSteps) {
                // XOR selects publishers that run after finalization in the matching phase only.
                if ((bs instanceof Publisher && ((Publisher)bs).needsToRunAfterFinalized()) ^ phase)
                    try {
                        if (!perform(bs,listener)) {
                            LOGGER.log(Level.FINE, "{0} : {1} failed", new Object[] {AbstractBuild.this, bs});
                            r = false;
                            if (phase) {
                                setResult(Result.FAILURE);
                            }
                        }
                    } catch (Exception e) {
                        reportError(bs, e, listener, phase);
                        r = false;
                    } catch (LinkageError e) {
                        reportError(bs, e, listener, phase);
                        r = false;
                    }
            }
            return r;
        }
        /** Logs a build-step failure; {@link AbortException} is treated as a clean failure without a stack trace. */
        private void reportError(BuildStep bs, Throwable e, BuildListener listener, boolean phase) {
            final String buildStep;
            if (bs instanceof Describable) {
                buildStep = ((Describable) bs).getDescriptor().getDisplayName();
            } else {
                buildStep = bs.getClass().getName();
            }
            if (e instanceof AbortException) {
                LOGGER.log(Level.FINE, "{0} : {1} failed", new Object[] {AbstractBuild.this, buildStep});
                listener.error("Step ‘" + buildStep + "’ failed: " + e.getMessage());
            } else {
                String msg = "Step ‘" + buildStep + "’ aborted due to exception: ";
                Functions.printStackTrace(e, listener.error(msg));
                LOGGER.log(WARNING, msg, e);
            }
            if (phase) {
                setResult(Result.FAILURE);
            }
        }
        /**
         * Calls a build step.
         */
        protected final boolean perform(BuildStep bs, BuildListener listener) throws InterruptedException, IOException {
            BuildStepMonitor mon;
            try {
                mon = bs.getRequiredMonitorService();
            } catch (AbstractMethodError e) {
                // step was compiled against an older BuildStep interface; use the safe default
                mon = BuildStepMonitor.BUILD;
            }
            Result oldResult = AbstractBuild.this.getResult();
            for (BuildStepListener bsl : BuildStepListener.all()) {
                bsl.started(AbstractBuild.this, bs, listener);
            }
            boolean canContinue = false;
            try {
                canContinue = mon.perform(bs, AbstractBuild.this, launcher, listener);
            } catch (RequestAbortedException ex) {
                // Channel is closed, do not continue
                reportBrokenChannel(listener);
            } catch (ChannelClosedException ex) {
                // Channel is closed, do not continue
                reportBrokenChannel(listener);
            } catch (RuntimeException ex) {
                Functions.printStackTrace(ex, listener.error("Build step failed with exception"));
            }
            for (BuildStepListener bsl : BuildStepListener.all()) {
                bsl.finished(AbstractBuild.this, bs, listener, canContinue);
            }
            // Report if the step changed the overall result or stopped the build.
            Result newResult = AbstractBuild.this.getResult();
            if (newResult != oldResult) {
                String buildStepName = getBuildStepName(bs);
                listener.getLogger().format("Build step '%s' changed build result to %s%n", buildStepName, newResult);
            }
            if (!canContinue) {
                String buildStepName = getBuildStepName(bs);
                listener.getLogger().format("Build step '%s' marked build as failure%n", buildStepName);
            }
            return canContinue;
        }
        /** Reports that the channel to the agent was lost, with a link to the agent's log and its offline cause. */
        private void reportBrokenChannel(BuildListener listener) throws IOException {
            final Node node = getCurrentNode();
            listener.hyperlink("/" + node.toComputer().getUrl() + "log", "Agent went offline during the build");
            listener.getLogger().println();
            final OfflineCause offlineCause = node.toComputer().getOfflineCause();
            if (offlineCause != null) {
                listener.error(offlineCause.toString());
            }
        }
        /** Returns a human-readable name for a build step, for log messages. */
        private String getBuildStepName(BuildStep bs) {
            if (bs instanceof Describable<?>) {
                return ((Describable<?>) bs).getDescriptor().getDisplayName();
            } else {
                return bs.getClass().getSimpleName();
            }
        }
        /** Convenience overload of {@link #preBuild(BuildListener, Iterable)} for maps. */
        protected final boolean preBuild(BuildListener listener,Map<?,? extends BuildStep> steps) {
            return preBuild(listener,steps.values());
        }
        /** Convenience overload of {@link #preBuild(BuildListener, Iterable)} for collections. */
        protected final boolean preBuild(BuildListener listener,Collection<? extends BuildStep> steps) {
            return preBuild(listener,(Iterable<? extends BuildStep>)steps);
        }
        /** Runs prebuild on each step, stopping at the first one that vetoes the build. */
        protected final boolean preBuild(BuildListener listener,Iterable<? extends BuildStep> steps) {
            for (BuildStep bs : steps)
                if (!bs.prebuild(AbstractBuild.this,listener)) {
                    LOGGER.log(Level.FINE, "{0} : {1} failed", new Object[] {AbstractBuild.this, bs});
                    return false;
                }
            return true;
        }
    }
/**
* get the fingerprints associated with this build
*
* @return never null
*/
@Exported(name = "fingerprint", inline = true, visibility = -1)
public Collection<Fingerprint> getBuildFingerprints() {
FingerprintAction fingerprintAction = getAction(FingerprintAction.class);
if (fingerprintAction != null) {
return fingerprintAction.getFingerprints().values();
}
return Collections.<Fingerprint>emptyList();
}
    /*
     * No need to lock the entire AbstractBuild on change set calculation;
     * this dedicated monitor only guards lazy initialization of {@link #scm}.
     */
    private transient Object changeSetLock = new Object();
    /**
     * Gets the changes incorporated into this build.
     *
     * <p>
     * The parsed changelog is cached through a {@link WeakReference} and
     * recomputed from disk when the cache has been collected.
     *
     * @return never null.
     */
    @Exported
    public ChangeLogSet<? extends Entry> getChangeSet() {
        synchronized (changeSetLock) {
            if (scm==null) {
                // old record without a persisted parser; fall back to a no-op parser
                scm = NullChangeLogParser.INSTANCE;
            }
        }
        ChangeLogSet<? extends Entry> cs = null;
        if (changeSet!=null)
            cs = changeSet.get();
        if (cs==null)
            cs = calcChangeSet();
        // defensive check. if the calculation fails (such as through an exception),
        // set a dummy value so that it'll work the next time. the exception will
        // be still reported, giving the plugin developer an opportunity to fix it.
        if (cs==null)
            cs = ChangeLogSet.createEmpty(this);
        changeSet = new WeakReference<ChangeLogSet<? extends Entry>>(cs);
        return cs;
    }
    /** Single-element (or empty) list view of {@link #getChangeSet()}, for the project-changes view. */
    @Restricted(DoNotUse.class) // for project-changes.jelly
    public List<ChangeLogSet<? extends ChangeLogSet.Entry>> getChangeSets() {
        ChangeLogSet<? extends Entry> cs = getChangeSet();
        return cs.isEmptySet() ? Collections.<ChangeLogSet<? extends ChangeLogSet.Entry>>emptyList() : Collections.<ChangeLogSet<? extends ChangeLogSet.Entry>>singletonList(cs);
    }
/**
* Returns true if the changelog is already computed.
*/
public boolean hasChangeSetComputed() {
File changelogFile = new File(getRootDir(), "changelog.xml");
return changelogFile.exists();
}
private ChangeLogSet<? extends Entry> calcChangeSet() {
File changelogFile = new File(getRootDir(), "changelog.xml");
if (!changelogFile.exists())
return ChangeLogSet.createEmpty(this);
try {
return scm.parse(this,changelogFile);
} catch (IOException e) {
LOGGER.log(WARNING, "Failed to parse "+changelogFile,e);
} catch (SAXException e) {
LOGGER.log(WARNING, "Failed to parse "+changelogFile,e);
}
return ChangeLogSet.createEmpty(this);
}
    /**
     * Builds the environment for this build on top of the base {@link Run} environment:
     * adds WORKSPACE, SCM variables, build-wrapper environments and
     * {@link EnvironmentContributingAction}s, then resolves cross-references.
     */
    @Override
    public EnvVars getEnvironment(TaskListener log) throws IOException, InterruptedException {
        EnvVars env = super.getEnvironment(log);
        FilePath ws = getWorkspace();
        if (ws!=null)   // if this is done very early on in the build, workspace may not be decided yet. see HUDSON-3997
            env.put("WORKSPACE", ws.getRemote());
        project.getScm().buildEnvVars(this,env);
        if (buildEnvironments!=null)
            for (Environment e : buildEnvironments)
                e.buildEnvVars(env);
        for (EnvironmentContributingAction a : getActions(EnvironmentContributingAction.class))
            a.buildEnvVars(this,env);
        // resolve variables that reference other variables
        EnvVars.resolve(env);
        return env;
    }
    /**
     * During the build, expose the environments contributed by {@link BuildWrapper}s and others.
     *
     * <p>
     * Since 1.444, executor thread that's doing the build can access mutable underlying list,
     * which allows the caller to add/remove environments. The recommended way of adding
     * environment is through {@link BuildWrapper}, but this might be handy for build steps
     * who wants to expose additional environment variables to the rest of the build.
     *
     * @return can be empty list, but never null. Immutable.
     * @since 1.437
     */
    public EnvironmentList getEnvironments() {
        Executor e = Executor.currentExecutor();
        if (e!=null && e.getCurrentExecutable()==this) {
            // the executor building this very build gets a mutable, live view
            if (buildEnvironments==null)    buildEnvironments = new ArrayList<Environment>();
            return new EnvironmentList(buildEnvironments);
        }
        // everyone else gets an immutable snapshot
        return new EnvironmentList(buildEnvironments==null ? Collections.<Environment>emptyList() : ImmutableList.copyOf(buildEnvironments));
    }
    /** Returns the scheduled timestamp of this build (same as {@link #getTimestamp()}). */
    public Calendar due() {
        return getTimestamp();
    }
    /**
     * {@inheritDoc}
     * The action may have a {@code summary.jelly} view containing a {@code <t:summary>} or other {@code <tr>}.
     */
    @Override public void addAction(Action a) {
        super.addAction(a);
    }
    /** Returns the persisted actions of this build (the deprecated raw list from {@link Run}). */
    @SuppressWarnings("deprecation")
    public List<Action> getPersistentActions(){
        return super.getActions();
    }
    /**
     * Builds up a set of variable names that contain sensitive values that
     * should not be exposed. The expectation is that this set is populated with
     * keys returned by {@link #getBuildVariables()} that should have their
     * values masked for display purposes.
     *
     * @since 1.378
     */
    public Set<String> getSensitiveBuildVariables() {
        Set<String> s = new HashSet<String>();
        ParametersAction parameters = getAction(ParametersAction.class);
        if (parameters != null) {
            // sensitive build parameters (e.g. passwords) are masked
            for (ParameterValue p : parameters) {
                if (p.isSensitive()) {
                    s.add(p.getName());
                }
            }
        }
        // Allow BuildWrappers to determine if any of their data is sensitive
        if (project instanceof BuildableItemWithBuildWrappers) {
            for (BuildWrapper bw : ((BuildableItemWithBuildWrappers) project).getBuildWrappersList()) {
                bw.makeSensitiveBuildVariables(this, s);
            }
        }
        return s;
    }
    /**
     * Provides additional variables and their values to {@link Builder}s.
     *
     * <p>
     * This mechanism is used by {@code MatrixConfiguration} to pass
     * the configuration values to the current build. It is up to
     * {@link Builder}s to decide whether they want to recognize the values
     * or how to use them.
     *
     * <p>
     * This also includes build parameters if a build is parameterized.
     *
     * @return
     *      The returned map is mutable so that subtypes can put more values.
     */
    public Map<String,String> getBuildVariables() {
        Map<String,String> r = new HashMap<String, String>();
        ParametersAction parameters = getAction(ParametersAction.class);
        if (parameters!=null) {
            // this is a rather round about way of doing this...
            for (ParameterValue p : parameters) {
                String v = p.createVariableResolver(this).resolve(p.getName());
                if (v!=null) r.put(p.getName(),v);
            }
        }
        // allow the BuildWrappers to contribute additional build variables
        if (project instanceof BuildableItemWithBuildWrappers) {
            for (BuildWrapper bw : ((BuildableItemWithBuildWrappers) project).getBuildWrappersList())
                bw.makeBuildVariables(this,r);
        }
        // finally, extension-point contributors get the last word
        for (BuildVariableContributor bvc : BuildVariableContributor.all())
            bvc.buildVariablesFor(this,r);
        return r;
    }
    /**
     * Creates {@link VariableResolver} backed by {@link #getBuildVariables()}.
     * Note: the variables are snapshotted at call time.
     */
    public final VariableResolver<String> getBuildVariableResolver() {
        return new VariableResolver.ByMap<String>(getBuildVariables());
    }
    /**
     * @deprecated Use {@link #getAction(Class)} on {@link AbstractTestResultAction}.
     */
    @Deprecated
    public Action getTestResultAction() {
        // The class lives in a plugin, so it is looked up reflectively through the plugin classloader.
        try {
            return getAction(Jenkins.getInstance().getPluginManager().uberClassLoader.loadClass("hudson.tasks.test.AbstractTestResultAction").asSubclass(Action.class));
        } catch (ClassNotFoundException x) {
            // plugin not installed; no test results available
            return null;
        }
    }
    /**
     * @deprecated Use {@link #getAction(Class)} on {@link AggregatedTestResultAction}.
     */
    @Deprecated
    public Action getAggregatedTestResultAction() {
        // The class lives in a plugin, so it is looked up reflectively through the plugin classloader.
        try {
            return getAction(Jenkins.getInstance().getPluginManager().uberClassLoader.loadClass("hudson.tasks.test.AggregatedTestResultAction").asSubclass(Action.class));
        } catch (ClassNotFoundException x) {
            // plugin not installed; no aggregated test results available
            return null;
        }
    }
    /**
     * Invoked by {@link Executor} to performs a build.
     */
    public abstract void run();
//
//
// fingerprint related stuff
//
//
    /**
     * Explains why this build record must be kept: if a downstream project is
     * configured with 'keep dependency component' and still has an active build
     * that depends on this one, the log cannot be rotated away.
     */
    @Override
    public String getWhyKeepLog() {
        // if any of the downstream project is configured with 'keep dependency component',
        // we need to keep this log
        OUTER:
        for (AbstractProject<?,?> p : getParent().getDownstreamProjects()) {
            if (!p.isKeepDependencies()) continue;
            AbstractBuild<?,?> fb = p.getFirstBuild();
            if (fb==null)        continue; // no active record
            // is there any active build that depends on us?
            for (int i : getDownstreamRelationship(p).listNumbersReverse()) {
                // TODO: this is essentially a "find intersection between two sparse sequences"
                // and we should be able to do much better.
                if (i<fb.getNumber())
                    continue OUTER; // all the other records are younger than the first record, so pointless to search.
                AbstractBuild<?,?> b = p.getBuildByNumber(i);
                if (b!=null)
                    return Messages.AbstractBuild_KeptBecause(b);
            }
        }
        return super.getWhyKeepLog();
    }
    /**
     * Gets the dependency relationship from this build (as the source)
     * and that project (as the sink.)
     *
     * @return
     *      range of build numbers that represent which downstream builds are using this build.
     *      The range will be empty if no build of that project matches this (or there is no {@link FingerprintAction}), but it'll never be null.
     */
    public RangeSet getDownstreamRelationship(AbstractProject that) {
        RangeSet rs = new RangeSet();
        FingerprintAction f = getAction(FingerprintAction.class);
        if (f==null)     return rs;
        // look for fingerprints that point to this build as the source, and merge them all
        for (Fingerprint e : f.getFingerprints().values()) {
            if (upstreamCulprits) {
                // With upstreamCulprits, we allow downstream relationships
                // from intermediate jobs
                rs.add(e.getRangeSet(that));
            } else {
                // only count fingerprints whose original owner is this very build
                BuildPtr o = e.getOriginal();
                if (o!=null && o.is(this))
                    rs.add(e.getRangeSet(that));
            }
        }
        return rs;
    }
    /**
     * Works like {@link #getDownstreamRelationship(AbstractProject)} but returns
     * the actual build objects, in ascending order.
     * Builds whose records were already rotated away are silently skipped.
     * @since 1.150
     */
    public Iterable<AbstractBuild<?,?>> getDownstreamBuilds(final AbstractProject<?,?> that) {
        final Iterable<Integer> nums = getDownstreamRelationship(that).listNumbers();
        return new Iterable<AbstractBuild<?, ?>>() {
            public Iterator<AbstractBuild<?, ?>> iterator() {
                // map build numbers to builds, dropping ones that no longer exist
                return Iterators.removeNull(
                    new AdaptedIterator<Integer,AbstractBuild<?,?>>(nums) {
                        protected AbstractBuild<?, ?> adapt(Integer item) {
                            return that.getBuildByNumber(item);
                        }
                    });
            }
        };
    }
    /**
     * Gets the dependency relationship from this build (as the sink)
     * and that project (as the source.)
     *
     * @return
     *      Build number of the upstream build that feed into this build,
     *      or -1 if no record is available (for example if there is no {@link FingerprintAction}, even if there is an {@link Cause.UpstreamCause}).
     */
    public int getUpstreamRelationship(AbstractProject that) {
        FingerprintAction f = getAction(FingerprintAction.class);
        if (f==null)     return -1;
        int n = -1;
        // look for fingerprints that point to the given project as the source, and merge them all
        for (Fingerprint e : f.getFingerprints().values()) {
            if (upstreamCulprits) {
                // With upstreamCulprits, we allow upstream relationships
                // from intermediate jobs
                Fingerprint.RangeSet rangeset = e.getRangeSet(that);
                if (!rangeset.isEmpty()) {
                    // highest build number in the range = most recent upstream build
                    n = Math.max(n, rangeset.listNumbersReverse().iterator().next());
                }
            } else {
                BuildPtr o = e.getOriginal();
                if (o!=null && o.belongsTo(that))
                    n = Math.max(n,o.getNumber());
            }
        }
        return n;
    }
/**
* Works like {@link #getUpstreamRelationship(AbstractProject)} but returns the
* actual build object.
*
* @return
* null if no such upstream build was found, or it was found but the
* build record is already lost.
*/
public AbstractBuild<?,?> getUpstreamRelationshipBuild(AbstractProject<?,?> that) {
int n = getUpstreamRelationship(that);
if (n==-1) return null;
return that.getBuildByNumber(n);
}
/**
* Gets the downstream builds of this build, which are the builds of the
* downstream projects that use artifacts of this build.
*
* @return
* For each project with fingerprinting enabled, returns the range
* of builds (which can be empty if no build uses the artifact from this build or downstream is not {@link AbstractProject#isFingerprintConfigured}.)
*/
public Map<AbstractProject,RangeSet> getDownstreamBuilds() {
Map<AbstractProject,RangeSet> r = new HashMap<AbstractProject,RangeSet>();
for (AbstractProject p : getParent().getDownstreamProjects()) {
if (p.isFingerprintConfigured())
r.put(p,getDownstreamRelationship(p));
}
return r;
}
    /**
     * Gets the upstream builds of this build, which are the builds of the
     * upstream projects whose artifacts feed into this build.
     * @return empty if there is no {@link FingerprintAction} (even if there is an {@link Cause.UpstreamCause})
     * @see #getTransitiveUpstreamBuilds()
     */
    public Map<AbstractProject,Integer> getUpstreamBuilds() {
        // direct upstream projects only; see getTransitiveUpstreamBuilds() for the closure
        return _getUpstreamBuilds(getParent().getUpstreamProjects());
    }
    /**
     * Works like {@link #getUpstreamBuilds()} but also includes all the transitive
     * dependencies as well.
     */
    public Map<AbstractProject,Integer> getTransitiveUpstreamBuilds() {
        return _getUpstreamBuilds(getParent().getTransitiveUpstreamProjects());
    }
private Map<AbstractProject, Integer> _getUpstreamBuilds(Collection<AbstractProject> projects) {
Map<AbstractProject,Integer> r = new HashMap<AbstractProject,Integer>();
for (AbstractProject p : projects) {
int n = getUpstreamRelationship(p);
if (n>=0)
r.put(p,n);
}
return r;
}
    /**
     * Gets the changes in the dependency between the given build and this build.
     * @return empty if there is no {@link FingerprintAction}
     */
    public Map<AbstractProject,DependencyChange> getDependencyChanges(AbstractBuild from) {
        if (from==null)             return Collections.emptyMap(); // make it easy to call this from views
        FingerprintAction n = this.getAction(FingerprintAction.class);
        FingerprintAction o = from.getAction(FingerprintAction.class);
        if (n==null || o==null)     return Collections.emptyMap();
        Map<AbstractProject,Integer> ndep = n.getDependencies(true);
        Map<AbstractProject,Integer> odep = o.getDependencies(true);
        Map<AbstractProject,DependencyChange> r = new HashMap<AbstractProject,DependencyChange>();
        // report each dependency whose build number advanced between 'from' and this build
        for (Map.Entry<AbstractProject,Integer> entry : odep.entrySet()) {
            AbstractProject p = entry.getKey();
            Integer oldNumber = entry.getValue();
            Integer newNumber = ndep.get(p);
            if (newNumber!=null && oldNumber.compareTo(newNumber)<0) {
                r.put(p,new DependencyChange(p,oldNumber,newNumber));
            }
        }
        return r;
    }
/**
* Represents a change in the dependency.
*/
public static final class DependencyChange {
/**
* The dependency project.
*/
public final AbstractProject project;
/**
* Version of the dependency project used in the previous build.
*/
public final int fromId;
/**
* {@link Build} object for {@link #fromId}. Can be null if the log is gone.
*/
public final AbstractBuild from;
/**
* Version of the dependency project used in this build.
*/
public final int toId;
public final AbstractBuild to;
public DependencyChange(AbstractProject<?,?> project, int fromId, int toId) {
this.project = project;
this.fromId = fromId;
this.toId = toId;
this.from = project.getBuildByNumber(fromId);
this.to = project.getBuildByNumber(toId);
}
/**
* Gets the {@link AbstractBuild} objects (fromId,toId].
* <p>
* This method returns all such available builds in the ascending order
* of IDs, but due to log rotations, some builds may be already unavailable.
*/
public List<AbstractBuild> getBuilds() {
List<AbstractBuild> r = new ArrayList<AbstractBuild>();
AbstractBuild<?,?> b = project.getNearestBuild(fromId);
if (b!=null && b.getNumber()==fromId)
b = b.getNextBuild(); // fromId exclusive
while (b!=null && b.getNumber()<=toId) {
r.add(b);
b = b.getNextBuild();
}
return r;
}
}
    //
    // web methods
    //
    /**
     * Stops this build if it is still in progress.
     *
     * @deprecated as of 1.489
     *      Use {@link #doStop()}
     */
    @Deprecated
    @RequirePOST // #doStop() should be preferred, but better to be safe
    public void doStop(StaplerRequest req, StaplerResponse rsp) throws IOException, ServletException {
        // Delegates to the non-deprecated overload and renders its response.
        doStop().generateResponse(req,rsp,this);
    }
/**
* Stops this build if it's still going.
*
* If we use this/executor/stop URL, it causes 404 if the build is already killed,
* as {@link #getExecutor()} returns null.
*
* @since 1.489
*/
@RequirePOST
public synchronized HttpResponse doStop() throws IOException, ServletException {
Executor e = getExecutor();
if (e==null)
e = getOneOffExecutor();
if (e!=null)
return e.doStop();
else
// nothing is building
return HttpResponses.forwardToPreviousPage();
}
private static final Logger LOGGER = Logger.getLogger(AbstractBuild.class.getName());
}
| 52,051 | 36.746193 | 179 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/AbstractBuild2.java
|
/*
* The MIT License
*
* Copyright (c) 2004-2010, Sun Microsystems, Inc., Kohsuke Kawaguchi, Yahoo! Inc., CloudBees, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package hudson.model;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSortedSet;
import hudson.AbortException;
import hudson.EnvVars;
import hudson.FilePath;
import hudson.Functions;
import hudson.Launcher;
import jenkins.util.SystemProperties;
import hudson.console.ModelHyperlinkNote;
import hudson.model.Fingerprint.BuildPtr;
import hudson.model.Fingerprint.RangeSet;
import hudson.model.labels.LabelAtom;
import hudson.model.listeners.RunListener;
import hudson.model.listeners.SCMListener;
import hudson.remoting.ChannelClosedException;
import hudson.remoting.RequestAbortedException;
import hudson.scm.ChangeLogParser;
import hudson.scm.ChangeLogSet;
import hudson.scm.ChangeLogSet.Entry;
import hudson.scm.NullChangeLogParser;
import hudson.scm.SCM;
import hudson.scm.SCMRevisionState;
import hudson.slaves.NodeProperty;
import hudson.slaves.WorkspaceList;
import hudson.slaves.WorkspaceList.Lease;
import hudson.slaves.OfflineCause;
import hudson.tasks.BuildStep;
import hudson.tasks.BuildStepMonitor;
import hudson.tasks.BuildTrigger;
import hudson.tasks.BuildWrapper;
import hudson.tasks.Builder;
import hudson.tasks.Fingerprinter.FingerprintAction;
import hudson.tasks.Publisher;
import hudson.util.*;
import jenkins.model.Jenkins;
import org.kohsuke.stapler.HttpResponse;
import org.kohsuke.stapler.Stapler;
import org.kohsuke.stapler.StaplerRequest;
import org.kohsuke.stapler.StaplerResponse;
import org.kohsuke.stapler.export.Exported;
import org.xml.sax.SAXException;
import javax.servlet.ServletException;
import java.io.File;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.lang.ref.WeakReference;
import java.util.AbstractSet;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.annotation.CheckForNull;
import javax.annotation.Nonnull;
import org.kohsuke.stapler.interceptor.RequirePOST;
import static java.util.logging.Level.WARNING;
import jenkins.model.lazy.BuildReference;
import jenkins.model.lazy.LazyBuildMixIn;
import org.kohsuke.accmod.Restricted;
import org.kohsuke.accmod.restrictions.DoNotUse;
/**
* Base implementation of {@link Run}s that build software.
*
* For now this is primarily the common part of {@link Build} and MavenBuild.
*
* @author Kohsuke Kawaguchi
* @see AbstractProject
*/
public abstract class AbstractBuild<P extends AbstractProject<P,R>,R extends AbstractBuild<P,R>> extends Run<P,R> implements Queue.Executable, LazyBuildMixIn.LazyLoadingRun<P,R> {
/**
* Set if we want the blame information to flow from upstream to downstream build.
*/
private static final boolean upstreamCulprits = SystemProperties.getBoolean("hudson.upstreamCulprits");
/**
* Name of the agent this project was built on.
* Null or "" if built by the master. (null happens when we read old record that didn't have this information.)
*/
private String builtOn;
/**
* The file path on the node that performed a build. Kept as a string since {@link FilePath} is not serializable into XML.
* @since 1.319
*/
private String workspace;
/**
* Version of Hudson that built this.
*/
private String hudsonVersion;
/**
* SCM used for this build.
*/
private ChangeLogParser scm;
/**
* Changes in this build.
*/
private volatile transient WeakReference<ChangeLogSet<? extends Entry>> changeSet;
/**
* Cumulative list of people who contributed to the build problem.
*
* <p>
* This is a list of {@link User#getId() user ids} who made a change
* since the last non-broken build. Can be null (which should be
* treated like empty set), because of the compatibility.
*
* <p>
* This field is semi-final --- once set the value will never be modified.
*
* @since 1.137
*/
private volatile Set<String> culprits;
/**
* During the build this field remembers {@link hudson.tasks.BuildWrapper.Environment}s created by
* {@link BuildWrapper}. This design is bit ugly but forced due to compatibility.
*/
protected transient List<Environment> buildEnvironments;
private transient final LazyBuildMixIn.RunMixIn<P,R> runMixIn = new LazyBuildMixIn.RunMixIn<P,R>() {
@Override protected R asRun() {
return _this();
}
};
protected AbstractBuild(P job) throws IOException {
super(job);
}
protected AbstractBuild(P job, Calendar timestamp) {
super(job, timestamp);
}
protected AbstractBuild(P project, File buildDir) throws IOException {
super(project, buildDir);
}
    /** The project this build belongs to; an alias of {@link #getParent()}. */
    public final P getProject() {
        return getParent();
    }
    /** Exposes the lazy-loading mix-in that backs previous/next build navigation. */
    @Override public final LazyBuildMixIn.RunMixIn<P,R> getRunMixIn() {
        return runMixIn;
    }
    // The following overrides simply delegate run-linkage bookkeeping to the mix-in.
    @Override protected final BuildReference<R> createReference() {
        return getRunMixIn().createReference();
    }
    @Override protected final void dropLinks() {
        getRunMixIn().dropLinks();
    }
    @Override
    public R getPreviousBuild() {
        return getRunMixIn().getPreviousBuild();
    }
    @Override
    public R getNextBuild() {
        return getRunMixIn().getNextBuild();
    }
/**
* Returns a {@link Slave} on which this build was done.
*
* @return
* null, for example if the agent that this build run no longer exists.
*/
public @CheckForNull Node getBuiltOn() {
if (builtOn==null || builtOn.equals(""))
return Jenkins.getInstance();
else
return Jenkins.getInstance().getNode(builtOn);
}
    /**
     * Returns the name of the agent it was built on; null or "" if built by the master.
     * (null happens when we read old record that didn't have this information.)
     *
     * @see #getBuiltOn()
     */
    @Exported(name="builtOn")
    public String getBuiltOnStr() {
        return builtOn;
    }
    /**
     * Allows subtypes to set the value of {@link #builtOn}.
     * This is used for those implementations where an {@link AbstractBuild} is made 'built' without
     * actually running its {@link #run()} method.
     *
     * @param builtOn the agent name; null or "" designates the master
     * @since 1.429
     */
    protected void setBuiltOnStr( String builtOn ) {
        this.builtOn = builtOn;
    }
    /**
     * Gets the nearest ancestor {@link AbstractBuild} that belongs to
     * {@linkplain AbstractProject#getRootProject() the root project of getProject()} that
     * dominates/governs/encompasses this build.
     *
     * <p>
     * Some projects (such as matrix projects, Maven projects, or promotion processes) form a tree of jobs,
     * and still in some of them, builds of child projects are related/tied to that of the parent project.
     * In such a case, this method returns the governing build.
     *
     * @return never null. In the worst case the build dominates itself.
     * @since 1.421
     * @see AbstractProject#getRootProject()
     */
    public AbstractBuild<?,?> getRootBuild() {
        // Base implementation: this build governs itself; subtypes may override.
        return this;
    }
    /**
     * Used to render the side panel "Back to project" link.
     *
     * <p>
     * In a rare situation where a build can be reached from multiple paths,
     * returning different URLs from this method based on situations might
     * be desirable.
     *
     * <p>
     * If you override this method, you'll most likely also want to override
     * {@link #getDisplayName()}.
     */
    public String getUpUrl() {
        // Resolved against the current Stapler request so the link reflects the path the user came from.
        return Functions.getNearestAncestorUrl(Stapler.getCurrentRequest(),getParent())+'/';
    }
/**
* Gets the directory where this build is being built.
*
* <p>
* Note to implementors: to control where the workspace is created, override
* {@link AbstractBuildExecution#decideWorkspace(Node,WorkspaceList)}.
*
* @return
* null if the workspace is on an agent that's not connected. Note that once the build is completed,
* the workspace may be used to build something else, so the value returned from this method may
* no longer show a workspace as it was used for this build.
* @since 1.319
*/
public final @CheckForNull FilePath getWorkspace() {
if (workspace==null) return null;
Node n = getBuiltOn();
if (n==null) return null;
return n.createPath(workspace);
}
    /**
     * Normally, a workspace is assigned by {@link hudson.model.Run.RunExecution}, but this lets you set the workspace in case
     * {@link AbstractBuild} is created without a build.
     *
     * @param ws the workspace path; only its remote string form is retained (FilePath is not XML-serializable)
     */
    protected void setWorkspace(@Nonnull FilePath ws) {
        this.workspace = ws.getRemote();
    }
/**
* Returns the root directory of the checked-out module.
* <p>
* This is usually where <tt>pom.xml</tt>, <tt>build.xml</tt>
* and so on exists.
*/
public final FilePath getModuleRoot() {
FilePath ws = getWorkspace();
if (ws==null) return null;
return getParent().getScm().getModuleRoot(ws, this);
}
/**
* Returns the root directories of all checked-out modules.
* <p>
* Some SCMs support checking out multiple modules into the same workspace.
* In these cases, the returned array will have a length greater than one.
* @return The roots of all modules checked out from the SCM.
*/
public FilePath[] getModuleRoots() {
FilePath ws = getWorkspace();
if (ws==null) return null;
return getParent().getScm().getModuleRoots(ws, this);
}
    /**
     * List of users who committed a change since the last non-broken build till now.
     *
     * <p>
     * This list at least always include people who made changes in this build, but
     * if the previous build was a failure it also includes the culprit list from there.
     *
     * @return
     *      can be empty but never null.
     */
    @Exported
    public Set<User> getCulprits() {
        // The 'culprits' field is only persisted once the build completes; while it is
        // null we compute the set on the fly from the change set and previous builds.
        if (culprits==null) {
            Set<User> r = new HashSet<User>();
            R p = getPreviousCompletedBuild();
            if (p !=null && isBuilding()) {
                Result pr = p.getResult();
                if (pr!=null && pr.isWorseThan(Result.SUCCESS)) {
                    // we are still building, so this is just the current latest information,
                    // but we seems to be failing so far, so inherit culprits from the previous build.
                    // isBuilding() check is to avoid recursion when loading data from old Hudson, which doesn't record
                    // this information
                    r.addAll(p.getCulprits());
                }
            }
            // Everyone who committed a change that went into this build is a culprit.
            for (Entry e : getChangeSet())
                r.add(e.getAuthor());
            if (upstreamCulprits) {
                // If we have dependencies since the last successful build, add their authors to our list
                if (getPreviousNotFailedBuild() != null) {
                    Map <AbstractProject,DependencyChange> depmap = getDependencyChanges(getPreviousSuccessfulBuild());
                    for (DependencyChange dep : depmap.values()) {
                        for (AbstractBuild<?,?> b : dep.getBuilds()) {
                            for (Entry entry : b.getChangeSet()) {
                                r.add(entry.getAuthor());
                            }
                        }
                    }
                }
            }
            return r;
        }
        // Culprits were persisted as user ids; adapt them lazily into User objects
        // so that User lookup only happens when the set is actually iterated.
        return new AbstractSet<User>() {
            public Iterator<User> iterator() {
                return new AdaptedIterator<String,User>(culprits.iterator()) {
                    protected User adapt(String id) {
                        return User.get(id);
                    }
                };
            }
            public int size() {
                return culprits.size();
            }
        };
    }
    /**
     * Returns true if this user has made a commit to this build.
     *
     * @since 1.191
     */
    public boolean hasParticipant(User user) {
        for (ChangeLogSet.Entry e : getChangeSet())
            try{
                // NOTE(review): reference equality assumes User instances are canonical
                // (one instance per id) — confirm; otherwise this should use equals().
                if (e.getAuthor()==user)
                    return true;
            } catch (RuntimeException re) {
                // A broken changelog entry should not abort the whole scan; log and keep going.
                LOGGER.log(Level.INFO, "Failed to determine author of changelog " + e.getCommitId() + "for " + getParent().getDisplayName() + ", " + getDisplayName(), re);
            }
        return false;
    }
    /**
     * Gets the version of Hudson that was used to build this job.
     *
     * @return may be null for records written before this field existed
     * @since 1.246
     */
    public String getHudsonVersion() {
        return hudsonVersion;
    }
    /**
     * @deprecated as of 1.467
     *      Please use {@link hudson.model.Run.RunExecution}
     */
    @Deprecated
    public abstract class AbstractRunner extends AbstractBuildExecution {
        // Empty subclass kept only so code written against the old name keeps compiling.
    }
public abstract class AbstractBuildExecution extends Runner {
/*
Some plugins might depend on this instance castable to Runner, so we need to use
deprecated class here.
*/
/**
* Since configuration can be changed while a build is in progress,
* create a launcher once and stick to it for the entire build duration.
*/
protected Launcher launcher;
/**
* Output/progress of this build goes here.
*/
protected BuildListener listener;
/**
* Lease of the workspace.
*/
private Lease lease;
/**
* Returns the current {@link Node} on which we are building.
* @return Returns the current {@link Node}
* @throws IllegalStateException if that cannot be determined
*/
protected final @Nonnull Node getCurrentNode() throws IllegalStateException {
Executor exec = Executor.currentExecutor();
if (exec == null) {
throw new IllegalStateException("not being called from an executor thread");
}
Computer c = exec.getOwner();
Node node = c.getNode();
if (node == null) {
throw new IllegalStateException("no longer a configured node for " + c.getName());
}
return node;
}
public Launcher getLauncher() {
return launcher;
}
public BuildListener getListener() {
return listener;
}
/**
* Allocates the workspace from {@link WorkspaceList}.
*
* @param n
* Passed in for the convenience. The node where the build is running.
* @param wsl
* Passed in for the convenience. The returned path must be registered to this object.
*/
protected Lease decideWorkspace(@Nonnull Node n, WorkspaceList wsl) throws InterruptedException, IOException {
String customWorkspace = getProject().getCustomWorkspace();
if (customWorkspace != null) {
// we allow custom workspaces to be concurrently used between jobs.
return Lease.createDummyLease(n.getRootPath().child(getEnvironment(listener).expand(customWorkspace)));
}
// TODO: this cast is indicative of abstraction problem
return wsl.allocate(n.getWorkspaceFor((TopLevelItem)getProject()), getBuild());
}
public Result run(@Nonnull BuildListener listener) throws Exception {
final Node node = getCurrentNode();
assert builtOn==null;
builtOn = node.getNodeName();
hudsonVersion = Jenkins.VERSION;
this.listener = listener;
launcher = createLauncher(listener);
if (!Jenkins.getInstance().getNodes().isEmpty()) {
if (node instanceof Jenkins) {
listener.getLogger().print(Messages.AbstractBuild_BuildingOnMaster());
} else {
listener.getLogger().print(Messages.AbstractBuild_BuildingRemotely(ModelHyperlinkNote.encodeTo("/computer/" + builtOn, builtOn)));
Set<LabelAtom> assignedLabels = new HashSet<LabelAtom>(node.getAssignedLabels());
assignedLabels.remove(node.getSelfLabel());
if (!assignedLabels.isEmpty()) {
boolean first = true;
for (LabelAtom label : assignedLabels) {
if (first) {
listener.getLogger().print(" (");
first = false;
} else {
listener.getLogger().print(' ');
}
listener.getLogger().print(label.getName());
}
listener.getLogger().print(')');
}
}
} else {
listener.getLogger().print(Messages.AbstractBuild_Building());
}
lease = decideWorkspace(node, Computer.currentComputer().getWorkspaceList());
workspace = lease.path.getRemote();
listener.getLogger().println(Messages.AbstractBuild_BuildingInWorkspace(workspace));
node.getFileSystemProvisioner().prepareWorkspace(AbstractBuild.this,lease.path,listener);
for (WorkspaceListener wl : WorkspaceListener.all()) {
wl.beforeUse(AbstractBuild.this, lease.path, listener);
}
getProject().getScmCheckoutStrategy().preCheckout(AbstractBuild.this, launcher, this.listener);
getProject().getScmCheckoutStrategy().checkout(this);
if (!preBuild(listener,project.getProperties()))
return Result.FAILURE;
Result result = doRun(listener);
if (node.getChannel() != null) {
// kill run-away processes that are left
// use multiple environment variables so that people can escape this massacre by overriding an environment
// variable for some processes
launcher.kill(getCharacteristicEnvVars());
}
// this is ugly, but for historical reason, if non-null value is returned
// it should become the final result.
if (result==null) result = getResult();
if (result==null) result = Result.SUCCESS;
return result;
}
/**
* Creates a {@link Launcher} that this build will use. This can be overridden by derived types
* to decorate the resulting {@link Launcher}.
*
* @param listener
* Always non-null. Connected to the main build output.
*/
@Nonnull
protected Launcher createLauncher(@Nonnull BuildListener listener) throws IOException, InterruptedException {
final Node currentNode = getCurrentNode();
Launcher l = currentNode.createLauncher(listener);
if (project instanceof BuildableItemWithBuildWrappers) {
BuildableItemWithBuildWrappers biwbw = (BuildableItemWithBuildWrappers) project;
for (BuildWrapper bw : biwbw.getBuildWrappersList())
l = bw.decorateLauncher(AbstractBuild.this,l,listener);
}
buildEnvironments = new ArrayList<Environment>();
for (RunListener rl: RunListener.all()) {
Environment environment = rl.setUpEnvironment(AbstractBuild.this, l, listener);
if (environment != null) {
buildEnvironments.add(environment);
}
}
for (NodeProperty nodeProperty: Jenkins.getInstance().getGlobalNodeProperties()) {
Environment environment = nodeProperty.setUp(AbstractBuild.this, l, listener);
if (environment != null) {
buildEnvironments.add(environment);
}
}
for (NodeProperty nodeProperty: currentNode.getNodeProperties()) {
Environment environment = nodeProperty.setUp(AbstractBuild.this, l, listener);
if (environment != null) {
buildEnvironments.add(environment);
}
}
return l;
}
public void defaultCheckout() throws IOException, InterruptedException {
AbstractBuild<?,?> build = AbstractBuild.this;
AbstractProject<?, ?> project = build.getProject();
for (int retryCount=project.getScmCheckoutRetryCount(); ; retryCount--) {
build.scm = NullChangeLogParser.INSTANCE;
try {
File changeLogFile = new File(build.getRootDir(), "changelog.xml");
if (project.checkout(build, launcher,listener, changeLogFile)) {
// check out succeeded
SCM scm = project.getScm();
for (SCMListener l : SCMListener.all()) {
try {
l.onCheckout(build, scm, build.getWorkspace(), listener, changeLogFile, build.getAction(SCMRevisionState.class));
} catch (Exception e) {
throw new IOException(e);
}
}
build.scm = scm.createChangeLogParser();
build.changeSet = new WeakReference<ChangeLogSet<? extends Entry>>(build.calcChangeSet());
for (SCMListener l : SCMListener.all())
try {
l.onChangeLogParsed(build,listener,build.getChangeSet());
} catch (Exception e) {
throw new IOException("Failed to parse changelog",e);
}
// Get a chance to do something after checkout and changelog is done
scm.postCheckout( build, launcher, build.getWorkspace(), listener );
return;
}
} catch (AbortException e) {
listener.error(e.getMessage());
} catch (InterruptedIOException e) {
throw (InterruptedException)new InterruptedException().initCause(e);
} catch (IOException e) {
// checkout error not yet reported
Functions.printStackTrace(e, listener.getLogger());
}
if (retryCount == 0) // all attempts failed
throw new RunnerAbortedException();
listener.getLogger().println("Retrying after 10 seconds");
Thread.sleep(10000);
}
}
/**
* The portion of a build that is specific to a subclass of {@link AbstractBuild}
* goes here.
*
* @return
* null to continue the build normally (that means the doRun method
* itself run successfully)
* Return a non-null value to abort the build right there with the specified result code.
*/
protected abstract Result doRun(BuildListener listener) throws Exception, RunnerAbortedException;
/**
* @see #post(BuildListener)
*/
protected abstract void post2(BuildListener listener) throws Exception;
public final void post(BuildListener listener) throws Exception {
try {
post2(listener);
} finally {
// update the culprit list
HashSet<String> r = new HashSet<String>();
for (User u : getCulprits())
r.add(u.getId());
culprits = ImmutableSortedSet.copyOf(r);
CheckPoint.CULPRITS_DETERMINED.report();
}
}
public void cleanUp(BuildListener listener) throws Exception {
if (lease!=null) {
lease.release();
lease = null;
}
BuildTrigger.execute(AbstractBuild.this, listener);
buildEnvironments = null;
}
/**
* @deprecated as of 1.356
* Use {@link #performAllBuildSteps(BuildListener, Map, boolean)}
*/
@Deprecated
protected final void performAllBuildStep(BuildListener listener, Map<?,? extends BuildStep> buildSteps, boolean phase) throws InterruptedException, IOException {
performAllBuildSteps(listener,buildSteps.values(),phase);
}
protected final boolean performAllBuildSteps(BuildListener listener, Map<?,? extends BuildStep> buildSteps, boolean phase) throws InterruptedException, IOException {
return performAllBuildSteps(listener,buildSteps.values(),phase);
}
/**
* @deprecated as of 1.356
* Use {@link #performAllBuildSteps(BuildListener, Iterable, boolean)}
*/
@Deprecated
protected final void performAllBuildStep(BuildListener listener, Iterable<? extends BuildStep> buildSteps, boolean phase) throws InterruptedException, IOException {
performAllBuildSteps(listener,buildSteps,phase);
}
/**
* Runs all the given build steps, even if one of them fail.
*
* @param phase
* true for the post build processing, and false for the final "run after finished" execution.
*
* @return false if any build step failed
*/
protected final boolean performAllBuildSteps(BuildListener listener, Iterable<? extends BuildStep> buildSteps, boolean phase) throws InterruptedException, IOException {
boolean r = true;
for (BuildStep bs : buildSteps) {
if ((bs instanceof Publisher && ((Publisher)bs).needsToRunAfterFinalized()) ^ phase)
try {
if (!perform(bs,listener)) {
LOGGER.log(Level.FINE, "{0} : {1} failed", new Object[] {AbstractBuild.this, bs});
r = false;
if (phase) {
setResult(Result.FAILURE);
}
}
} catch (Exception e) {
reportError(bs, e, listener, phase);
r = false;
} catch (LinkageError e) {
reportError(bs, e, listener, phase);
r = false;
}
}
return r;
}
private void reportError(BuildStep bs, Throwable e, BuildListener listener, boolean phase) {
final String buildStep;
if (bs instanceof Describable) {
buildStep = ((Describable) bs).getDescriptor().getDisplayName();
} else {
buildStep = bs.getClass().getName();
}
if (e instanceof AbortException) {
LOGGER.log(Level.FINE, "{0} : {1} failed", new Object[] {AbstractBuild.this, buildStep});
listener.error("Step ‘" + buildStep + "’ failed: " + e.getMessage());
} else {
String msg = "Step ‘" + buildStep + "’ aborted due to exception: ";
Functions.printStackTrace(e, listener.error(msg));
LOGGER.log(WARNING, msg, e);
}
if (phase) {
setResult(Result.FAILURE);
}
}
/**
* Calls a build step.
*/
protected final boolean perform(BuildStep bs, BuildListener listener) throws InterruptedException, IOException {
BuildStepMonitor mon;
try {
mon = bs.getRequiredMonitorService();
} catch (AbstractMethodError e) {
mon = BuildStepMonitor.BUILD;
}
Result oldResult = AbstractBuild.this.getResult();
for (BuildStepListener bsl : BuildStepListener.all()) {
bsl.started(AbstractBuild.this, bs, listener);
}
boolean canContinue = false;
try {
canContinue = mon.perform(bs, AbstractBuild.this, launcher, listener);
} catch (RequestAbortedException ex) {
// Channel is closed, do not continue
reportBrokenChannel(listener);
} catch (ChannelClosedException ex) {
// Channel is closed, do not continue
reportBrokenChannel(listener);
} catch (RuntimeException ex) {
Functions.printStackTrace(ex, listener.error("Build step failed with exception"));
}
for (BuildStepListener bsl : BuildStepListener.all()) {
bsl.finished(AbstractBuild.this, bs, listener, canContinue);
}
Result newResult = AbstractBuild.this.getResult();
if (newResult != oldResult) {
String buildStepName = getBuildStepName(bs);
listener.getLogger().format("Build step '%s' changed build result to %s%n", buildStepName, newResult);
}
if (!canContinue) {
String buildStepName = getBuildStepName(bs);
listener.getLogger().format("Build step '%s' marked build as failure%n", buildStepName);
}
return canContinue;
}
private void reportBrokenChannel(BuildListener listener) throws IOException {
final Node node = getCurrentNode();
listener.hyperlink("/" + node.toComputer().getUrl() + "log", "Agent went offline during the build");
listener.getLogger().println();
final OfflineCause offlineCause = node.toComputer().getOfflineCause();
if (offlineCause != null) {
listener.error(offlineCause.toString());
}
}
private String getBuildStepName(BuildStep bs) {
if (bs instanceof Describable<?>) {
return ((Describable<?>) bs).getDescriptor().getDisplayName();
} else {
return bs.getClass().getSimpleName();
}
}
protected final boolean preBuild(BuildListener listener,Map<?,? extends BuildStep> steps) {
return preBuild(listener,steps.values());
}
protected final boolean preBuild(BuildListener listener,Collection<? extends BuildStep> steps) {
return preBuild(listener,(Iterable<? extends BuildStep>)steps);
}
protected final boolean preBuild(BuildListener listener,Iterable<? extends BuildStep> steps) {
for (BuildStep bs : steps)
if (!bs.prebuild(AbstractBuild.this,listener)) {
LOGGER.log(Level.FINE, "{0} : {1} failed", new Object[] {AbstractBuild.this, bs});
return false;
}
return true;
}
}
/**
* get the fingerprints associated with this build
*
* @return never null
*/
@Exported(name = "fingerprint", inline = true, visibility = -1)
public Collection<Fingerprint> getBuildFingerprints() {
FingerprintAction fingerprintAction = getAction(FingerprintAction.class);
if (fingerprintAction != null) {
return fingerprintAction.getFingerprints().values();
}
return Collections.<Fingerprint>emptyList();
}
    /*
     * No need to lock the entire AbstractBuild on change set calculation
     */
    private transient Object changeSetLock = new Object();
    /**
     * Gets the changes incorporated into this build.
     *
     * @return never null.
     */
    @Exported
    public ChangeLogSet<? extends Entry> getChangeSet() {
        // Lazily initialize the parser; guarded only by the dedicated lock above.
        synchronized (changeSetLock) {
            if (scm==null) {
                scm = NullChangeLogParser.INSTANCE;
            }
        }
        ChangeLogSet<? extends Entry> cs = null;
        if (changeSet!=null)
            cs = changeSet.get(); // WeakReference: may have been garbage-collected
        if (cs==null)
            cs = calcChangeSet();
        // defensive check. if the calculation fails (such as through an exception),
        // set a dummy value so that it'll work the next time. the exception will
        // be still reported, giving the plugin developer an opportunity to fix it.
        if (cs==null)
            cs = ChangeLogSet.createEmpty(this);
        changeSet = new WeakReference<ChangeLogSet<? extends Entry>>(cs);
        return cs;
    }
@Restricted(DoNotUse.class) // for project-changes.jelly
public List<ChangeLogSet<? extends ChangeLogSet.Entry>> getChangeSets() {
ChangeLogSet<? extends Entry> cs = getChangeSet();
return cs.isEmptySet() ? Collections.<ChangeLogSet<? extends ChangeLogSet.Entry>>emptyList() : Collections.<ChangeLogSet<? extends ChangeLogSet.Entry>>singletonList(cs);
}
/**
* Returns true if the changelog is already computed.
*/
public boolean hasChangeSetComputed() {
File changelogFile = new File(getRootDir(), "changelog.xml");
return changelogFile.exists();
}
private ChangeLogSet<? extends Entry> calcChangeSet() {
File changelogFile = new File(getRootDir(), "changelog.xml");
if (!changelogFile.exists())
return ChangeLogSet.createEmpty(this);
try {
return scm.parse(this,changelogFile);
} catch (IOException e) {
LOGGER.log(WARNING, "Failed to parse "+changelogFile,e);
} catch (SAXException e) {
LOGGER.log(WARNING, "Failed to parse "+changelogFile,e);
}
return ChangeLogSet.createEmpty(this);
}
    /**
     * Builds the environment for this build: the base {@link Run} environment plus
     * WORKSPACE, SCM-contributed variables, {@link BuildWrapper} environments, and
     * variables from {@link EnvironmentContributingAction}s, with references resolved.
     */
    @Override
    public EnvVars getEnvironment(TaskListener log) throws IOException, InterruptedException {
        EnvVars env = super.getEnvironment(log);
        FilePath ws = getWorkspace();
        if (ws!=null) // if this is done very early on in the build, workspace may not be decided yet. see HUDSON-3997
            env.put("WORKSPACE", ws.getRemote());
        // Let the SCM and any active build-wrapper environments contribute variables.
        project.getScm().buildEnvVars(this,env);
        if (buildEnvironments!=null)
            for (Environment e : buildEnvironments)
                e.buildEnvVars(env);
        for (EnvironmentContributingAction a : getActions(EnvironmentContributingAction.class))
            a.buildEnvVars(this,env);
        // Resolve variable references between the collected entries.
        EnvVars.resolve(env);
        return env;
    }
/**
 * During the build, expose the environments contributed by {@link BuildWrapper}s and others.
 *
 * <p>
 * Since 1.444, executor thread that's doing the build can access mutable underlying list,
 * which allows the caller to add/remove environments. The recommended way of adding
 * environment is through {@link BuildWrapper}, but this might be handy for build steps
 * who wants to expose additional environment variables to the rest of the build.
 *
 * @return can be empty list, but never null. Immutable.
 * @since 1.437
 */
public EnvironmentList getEnvironments() {
Executor e = Executor.currentExecutor();
// Only the executor thread actually running this build gets the mutable list.
if (e!=null && e.getCurrentExecutable()==this) {
if (buildEnvironments==null) buildEnvironments = new ArrayList<Environment>();
return new EnvironmentList(buildEnvironments);
}
// Every other caller gets an immutable snapshot (empty if nothing was contributed).
return new EnvironmentList(buildEnvironments==null ? Collections.<Environment>emptyList() : ImmutableList.copyOf(buildEnvironments));
}
/** Alias for {@link #getTimestamp()}: when this build was scheduled. */
public Calendar due() {
return getTimestamp();
}
/**
 * {@inheritDoc}
 * The action may have a {@code summary.jelly} view containing a {@code <t:summary>} or other {@code <tr>}.
 */
@Override public void addAction(Action a) {
super.addAction(a);
}
/**
 * Returns the action list via the deprecated superclass accessor.
 * Deliberately bypasses this class's own {@code getActions} overloads.
 */
@SuppressWarnings("deprecation")
public List<Action> getPersistentActions(){
return super.getActions();
}
/**
 * Builds up a set of variable names that contain sensitive values that
 * should not be exposed. The expectation is that this set is populated with
 * keys returned by {@link #getBuildVariables()} that should have their
 * values masked for display purposes.
 *
 * @since 1.378
 */
public Set<String> getSensitiveBuildVariables() {
Set<String> s = new HashSet<String>();
ParametersAction parameters = getAction(ParametersAction.class);
if (parameters != null) {
// Collect names of parameters that declare themselves sensitive.
for (ParameterValue p : parameters) {
if (p.isSensitive()) {
s.add(p.getName());
}
}
}
// Allow BuildWrappers to determine if any of their data is sensitive
if (project instanceof BuildableItemWithBuildWrappers) {
for (BuildWrapper bw : ((BuildableItemWithBuildWrappers) project).getBuildWrappersList()) {
bw.makeSensitiveBuildVariables(this, s);
}
}
return s;
}
/**
 * Provides additional variables and their values to {@link Builder}s.
 *
 * <p>
 * This mechanism is used by {@code MatrixConfiguration} to pass
 * the configuration values to the current build. It is up to
 * {@link Builder}s to decide whether they want to recognize the values
 * or how to use them.
 *
 * <p>
 * This also includes build parameters if a build is parameterized.
 *
 * @return
 * The returned map is mutable so that subtypes can put more values.
 */
public Map<String,String> getBuildVariables() {
Map<String,String> r = new HashMap<String, String>();
ParametersAction parameters = getAction(ParametersAction.class);
if (parameters!=null) {
// this is a rather round about way of doing this...
for (ParameterValue p : parameters) {
String v = p.createVariableResolver(this).resolve(p.getName());
if (v!=null) r.put(p.getName(),v);
}
}
// allow the BuildWrappers to contribute additional build variables
if (project instanceof BuildableItemWithBuildWrappers) {
for (BuildWrapper bw : ((BuildableItemWithBuildWrappers) project).getBuildWrappersList())
bw.makeBuildVariables(this,r);
}
// Finally, let registered extension points contribute or override variables.
for (BuildVariableContributor bvc : BuildVariableContributor.all())
bvc.buildVariablesFor(this,r);
return r;
}
/**
 * Creates {@link VariableResolver} backed by {@link #getBuildVariables()}.
 */
public final VariableResolver<String> getBuildVariableResolver() {
return new VariableResolver.ByMap<String>(getBuildVariables());
}
/**
 * @deprecated Use {@link #getAction(Class)} on {@link AbstractTestResultAction}.
 */
@Deprecated
public Action getTestResultAction() {
try {
// The action class is resolved reflectively through the uber class loader;
// presumably it is supplied by a plugin rather than core — hence the name lookup.
return getAction(Jenkins.getInstance().getPluginManager().uberClassLoader.loadClass("hudson.tasks.test.AbstractTestResultAction").asSubclass(Action.class));
} catch (ClassNotFoundException x) {
// Class not available (plugin absent): behave as if the action does not exist.
return null;
}
}
/**
 * @deprecated Use {@link #getAction(Class)} on {@link AggregatedTestResultAction}.
 */
@Deprecated
public Action getAggregatedTestResultAction() {
try {
// Same reflective lookup as getTestResultAction(), for the aggregated variant.
return getAction(Jenkins.getInstance().getPluginManager().uberClassLoader.loadClass("hudson.tasks.test.AggregatedTestResultAction").asSubclass(Action.class));
} catch (ClassNotFoundException x) {
return null;
}
}
/**
 * Invoked by {@link Executor} to perform a build.
 * Concrete subclasses supply the actual build procedure.
 */
public abstract void run();
//
//
// fingerprint related stuff
//
//
/**
 * Returns the reason this build record must be kept: some downstream project
 * configured with {@code isKeepDependencies} has an existing build that depends
 * on this one. Delegates to the superclass when no such dependency pins it.
 */
@Override
public String getWhyKeepLog() {
// if any of the downstream project is configured with 'keep dependency component',
// we need to keep this log
OUTER:
for (AbstractProject<?,?> p : getParent().getDownstreamProjects()) {
if (!p.isKeepDependencies()) continue;
AbstractBuild<?,?> fb = p.getFirstBuild();
if (fb==null) continue; // no active record
// is there any active build that depends on us?
for (int i : getDownstreamRelationship(p).listNumbersReverse()) {
// TODO: this is essentially a "find intersection between two sparse sequences"
// and we should be able to do much better.
if (i<fb.getNumber())
continue OUTER; // all the other records are younger than the first record, so pointless to search.
AbstractBuild<?,?> b = p.getBuildByNumber(i);
if (b!=null)
// Hide the build name from callers who cannot read the downstream item.
return Messages.AbstractBuild_KeptBecause(p.hasPermission(Item.READ) ? b.toString() : "?");
}
}
return super.getWhyKeepLog();
}
/**
 * Gets the dependency relationship from this build (as the source)
 * and that project (as the sink.)
 *
 * @return
 * range of build numbers that represent which downstream builds are using this build.
 * The range will be empty if no build of that project matches this (or there is no {@link FingerprintAction}), but it'll never be null.
 */
public RangeSet getDownstreamRelationship(AbstractProject that) {
RangeSet rs = new RangeSet();
FingerprintAction f = getAction(FingerprintAction.class);
if (f==null) return rs;
// look for fingerprints that point to this build as the source, and merge them all
for (Fingerprint e : f.getFingerprints().values()) {
if (upstreamCulprits) {
// With upstreamCulprits, we allow downstream relationships
// from intermediate jobs
rs.add(e.getRangeSet(that));
} else {
// Only count fingerprints whose original producer is exactly this build.
BuildPtr o = e.getOriginal();
if (o!=null && o.is(this))
rs.add(e.getRangeSet(that));
}
}
return rs;
}
/**
 * Works like {@link #getDownstreamRelationship(AbstractProject)} but returns
 * the actual build objects, in ascending order.
 * @since 1.150
 */
public Iterable<AbstractBuild<?,?>> getDownstreamBuilds(final AbstractProject<?,?> that) {
final Iterable<Integer> nums = getDownstreamRelationship(that).listNumbers();
// Lazily resolve build numbers to records; rotated-away builds resolve to null
// and are filtered out by removeNull.
return new Iterable<AbstractBuild<?, ?>>() {
public Iterator<AbstractBuild<?, ?>> iterator() {
return Iterators.removeNull(
new AdaptedIterator<Integer,AbstractBuild<?,?>>(nums) {
protected AbstractBuild<?, ?> adapt(Integer item) {
return that.getBuildByNumber(item);
}
});
}
};
}
/**
 * Gets the dependency relationship from this build (as the sink)
 * and that project (as the source.)
 *
 * @return
 * Build number of the upstream build that feed into this build,
 * or -1 if no record is available (for example if there is no {@link FingerprintAction}, even if there is an {@link Cause.UpstreamCause}).
 */
public int getUpstreamRelationship(AbstractProject that) {
FingerprintAction f = getAction(FingerprintAction.class);
if (f==null) return -1;
int n = -1;
// look for fingerprints that point to the given project as the source, and merge them all
for (Fingerprint e : f.getFingerprints().values()) {
if (upstreamCulprits) {
// With upstreamCulprits, we allow upstream relationships
// from intermediate jobs
Fingerprint.RangeSet rangeset = e.getRangeSet(that);
if (!rangeset.isEmpty()) {
// Track the highest (most recent) upstream build number seen.
n = Math.max(n, rangeset.listNumbersReverse().iterator().next());
}
} else {
BuildPtr o = e.getOriginal();
if (o!=null && o.belongsTo(that))
n = Math.max(n,o.getNumber());
}
}
return n;
}
/**
 * Resolves {@link #getUpstreamRelationship(AbstractProject)} to the actual
 * build record of the given upstream project.
 *
 * @return
 * null when no upstream build was found, or when it was found but its
 * record has since been rotated away.
 */
public AbstractBuild<?,?> getUpstreamRelationshipBuild(AbstractProject<?,?> that) {
final int buildNumber = getUpstreamRelationship(that);
return buildNumber == -1 ? null : that.getBuildByNumber(buildNumber);
}
/**
 * Gets the downstream builds of this build, which are the builds of the
 * downstream projects that use artifacts of this build.
 *
 * @return
 * For each project with fingerprinting enabled, returns the range
 * of builds (which can be empty if no build uses the artifact from this build or downstream is not {@link AbstractProject#isFingerprintConfigured}.)
 */
public Map<AbstractProject,RangeSet> getDownstreamBuilds() {
Map<AbstractProject,RangeSet> r = new HashMap<AbstractProject,RangeSet>();
for (AbstractProject p : getParent().getDownstreamProjects()) {
// Only fingerprint-configured projects can be related through fingerprints.
if (p.isFingerprintConfigured())
r.put(p,getDownstreamRelationship(p));
}
return r;
}
/**
 * Gets the upstream builds of this build, which are the builds of the
 * upstream projects whose artifacts feed into this build.
 * @return empty if there is no {@link FingerprintAction} (even if there is an {@link Cause.UpstreamCause})
 * @see #getTransitiveUpstreamBuilds()
 */
public Map<AbstractProject,Integer> getUpstreamBuilds() {
return _getUpstreamBuilds(getParent().getUpstreamProjects());
}
/**
 * Works like {@link #getUpstreamBuilds()} but also includes all the transitive
 * dependencies as well.
 */
public Map<AbstractProject,Integer> getTransitiveUpstreamBuilds() {
return _getUpstreamBuilds(getParent().getTransitiveUpstreamProjects());
}
/**
 * Maps each of the given upstream projects to the number of the build that
 * fed into this build, omitting projects with no recorded relationship.
 */
private Map<AbstractProject, Integer> _getUpstreamBuilds(Collection<AbstractProject> projects) {
Map<AbstractProject,Integer> result = new HashMap<AbstractProject,Integer>();
for (AbstractProject upstream : projects) {
int buildNumber = getUpstreamRelationship(upstream);
if (buildNumber < 0) {
continue; // no fingerprint linkage recorded for this project
}
result.put(upstream, buildNumber);
}
return result;
}
/**
 * Gets the changes in the dependency between the given build and this build.
 * @return empty if there is no {@link FingerprintAction}
 */
public Map<AbstractProject,DependencyChange> getDependencyChanges(AbstractBuild from) {
if (from==null) return Collections.emptyMap(); // make it easy to call this from views
FingerprintAction n = this.getAction(FingerprintAction.class);
FingerprintAction o = from.getAction(FingerprintAction.class);
if (n==null || o==null) return Collections.emptyMap();
Map<AbstractProject,Integer> ndep = n.getDependencies(true);
Map<AbstractProject,Integer> odep = o.getDependencies(true);
Map<AbstractProject,DependencyChange> r = new HashMap<AbstractProject,DependencyChange>();
// Report only dependencies whose build number advanced between 'from' and this build.
for (Map.Entry<AbstractProject,Integer> entry : odep.entrySet()) {
AbstractProject p = entry.getKey();
Integer oldNumber = entry.getValue();
Integer newNumber = ndep.get(p);
if (newNumber!=null && oldNumber.compareTo(newNumber)<0) {
r.put(p,new DependencyChange(p,oldNumber,newNumber));
}
}
return r;
}
/**
 * Represents a change in the dependency.
 */
public static final class DependencyChange {
/**
 * The dependency project.
 */
public final AbstractProject project;
/**
 * Version of the dependency project used in the previous build.
 */
public final int fromId;
/**
 * {@link Build} object for {@link #fromId}. Can be null if the log is gone.
 */
public final AbstractBuild from;
/**
 * Version of the dependency project used in this build.
 */
public final int toId;
/** {@link Build} object for {@link #toId}. Can be null if the log is gone. */
public final AbstractBuild to;
/** Resolves the from/to build numbers to records; either may be null if rotated away. */
public DependencyChange(AbstractProject<?,?> project, int fromId, int toId) {
this.project = project;
this.fromId = fromId;
this.toId = toId;
this.from = project.getBuildByNumber(fromId);
this.to = project.getBuildByNumber(toId);
}
/**
 * Gets the {@link AbstractBuild} objects (fromId,toId].
 * <p>
 * This method returns all such available builds in the ascending order
 * of IDs, but due to log rotations, some builds may be already unavailable.
 */
public List<AbstractBuild> getBuilds() {
List<AbstractBuild> r = new ArrayList<AbstractBuild>();
// Start at (or just after) fromId, which is excluded from the interval.
AbstractBuild<?,?> b = project.getNearestBuild(fromId);
if (b!=null && b.getNumber()==fromId)
b = b.getNextBuild(); // fromId exclusive
while (b!=null && b.getNumber()<=toId) {
r.add(b);
b = b.getNextBuild();
}
return r;
}
}
//
// web methods
//
/**
 * @deprecated as of 1.489
 * Use {@link #doStop()}
 */
@Deprecated
@RequirePOST // #doStop() should be preferred, but better to be safe
public void doStop(StaplerRequest req, StaplerResponse rsp) throws IOException, ServletException {
doStop().generateResponse(req,rsp,this);
}
/**
 * Stops this build if it's still going.
 *
 * If we use this/executor/stop URL, it causes 404 if the build is already killed,
 * as {@link #getExecutor()} returns null.
 *
 * @since 1.489
 */
@RequirePOST
public synchronized HttpResponse doStop() throws IOException, ServletException {
Executor e = getExecutor();
// The build may be running on a one-off executor instead; check both.
if (e==null)
e = getOneOffExecutor();
if (e!=null)
return e.doStop();
else
// nothing is building
return HttpResponses.forwardToPreviousPage();
}
/** Class-wide logger, used e.g. for change-log parse failures. */
private static final Logger LOGGER = Logger.getLogger(AbstractBuild.class.getName());
}
| 52,097 | 36.77955 | 179 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/AbstractProject1.java
|
/*
* The MIT License
*
* Copyright (c) 2004-2011, Sun Microsystems, Inc., Kohsuke Kawaguchi,
* Brian Westrich, Erik Ramfelt, Ertan Deniz, Jean-Baptiste Quenot,
* Luca Domenico Milanesio, R. Tyler Ballance, Stephen Connolly, Tom Huybrechts,
* id:cactusman, Yahoo! Inc., Andrew Bayer, Manufacture Francaise des Pneumatiques
* Michelin, Romain Seguy
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package hudson.model;
import antlr.ANTLRException;
import com.infradna.tool.bridge_method_injector.WithBridgeMethods;
import hudson.AbortException;
import hudson.CopyOnWrite;
import hudson.EnvVars;
import hudson.ExtensionList;
import hudson.ExtensionPoint;
import hudson.FeedAdapter;
import hudson.FilePath;
import hudson.Functions;
import hudson.Launcher;
import hudson.Util;
import hudson.cli.declarative.CLIMethod;
import hudson.cli.declarative.CLIResolver;
import hudson.model.Cause.LegacyCodeCause;
import hudson.model.Descriptor.FormException;
import hudson.model.Fingerprint.RangeSet;
import hudson.model.Node.Mode;
import hudson.model.Queue.Executable;
import hudson.model.Queue.Task;
import hudson.model.labels.LabelAtom;
import hudson.model.labels.LabelExpression;
import hudson.model.listeners.ItemListener;
import hudson.model.listeners.SCMPollListener;
import hudson.model.queue.CauseOfBlockage;
import hudson.model.queue.QueueTaskFuture;
import hudson.model.queue.SubTask;
import hudson.model.queue.SubTaskContributor;
import hudson.scm.ChangeLogSet;
import hudson.scm.ChangeLogSet.Entry;
import hudson.scm.NullSCM;
import hudson.scm.PollingResult;
import static hudson.scm.PollingResult.*;
import hudson.scm.SCM;
import hudson.scm.SCMRevisionState;
import hudson.scm.SCMS;
import hudson.search.SearchIndexBuilder;
import hudson.security.ACL;
import hudson.security.Permission;
import hudson.slaves.Cloud;
import hudson.slaves.WorkspaceList;
import hudson.tasks.BuildStep;
import hudson.tasks.BuildStepDescriptor;
import hudson.tasks.BuildTrigger;
import hudson.tasks.BuildWrapperDescriptor;
import hudson.tasks.Publisher;
import hudson.triggers.SCMTrigger;
import hudson.triggers.Trigger;
import hudson.triggers.TriggerDescriptor;
import hudson.util.AlternativeUiTextProvider;
import hudson.util.AlternativeUiTextProvider.Message;
import hudson.util.DescribableList;
import hudson.util.FormValidation;
import hudson.util.TimeUnit2;
import hudson.widgets.HistoryWidget;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Calendar;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.Vector;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.annotation.CheckForNull;
import javax.annotation.Nonnull;
import javax.servlet.ServletException;
import jenkins.model.BlockedBecauseOfBuildInProgress;
import jenkins.model.Jenkins;
import jenkins.model.JenkinsLocationConfiguration;
import jenkins.model.ParameterizedJobMixIn;
import jenkins.model.Uptime;
import jenkins.model.lazy.LazyBuildMixIn;
import jenkins.scm.DefaultSCMCheckoutStrategyImpl;
import jenkins.scm.SCMCheckoutStrategy;
import jenkins.scm.SCMCheckoutStrategyDescriptor;
import jenkins.scm.SCMDecisionHandler;
import jenkins.util.TimeDuration;
import net.sf.json.JSONObject;
import org.acegisecurity.Authentication;
import org.jenkinsci.bytecode.AdaptField;
import org.kohsuke.accmod.Restricted;
import org.kohsuke.accmod.restrictions.DoNotUse;
import org.kohsuke.accmod.restrictions.NoExternalUse;
import org.kohsuke.args4j.Argument;
import org.kohsuke.args4j.CmdLineException;
import org.kohsuke.stapler.AncestorInPath;
import org.kohsuke.stapler.ForwardToView;
import org.kohsuke.stapler.HttpRedirect;
import org.kohsuke.stapler.HttpResponse;
import org.kohsuke.stapler.QueryParameter;
import org.kohsuke.stapler.StaplerRequest;
import org.kohsuke.stapler.StaplerResponse;
import org.kohsuke.stapler.export.Exported;
import org.kohsuke.stapler.interceptor.RequirePOST;
/**
* Base implementation of {@link Job}s that build software.
*
* For now this is primarily the common part of {@link Project} and MavenModule.
*
* @author Kohsuke Kawaguchi
* @see AbstractBuild
*/
@SuppressWarnings("rawtypes")
public abstract class AbstractProject<P extends AbstractProject<P,R>,R extends AbstractBuild<P,R>> extends Job<P,R> implements BuildableItem, LazyBuildMixIn.LazyLoadingJob<P,R>, ParameterizedJobMixIn.ParameterizedJob {
/**
* {@link SCM} associated with the project.
* To allow derived classes to link {@link SCM} config to elsewhere,
* access to this variable should always go through {@link #getScm()}.
*/
private volatile SCM scm = new NullSCM();
/**
* Controls how the checkout is done.
*/
private volatile SCMCheckoutStrategy scmCheckoutStrategy;
/**
* State returned from {@link SCM#poll(AbstractProject, Launcher, FilePath, TaskListener, SCMRevisionState)}.
*/
private volatile transient SCMRevisionState pollingBaseline = null;
private transient LazyBuildMixIn<P,R> buildMixIn;
/**
* All the builds keyed by their build number.
* Kept here for binary compatibility only; otherwise use {@link #buildMixIn}.
* External code should use {@link #getBuildByNumber(int)} or {@link #getLastBuild()} and traverse via
* {@link Run#getPreviousBuild()}
*/
@Restricted(NoExternalUse.class)
protected transient RunMap<R> builds;
/**
* The quiet period. Null to delegate to the system default.
*/
private volatile Integer quietPeriod = null;
/**
* The retry count. Null to delegate to the system default.
*/
private volatile Integer scmCheckoutRetryCount = null;
/**
* If this project is configured to be only built on a certain label,
* this value will be set to that label.
*
* For historical reasons, this is called 'assignedNode'. Also for
* a historical reason, null to indicate the affinity
* with the master node.
*
* @see #canRoam
*/
private String assignedNode;
/**
* True if this project can be built on any node.
*
* <p>
* This somewhat ugly flag combination is so that we can migrate
* existing Hudson installations nicely.
*/
private volatile boolean canRoam;
/**
* True to suspend new builds.
*/
protected volatile boolean disabled;
/**
* True to keep builds of this project in queue when downstream projects are
* building. False by default to keep from breaking existing behavior.
*/
protected volatile boolean blockBuildWhenDownstreamBuilding = false;
/**
* True to keep builds of this project in queue when upstream projects are
* building. False by default to keep from breaking existing behavior.
*/
protected volatile boolean blockBuildWhenUpstreamBuilding = false;
/**
* Identifies {@link JDK} to be used.
* Null if no explicit configuration is required.
*
* <p>
* Can't store {@link JDK} directly because {@link Jenkins} and {@link Project}
* are saved independently.
*
* @see Jenkins#getJDK(String)
*/
private volatile String jdk;
// NOTE(review): token apparently used to authorize build requests
// (see BuildAuthorizationToken); null when none is configured.
private volatile BuildAuthorizationToken authToken = null;
/**
 * List of all {@link Trigger}s for this project.
 */
@AdaptField(was=List.class)
protected volatile DescribableList<Trigger<?>,TriggerDescriptor> triggers = new DescribableList<Trigger<?>,TriggerDescriptor>(this);
/** Enables atomic lazy initialization of {@link #triggers} (see {@code triggers()}). */
private static final AtomicReferenceFieldUpdater<AbstractProject,DescribableList> triggersUpdater
= AtomicReferenceFieldUpdater.newUpdater(AbstractProject.class,DescribableList.class,"triggers");
/**
 * {@link Action}s contributed from subsidiary objects associated with
 * {@link AbstractProject}, such as from triggers, builders, publishers, etc.
 *
 * We don't want to persist them separately, and these actions
 * come and go as configuration change, so it's kept separate.
 */
@CopyOnWrite
protected transient volatile List<Action> transientActions = new Vector<Action>();
/** True when multiple builds of this project may run at once; see {@link #isConcurrentBuild()}. */
private boolean concurrentBuild;
/**
 * See {@link #setCustomWorkspace(String)}.
 *
 * @since 1.410
 */
private String customWorkspace;
/**
 * Creates a new project under the given parent, wiring up the lazy-loading
 * build map before any builds are accessed.
 */
protected AbstractProject(ItemGroup parent, String name) {
super(parent,name);
buildMixIn = createBuildMixIn();
builds = buildMixIn.getRunMap();
final Jenkins j = Jenkins.getInstance();
final List<Node> nodes = j != null ? j.getNodes() : null;
if(nodes!=null && !nodes.isEmpty()) {
// if a new job is configured with Hudson that already has agent nodes
// make it roamable by default
canRoam = true;
}
}
/** Creates the lazy-load mix-in that backs {@link #builds}, bound to this project. */
private LazyBuildMixIn<P,R> createBuildMixIn() {
return new LazyBuildMixIn<P,R>() {
@SuppressWarnings("unchecked") // untypable
@Override protected P asJob() {
return (P) AbstractProject.this;
}
@Override protected Class<R> getBuildClass() {
return AbstractProject.this.getBuildClass();
}
};
}
@Override public LazyBuildMixIn<P,R> getLazyBuildMixIn() {
return buildMixIn;
}
/** Mix-in supplying parameterized-job behavior (used e.g. for the "Build Now" text). */
private ParameterizedJobMixIn<P,R> getParameterizedJobMixIn() {
return new ParameterizedJobMixIn<P,R>() {
@SuppressWarnings("unchecked") // untypable
@Override protected P asJob() {
return (P) AbstractProject.this;
}
};
}
@Override
public synchronized void save() throws IOException {
super.save();
// Configuration may have changed; refresh actions derived from it.
updateTransientActions();
}
@Override
public void onCreatedFromScratch() {
super.onCreatedFromScratch();
buildMixIn.onCreatedFromScratch();
builds = buildMixIn.getRunMap();
// solicit initial contributions, especially from TransientProjectActionFactory
updateTransientActions();
}
/**
 * Restores transient state after the project is (re)loaded from disk:
 * the build map, trigger ownership, and configuration-derived actions.
 */
@Override
public void onLoad(ItemGroup<? extends Item> parent, String name) throws IOException {
super.onLoad(parent, name);
if (buildMixIn == null) {
buildMixIn = createBuildMixIn();
}
buildMixIn.onLoad(parent, name);
builds = buildMixIn.getRunMap();
triggers().setOwner(this);
for (Trigger t : triggers()) {
try {
t.start(this, Items.currentlyUpdatingByXml());
} catch (Throwable e) {
// A misbehaving trigger must not prevent the project from loading.
LOGGER.log(Level.WARNING, "could not start trigger while loading project '" + getFullName() + "'", e);
}
}
if(scm==null)
scm = new NullSCM(); // perhaps it was pointing to a plugin that no longer exists.
if(transientActions==null)
transientActions = new Vector<Action>(); // happens when loaded from disk
updateTransientActions();
}
/**
 * Lazily initializes and returns the trigger list; the CAS ensures exactly one
 * instance wins when concurrent callers race on first access.
 */
@WithBridgeMethods(List.class)
protected DescribableList<Trigger<?>,TriggerDescriptor> triggers() {
if (triggers == null) {
triggersUpdater.compareAndSet(this,null,new DescribableList<Trigger<?>,TriggerDescriptor>(this));
}
return triggers;
}
/**
 * Extends the base environment with variables from the project's configured
 * JDK, resolved for the given node when possible.
 */
@Override
public EnvVars getEnvironment(Node node, TaskListener listener) throws IOException, InterruptedException {
EnvVars env = super.getEnvironment(node, listener);
JDK jdkTool = getJDK();
if (jdkTool != null) {
if (node != null) { // just in case were not in a build
jdkTool = jdkTool.forNode(node, listener);
}
jdkTool.buildEnvVars(env);
} else if (!JDK.isDefaultName(jdk)) {
// A named JDK that cannot be found is reported to the log rather than failing.
listener.getLogger().println("No JDK named ‘" + jdk + "’ found");
}
return env;
}
/** Disables the project and cleans up its workspace before deletion proceeds. */
@Override
protected void performDelete() throws IOException, InterruptedException {
// prevent a new build while a delete operation is in progress
makeDisabled(true);
FilePath ws = getWorkspace();
if(ws!=null) {
Node on = getLastBuiltOn();
// Give the SCM a chance to clean up, then discard the provisioned workspace.
getScm().processWorkspaceBeforeDeletion(this, ws, on);
if(on!=null)
on.getFileSystemProvisioner().discardWorkspace(this,ws);
}
super.performDelete();
}
/**
 * Does this project perform concurrent builds?
 * @since 1.319
 */
@Exported
public boolean isConcurrentBuild() {
return concurrentBuild;
}
/** Enables or disables concurrent builds and persists the change. */
public void setConcurrentBuild(boolean b) throws IOException {
concurrentBuild = b;
save();
}
/**
 * If this project is configured to be always built on this node,
 * return that {@link Node}. Otherwise null.
 */
public @CheckForNull Label getAssignedLabel() {
if(canRoam)
return null;
// For historical reasons a null assignedNode means affinity with the master node.
if(assignedNode==null)
return Jenkins.getInstance().getSelfLabel();
return Jenkins.getInstance().getLabel(assignedNode);
}
/**
 * Set of labels relevant to this job.
 *
 * This method is used to determine what agents are relevant to jobs, for example by {@link View}s.
 * It does not affect the scheduling. This information is informational and the best-effort basis.
 *
 * @since 1.456
 * @return
 * Minimally it should contain {@link #getAssignedLabel()}. The set can contain null element
 * to correspond to the null return value from {@link #getAssignedLabel()}.
 */
public Set<Label> getRelevantLabels() {
return Collections.singleton(getAssignedLabel());
}
/**
 * Gets the textual representation of the assigned label as it was entered by the user.
 */
public String getAssignedLabelString() {
if (canRoam || assignedNode==null) return null;
try {
// If it parses as a label expression, it is safe to return verbatim.
LabelExpression.parseExpression(assignedNode);
return assignedNode;
} catch (ANTLRException e) {
// must be old label or host name that includes whitespace or other unsafe chars
return LabelAtom.escape(assignedNode);
}
}
/**
 * Sets the assigned label.
 */
public void setAssignedLabel(Label l) throws IOException {
if(l==null) {
// A null label means the project may build anywhere (roaming).
canRoam = true;
assignedNode = null;
} else {
canRoam = false;
// The self label is stored as null for historical reasons; see getAssignedLabel().
if(l== Jenkins.getInstance().getSelfLabel()) assignedNode = null;
else assignedNode = l.getExpression();
}
save();
}
/**
 * Assigns this job to the given node. A convenience method over {@link #setAssignedLabel(Label)}.
 */
public void setAssignedNode(Node l) throws IOException {
setAssignedLabel(l.getSelfLabel());
}
/**
 * Get the term used in the UI to represent this kind of {@link AbstractProject}.
 * Must start with a capital letter.
 */
@Override
public String getPronoun() {
return AlternativeUiTextProvider.get(PRONOUN, this,Messages.AbstractProject_Pronoun());
}
/**
 * Gets the human readable display name to be rendered in the "Build Now" link.
 *
 * @since 1.401
 */
public String getBuildNowText() {
// For compatibility, still use the deprecated replacer if specified.
return AlternativeUiTextProvider.get(BUILD_NOW_TEXT, this, getParameterizedJobMixIn().getBuildNowText());
}
/**
 * Gets the nearest ancestor {@link TopLevelItem} that's also an {@link AbstractProject}.
 *
 * <p>
 * Some projects (such as matrix projects, Maven projects, or promotion processes) form a tree of jobs
 * that acts as a single unit. This method can be used to find the top most dominating job that
 * covers such a tree.
 *
 * @return never null.
 * @see AbstractBuild#getRootBuild()
 */
public AbstractProject<?,?> getRootProject() {
if (this instanceof TopLevelItem) {
return this;
} else {
// Recurse up through nested project parents until a top-level item is reached.
ItemGroup p = this.getParent();
if (p instanceof AbstractProject)
return ((AbstractProject) p).getRootProject();
return this;
}
}
/**
 * Gets the directory where the module is checked out.
 *
 * @return
 * null if the workspace is on an agent that's not connected.
 * @deprecated as of 1.319
 * To support concurrent builds of the same project, this method is moved to {@link AbstractBuild}.
 * For backward compatibility, this method returns the right {@link AbstractBuild#getWorkspace()} if called
 * from {@link Executor}, and otherwise the workspace of the last build.
 *
 * <p>
 * If you are calling this method during a build from an executor, switch it to {@link AbstractBuild#getWorkspace()}.
 * If you are calling this method to serve a file from the workspace, doing a form validation, etc., then
 * use {@link #getSomeWorkspace()}
 */
@Deprecated
public final FilePath getWorkspace() {
// Delegate the build selection to the shared helper for deprecated methods.
AbstractBuild b = getBuildForDeprecatedMethods();
return b != null ? b.getWorkspace() : null;
}
/**
 * Various deprecated methods in this class all need the 'current' build. This method returns
 * the build suitable for that purpose.
 *
 * @return An AbstractBuild for deprecated methods to use.
 */
private AbstractBuild getBuildForDeprecatedMethods() {
// Prefer the build the calling executor thread is currently running,
// provided that build belongs to this project.
Executor e = Executor.currentExecutor();
if(e!=null) {
Executable exe = e.getCurrentExecutable();
if (exe instanceof AbstractBuild) {
AbstractBuild b = (AbstractBuild) exe;
if(b.getProject()==this)
return b;
}
}
// Otherwise fall back to the most recent build (may be null).
R lb = getLastBuild();
if(lb!=null) return lb;
return null;
}
/**
 * Gets a workspace for some build of this project.
 *
 * <p>
 * This is useful for obtaining a workspace for the purpose of form field validation, where exactly
 * which build the workspace belonged is less important. The implementation makes a cursory effort
 * to find some workspace.
 *
 * @return
 * null if there's no available workspace.
 * @since 1.319
 */
public final @CheckForNull FilePath getSomeWorkspace() {
R b = getSomeBuildWithWorkspace();
if (b!=null) return b.getWorkspace();
// No recent build has a workspace; ask WorkspaceBrowser extensions for one.
for (WorkspaceBrowser browser : ExtensionList.lookup(WorkspaceBrowser.class)) {
FilePath f = browser.getWorkspace(this);
if (f != null) return f;
}
return null;
}
/**
 * Gets some recent build that has a live workspace, checking at most the five
 * most recent builds so the search cost stays bounded.
 *
 * @return null if no such build exists.
 */
public final R getSomeBuildWithWorkspace() {
int cnt=0;
// BUGFIX: cnt was never incremented, so the intended 5-build cap was
// ineffective and the loop walked the entire build history.
for (R b = getLastBuild(); cnt<5 && b!=null; b=b.getPreviousBuild(), cnt++) {
FilePath ws = b.getWorkspace();
if (ws!=null) return b;
}
return null;
}
/**
 * Like {@link #getSomeBuildWithWorkspace()} but additionally requires the
 * workspace directory to still exist on disk (may contact the agent).
 */
private R getSomeBuildWithExistingWorkspace() throws IOException, InterruptedException {
int cnt=0;
// Same 5-build cap as above; cnt is incremented so the cap is enforced.
for (R b = getLastBuild(); cnt<5 && b!=null; b=b.getPreviousBuild(), cnt++) {
FilePath ws = b.getWorkspace();
if (ws!=null && ws.exists()) return b;
}
return null;
}
/**
 * Returns the root directory of the checked-out module.
 * <p>
 * This is usually where <tt>pom.xml</tt>, <tt>build.xml</tt>
 * and so on exists.
 *
 * @deprecated as of 1.319
 * See {@link #getWorkspace()} for a migration strategy.
 */
@Deprecated
public FilePath getModuleRoot() {
// Delegate the build selection to the shared helper for deprecated methods.
AbstractBuild b = getBuildForDeprecatedMethods();
return b != null ? b.getModuleRoot() : null;
}
/**
 * Returns the root directories of all checked-out modules.
 * <p>
 * Some SCMs support checking out multiple modules into the same workspace.
 * In these cases, the returned array will have a length greater than one.
 * @return The roots of all modules checked out from the SCM.
 *
 * @deprecated as of 1.319
 * See {@link #getWorkspace()} for a migration strategy.
 */
@Deprecated
public FilePath[] getModuleRoots() {
AbstractBuild b = getBuildForDeprecatedMethods();
return b != null ? b.getModuleRoots() : null;
}
/** Quiet period in seconds; falls back to the global default when unset. */
public int getQuietPeriod() {
return quietPeriod!=null ? quietPeriod : Jenkins.getInstance().getQuietPeriod();
}
/** Checkout strategy for this project; defaults to the standard implementation. */
public SCMCheckoutStrategy getScmCheckoutStrategy() {
return scmCheckoutStrategy == null ? new DefaultSCMCheckoutStrategyImpl() : scmCheckoutStrategy;
}
/** Sets the checkout strategy and persists the change. */
public void setScmCheckoutStrategy(SCMCheckoutStrategy scmCheckoutStrategy) throws IOException {
this.scmCheckoutStrategy = scmCheckoutStrategy;
save();
}
/** SCM checkout retry count; falls back to the global default when unset. */
public int getScmCheckoutRetryCount() {
return scmCheckoutRetryCount !=null ? scmCheckoutRetryCount : Jenkins.getInstance().getScmCheckoutRetryCount();
}
// ugly name because of EL
public boolean getHasCustomQuietPeriod() {
return quietPeriod!=null;
}
/**
 * Sets the custom quiet period of this project, or revert to the global default if null is given.
 */
public void setQuietPeriod(Integer seconds) throws IOException {
this.quietPeriod = seconds;
save();
}
/** True when a project-specific retry count overrides the global default. */
public boolean hasCustomScmCheckoutRetryCount(){
return scmCheckoutRetryCount != null;
}
@Override
public boolean isBuildable() {
    // Buildable only when enabled and fully saved (not mid-creation).
    return !isDisabled() && !isHoldOffBuildUntilSave();
}
/**
* Used in <tt>sidepanel.jelly</tt> to decide whether to display
* the config/delete/build links.
*/
// Always configurable; subclasses may override to hide config/delete/build links.
public boolean isConfigurable() {
    return true;
}
// Whether builds of this project should wait while downstream projects are building.
public boolean blockBuildWhenDownstreamBuilding() {
    return blockBuildWhenDownstreamBuilding;
}
// Sets the downstream-blocking flag and persists the configuration.
public void setBlockBuildWhenDownstreamBuilding(boolean b) throws IOException {
    blockBuildWhenDownstreamBuilding = b;
    save();
}
// Whether builds of this project should wait while upstream projects are building.
public boolean blockBuildWhenUpstreamBuilding() {
    return blockBuildWhenUpstreamBuilding;
}
// Sets the upstream-blocking flag and persists the configuration.
public void setBlockBuildWhenUpstreamBuilding(boolean b) throws IOException {
    blockBuildWhenUpstreamBuilding = b;
    save();
}
// True when the project is administratively disabled (no new builds are scheduled).
public boolean isDisabled() {
    return disabled;
}
/**
* Validates the retry count Regex
*/
/**
 * Form validation for the SCM checkout retry count field.
 * <p>
 * The field is optional: blank input is accepted and falls back to the
 * global default. Otherwise the value must be a non-negative integer.
 *
 * @param value raw form value; may be null
 * @return {@link FormValidation#ok} when blank or numeric, an error otherwise
 */
public FormValidation doCheckRetryCount(@QueryParameter String value)throws IOException,ServletException{
    // retry count is optional so this is ok
    if(value == null || value.trim().equals(""))
        return FormValidation.ok();
    // Validate the trimmed value: previously untrimmed input such as "5 "
    // failed the regex even though the blank-check above trims.
    if (!value.trim().matches("[0-9]*")) {
        return FormValidation.error("Invalid retry count");
    }
    return FormValidation.ok();
}
/**
* Marks the build as disabled.
* The method will ignore the disable command if {@link #supportsMakeDisabled()}
* returns false. The enable command will be executed in any case.
* @param b true - disable, false - enable
* @since 1.585 Do not disable projects if {@link #supportsMakeDisabled()} returns false
*/
public void makeDisabled(boolean b) throws IOException {
    if(disabled==b) return; // noop
    if (b && !supportsMakeDisabled()) return; // do nothing if the disabling is unsupported
    this.disabled = b;
    // When disabling, also remove any queued build before persisting the new state.
    if(b)
        Jenkins.getInstance().getQueue().cancel(this);
    save();
    // Notify listeners (e.g. UI/plugins) that the item configuration changed.
    ItemListener.fireOnUpdated(this);
}
/**
* Specifies whether this project may be disabled by the user.
* By default, it can be only if this is a {@link TopLevelItem};
* would be false for matrix configurations, etc.
* @return true if the GUI should allow {@link #doDisable} and the like
* @since 1.475
*/
// Only top-level items may be disabled by default (matrix configurations etc. may not).
public boolean supportsMakeDisabled() {
    return this instanceof TopLevelItem;
}
// Convenience wrapper for makeDisabled(true).
public void disable() throws IOException {
    makeDisabled(true);
}
// Convenience wrapper for makeDisabled(false).
public void enable() throws IOException {
    makeDisabled(false);
}
@Override
public BallColor getIconColor() {
    // A disabled project shows the "disabled" ball, animated while a build is still running.
    if (!isDisabled()) {
        return super.getIconColor();
    }
    return isBuilding() ? BallColor.DISABLED_ANIME : BallColor.DISABLED;
}
/**
* effectively deprecated. Since using updateTransientActions correctly
* under concurrent environment requires a lock that can too easily cause deadlocks.
*
* <p>
* Override {@link #createTransientActions()} instead.
*/
// Recomputes the cached transient-action list; see class note about deadlock risk.
protected void updateTransientActions() {
    transientActions = createTransientActions();
}
/**
 * Builds the list of transient actions contributed by job properties and
 * {@link TransientProjectActionFactory} extensions. Factory failures are
 * logged and skipped so one broken plugin cannot break the whole project page.
 */
protected List<Action> createTransientActions() {
    Vector<Action> ta = new Vector<Action>();
    for (JobProperty<? super P> p : Util.fixNull(properties))
        ta.addAll(p.getJobActions((P)this));
    for (TransientProjectActionFactory tpaf : TransientProjectActionFactory.all()) {
        try {
            ta.addAll(Util.fixNull(tpaf.createFor(this))); // be defensive against null
        } catch (Exception e) {
            LOGGER.log(Level.SEVERE, "Could not load actions from " + tpaf + " for " + this, e);
        }
    }
    return ta;
}
/**
* Returns the live list of all {@link Publisher}s configured for this project.
*
* <p>
* This method couldn't be called <tt>getPublishers()</tt> because existing methods
* in sub-classes return different inconsistent types.
*/
public abstract DescribableList<Publisher,Descriptor<Publisher>> getPublishersList(); // live, mutable list of configured publishers
@Override
public void addProperty(JobProperty<? super P> jobProp) throws IOException {
    super.addProperty(jobProp);
    // Properties can contribute actions, so the cached list must be refreshed.
    updateTransientActions();
}
// Actions that should be displayed prominently on the project page.
public List<ProminentProjectAction> getProminentActions() {
    return getActions(ProminentProjectAction.class);
}
@Override
@RequirePOST
public void doConfigSubmit( StaplerRequest req, StaplerResponse rsp ) throws IOException, ServletException, FormException {
    super.doConfigSubmit(req,rsp);
    updateTransientActions();
    // notify the queue as the project might be now tied to different node
    Jenkins.getInstance().getQueue().scheduleMaintenance();
    // this is to reflect the upstream build adjustments done above
    Jenkins.getInstance().rebuildDependencyGraphAsync();
}
/**
* @deprecated
* Use {@link #scheduleBuild(Cause)}. Since 1.283
*/
@Deprecated
public boolean scheduleBuild() {
    // Delegates to the parameterized-job mix-in; prefer scheduleBuild(Cause).
    return getParameterizedJobMixIn().scheduleBuild();
}
/**
* @deprecated
* Use {@link #scheduleBuild(int, Cause)}. Since 1.283
*/
@Deprecated
public boolean scheduleBuild(int quietPeriod) {
    // Delegates to the parameterized-job mix-in; prefer scheduleBuild(int, Cause).
    return getParameterizedJobMixIn().scheduleBuild(quietPeriod);
}
/**
* Schedules a build of this project.
*
* @return
* true if the project is added to the queue.
* false if the task was rejected from the queue (such as when the system is being shut down.)
*/
// Schedules a build with the given cause; false when the queue rejects the task.
public boolean scheduleBuild(Cause c) {
    return getParameterizedJobMixIn().scheduleBuild(c);
}
// Schedules a build with an explicit quiet period and cause.
public boolean scheduleBuild(int quietPeriod, Cause c) {
    return getParameterizedJobMixIn().scheduleBuild(quietPeriod, c);
}
/**
* Schedules a build.
*
* Important: the actions should be persistable without outside references (e.g. don't store
* references to this project). To provide parameters for a parameterized project, add a ParametersAction. If
* no ParametersAction is provided for such a project, one will be created with the default parameter values.
*
* @param quietPeriod the quiet period to observer
* @param c the cause for this build which should be recorded
* @param actions a list of Actions that will be added to the build
* @return whether the build was actually scheduled
*/
public boolean scheduleBuild(int quietPeriod, Cause c, Action... actions) {
    // Non-null future means the task made it into the queue.
    return scheduleBuild2(quietPeriod,c,actions)!=null;
}
/**
* Schedules a build of this project, and returns a {@link Future} object
* to wait for the completion of the build.
*
* @param actions
* For the convenience of the caller, this array can contain null, and those will be silently ignored.
*/
@WithBridgeMethods(Future.class)
public QueueTaskFuture<R> scheduleBuild2(int quietPeriod, Cause c, Action... actions) {
    // Varargs convenience wrapper over the Collection overload.
    return scheduleBuild2(quietPeriod,c,Arrays.asList(actions));
}
/**
* Schedules a build of this project, and returns a {@link Future} object
* to wait for the completion of the build.
*
* @param actions
* For the convenience of the caller, this collection can contain null, and those will be silently ignored.
* @since 1.383
*/
/**
 * Schedules a build with the given actions (nulls tolerated per contract) and,
 * when a cause is supplied, an additional {@link CauseAction} recording it.
 * Returns a future for the queued build, or null if the queue rejected it.
 */
@SuppressWarnings("unchecked")
@WithBridgeMethods(Future.class)
public QueueTaskFuture<R> scheduleBuild2(int quietPeriod, Cause c, Collection<? extends Action> actions) {
    List<Action> allActions = new ArrayList<Action>(actions);
    if (c != null) {
        allActions.add(new CauseAction(c));
    }
    Action[] asArray = allActions.toArray(new Action[allActions.size()]);
    return getParameterizedJobMixIn().scheduleBuild2(quietPeriod, asArray);
}
/**
* Schedules a build, and returns a {@link Future} object
* to wait for the completion of the build.
*
* <p>
* Production code shouldn't be using this, but for tests this is very convenient, so this isn't marked
* as deprecated.
*/
@SuppressWarnings("deprecation")
@WithBridgeMethods(Future.class)
public QueueTaskFuture<R> scheduleBuild2(int quietPeriod) {
    // Test-friendly overload: records a legacy-code cause.
    return scheduleBuild2(quietPeriod, new LegacyCodeCause());
}
/**
* Schedules a build of this project, and returns a {@link Future} object
* to wait for the completion of the build.
*/
@WithBridgeMethods(Future.class)
public QueueTaskFuture<R> scheduleBuild2(int quietPeriod, Cause c) {
    // Convenience overload with no extra actions.
    return scheduleBuild2(quietPeriod, c, new Action[0]);
}
/**
* Schedules a polling of this project.
*/
/**
 * Triggers an SCM polling run for this project.
 * No-op (returns false) when the project is disabled or has no SCM trigger.
 */
public boolean schedulePolling() {
    if (isDisabled()) {
        return false;
    }
    SCMTrigger trigger = getTrigger(SCMTrigger.class);
    if (trigger == null) {
        return false;
    }
    trigger.run();
    return true;
}
/**
* Returns true if the build is in the queue.
*/
@Override
public boolean isInQueue() {
    // Asks the global build queue whether this project is currently queued.
    return Jenkins.getInstance().getQueue().contains(this);
}
@Override
public Queue.Item getQueueItem() {
    // The queue item for this project, or null when it is not queued.
    return Jenkins.getInstance().getQueue().getItem(this);
}
/**
* Gets the JDK that this project is configured with, or null.
*/
// Resolves the configured JDK name against the global tool list; null when unresolved.
public JDK getJDK() {
    return Jenkins.getInstance().getJDK(jdk);
}
/**
* Overwrites the JDK setting.
*/
// Overwrites the JDK setting and persists it.
// NOTE(review): a null argument would NPE on jdk.getName() — presumably callers never pass null; confirm.
public void setJDK(JDK jdk) throws IOException {
    this.jdk = jdk.getName();
    save();
}
// Legacy build-authorization token, if one is configured.
public BuildAuthorizationToken getAuthToken() {
    return authToken;
}
@Override
public RunMap<R> _getRuns() {
    // Delegates build-record storage to the build mix-in.
    return buildMixIn._getRuns();
}
@Override
public void removeRun(R run) {
    // Delegates build-record removal to the build mix-in.
    buildMixIn.removeRun(run);
}
/**
* {@inheritDoc}
*
* More efficient implementation.
*/
@Override
public R getBuild(String id) {
    // Mix-in lookup avoids loading all builds.
    return buildMixIn.getBuild(id);
}
/**
* {@inheritDoc}
*
* More efficient implementation.
*/
@Override
public R getBuildByNumber(int n) {
    // Mix-in lookup avoids loading all builds.
    return buildMixIn.getBuildByNumber(n);
}
/**
* {@inheritDoc}
*
* More efficient implementation.
*/
@Override
public R getFirstBuild() {
    // Mix-in lookup avoids loading all builds.
    return buildMixIn.getFirstBuild();
}
@Override
public @CheckForNull R getLastBuild() {
    // Most recent build, or null when the project has never been built.
    return buildMixIn.getLastBuild();
}
@Override
public R getNearestBuild(int n) {
    // Smallest build whose number is >= n, per mix-in semantics.
    return buildMixIn.getNearestBuild(n);
}
@Override
public R getNearestOldBuild(int n) {
    // Largest build whose number is <= n, per mix-in semantics.
    return buildMixIn.getNearestOldBuild(n);
}
/**
* Type token for the corresponding build type.
* The build class must have two constructors:
* one taking this project type;
* and one taking this project type, then {@link File}.
*/
protected abstract Class<R> getBuildClass(); // see Javadoc above for required constructors
/**
* Creates a new build of this project for immediate execution.
*/
// Synchronized so concurrent triggers do not allocate clashing build numbers.
protected synchronized R newBuild() throws IOException {
    return buildMixIn.newBuild();
}
/**
* Loads an existing build record from disk.
*/
// Loads a persisted build record from the given build directory.
protected R loadBuild(File dir) throws IOException {
    return buildMixIn.loadBuild(dir);
}
/**
* {@inheritDoc}
*
* <p>
* Note that this method returns a read-only view of {@link Action}s.
* {@link BuildStep}s and others who want to add a project action
* should do so by implementing {@link BuildStep#getProjectActions(AbstractProject)}.
*
* @see TransientProjectActionFactory
*/
@SuppressWarnings("deprecation")
@Override
public List<Action> getActions() {
    // add all the transient actions, too
    List<Action> actions = new Vector<Action>(super.getActions());
    actions.addAll(transientActions);
    // return the read only list to cause a failure on plugins who try to add an action here
    return Collections.unmodifiableList(actions);
}
// TODO implement addAction, addOrReplaceAction, removeAction, removeActions, replaceActions
/**
* Gets the {@link Node} where this project was last built on.
*
* @return
* null if no information is available (for example,
* if no build was done yet.)
*/
/**
 * The {@link Node} the most recent build ran on, or null when the project
 * has never been built.
 */
public Node getLastBuiltOn() {
    AbstractBuild lastBuild = getLastBuild();
    return lastBuild == null ? null : lastBuild.getBuiltOn();
}
public Object getSameNodeConstraint() {
    return this; // in this way, any member that wants to run with the main guy can nominate the project itself
}
// The project itself is the owning queue task.
public final Task getOwnerTask() {
    return this;
}
@Nonnull
public Authentication getDefaultAuthentication() {
    // backward compatible behaviour.
    return ACL.SYSTEM;
}
@Nonnull
@Override
public Authentication getDefaultAuthentication(Queue.Item item) {
    // Queue items inherit the project-level default authentication.
    return getDefaultAuthentication();
}
/**
* {@inheritDoc}
*
* <p>
* A project must be blocked if its own previous build is in progress,
* or if the blockBuildWhenUpstreamBuilding option is true and an upstream
* project is building, but derived classes can also check other conditions.
*/
@Override
public boolean isBuildBlocked() {
    // Blocked iff some cause of blockage exists (own build in progress, up/downstream, ...).
    return getCauseOfBlockage()!=null;
}
/**
 * Human-readable description of why the build is blocked,
 * or null when it is not blocked.
 */
public String getWhyBlocked() {
    CauseOfBlockage cause = getCauseOfBlockage();
    if (cause == null) {
        return null;
    }
    return cause.getShortDescription();
}
/**
* @deprecated use {@link BlockedBecauseOfBuildInProgress} instead.
*/
@Deprecated
public static class BecauseOfBuildInProgress extends BlockedBecauseOfBuildInProgress {
    // Retained for binary compatibility; simply forwards to the replacement class.
    public BecauseOfBuildInProgress(@Nonnull AbstractBuild<?, ?> build) {
        super(build);
    }
}
/**
* Because the downstream build is in progress, and we are configured to wait for that.
*/
public static class BecauseOfDownstreamBuildInProgress extends CauseOfBlockage {
    // The downstream project whose in-progress build blocks us.
    public final AbstractProject<?,?> up;
    public BecauseOfDownstreamBuildInProgress(AbstractProject<?,?> up) {
        this.up = up;
    }
    @Override
    public String getShortDescription() {
        return Messages.AbstractProject_DownstreamBuildInProgress(up.getName());
    }
}
/**
* Because the upstream build is in progress, and we are configured to wait for that.
*/
public static class BecauseOfUpstreamBuildInProgress extends CauseOfBlockage {
    // The upstream project whose in-progress build blocks us.
    public final AbstractProject<?,?> up;
    public BecauseOfUpstreamBuildInProgress(AbstractProject<?,?> up) {
        this.up = up;
    }
    @Override
    public String getShortDescription() {
        return Messages.AbstractProject_UpstreamBuildInProgress(up.getName());
    }
}
@Override
public CauseOfBlockage getCauseOfBlockage() {
    // Block builds until they are done with post-production
    if (isLogUpdated() && !isConcurrentBuild()) {
        final R lastBuild = getLastBuild();
        if (lastBuild != null) {
            return new BlockedBecauseOfBuildInProgress(lastBuild);
        } else {
            // The build has been likely deleted after the isLogUpdated() call.
            // Another cause may be an API implementation glitсh in the implementation for AbstractProject.
            // Anyway, we should let the code go then.
            LOGGER.log(Level.FINE, "The last build has been deleted during the non-concurrent cause creation. The build is not blocked anymore");
        }
    }
    // Optionally wait for downstream projects that are building or about to build.
    if (blockBuildWhenDownstreamBuilding()) {
        AbstractProject<?,?> bup = getBuildingDownstream();
        if (bup!=null)
            return new BecauseOfDownstreamBuildInProgress(bup);
    }
    // Optionally wait for upstream projects that are building or about to build.
    if (blockBuildWhenUpstreamBuilding()) {
        AbstractProject<?,?> bup = getBuildingUpstream();
        if (bup!=null)
            return new BecauseOfUpstreamBuildInProgress(bup);
    }
    return null;
}
/**
* Returns the project if any of the downstream project is either
* building, waiting, pending or buildable.
* <p>
* This means eventually there will be an automatic triggering of
* the given project (provided that all builds went smoothly.)
*/
public AbstractProject getBuildingDownstream() {
    // Consider a downstream project "active" when it is building or is an unblocked queue task.
    Set<Task> unblockedTasks = Jenkins.getInstance().getQueue().getUnblockedTasks();
    for (AbstractProject tup : getTransitiveDownstreamProjects()) {
        if (tup!=this && (tup.isBuilding() || unblockedTasks.contains(tup)))
            return tup;
    }
    return null;
}
/**
* Returns the project if any of the upstream project is either
* building or is in the queue.
* <p>
* This means eventually there will be an automatic triggering of
* the given project (provided that all builds went smoothly.)
*/
public AbstractProject getBuildingUpstream() {
    // Consider an upstream project "active" when it is building or is an unblocked queue task.
    Set<Task> unblockedTasks = Jenkins.getInstance().getQueue().getUnblockedTasks();
    for (AbstractProject tup : getTransitiveUpstreamProjects()) {
        if (tup!=this && (tup.isBuilding() || unblockedTasks.contains(tup)))
            return tup;
    }
    return null;
}
/**
 * The project itself plus sub-tasks contributed by {@link SubTaskContributor}
 * extensions and by job properties.
 */
public List<SubTask> getSubTasks() {
    List<SubTask> r = new ArrayList<SubTask>();
    r.add(this);
    for (SubTaskContributor euc : SubTaskContributor.all())
        r.addAll(euc.forProject(this));
    for (JobProperty<? super P> p : properties)
        r.addAll(p.getSubTasks());
    return r;
}
// Creates a new build for execution; null when the project is disabled.
public @CheckForNull R createExecutable() throws IOException {
    if(isDisabled()) return null;
    return newBuild();
}
// Throws if the current user lacks permission to cancel builds of this project.
public void checkAbortPermission() {
    checkPermission(CANCEL);
}
// True when the current user may cancel builds of this project.
public boolean hasAbortPermission() {
    return hasPermission(CANCEL);
}
/**
* Gets the {@link Resource} that represents the workspace of this project.
* Useful for locking and mutual exclusion control.
*
* @deprecated as of 1.319
* Projects no longer have a fixed workspace, ands builds will find an available workspace via
* {@link WorkspaceList} for each build (furthermore, that happens after a build is started.)
* So a {@link Resource} representation for a workspace at the project level no longer makes sense.
*
* <p>
* If you need to lock a workspace while you do some computation, see the source code of
* {@link #pollSCMChanges(TaskListener)} for how to obtain a lock of a workspace through {@link WorkspaceList}.
*/
@Deprecated
public Resource getWorkspaceResource() {
    // Legacy fixed-workspace resource; see deprecation note above for the replacement.
    return new Resource(getFullDisplayName()+" workspace");
}
/**
* List of necessary resources to perform the build of this project.
*/
public ResourceList getResourceList() {
    final Set<ResourceActivity> resourceActivities = getResourceActivities();
    final List<ResourceList> resourceLists = new ArrayList<ResourceList>(1 + resourceActivities.size());
    for (ResourceActivity activity : resourceActivities) {
        if (activity != this && activity != null) {
            // defensive infinite recursion and null check
            resourceLists.add(activity.getResourceList());
        }
    }
    // Union of all child-activity resource lists.
    return ResourceList.union(resourceLists);
}
/**
* Set of child resource activities of the build of this project (override in child projects).
* @return The set of child resource activities of the build of this project.
*/
// No child resource activities by default; subclasses may override.
protected Set<ResourceActivity> getResourceActivities() {
    return Collections.emptySet();
}
/**
 * Performs the SCM checkout into the build's workspace and, on success,
 * records the polling baseline. Returns true on success (trivially true
 * when the project has no SCM configured).
 */
public boolean checkout(AbstractBuild build, Launcher launcher, BuildListener listener, File changelogFile) throws IOException, InterruptedException {
    SCM scm = getScm();
    if(scm==null)
        return true; // no SCM
    FilePath workspace = build.getWorkspace();
    workspace.mkdirs();
    boolean r = scm.checkout(build, launcher, workspace, listener, changelogFile);
    if (r) {
        // Only calcRevisionsFromBuild if checkout was successful. Note that modern SCM implementations
        // won't reach this line anyway, as they throw AbortExceptions on checkout failure.
        calcPollingBaseline(build, launcher, listener);
    }
    return r;
}
/**
* Pushes the baseline up to the newly checked out revision.
*/
private void calcPollingBaseline(AbstractBuild build, Launcher launcher, TaskListener listener) throws IOException, InterruptedException {
    // Prefer the baseline recorded on the build; compute it from the SCM otherwise.
    SCMRevisionState baseline = build.getAction(SCMRevisionState.class);
    if (baseline==null) {
        try {
            baseline = getScm().calcRevisionsFromBuild(build, launcher, listener);
        } catch (AbstractMethodError e) {
            baseline = SCMRevisionState.NONE; // pre-1.345 SCM implementations, which doesn't use the baseline in polling
        }
        // Persist the computed baseline on the build for future polls.
        if (baseline!=null)
            build.addAction(baseline);
    }
    pollingBaseline = baseline;
}
/**
* Checks if there's any update in SCM, and returns true if any is found.
*
* @deprecated as of 1.346
* Use {@link #poll(TaskListener)} instead.
*/
@Deprecated
public boolean pollSCMChanges( TaskListener listener ) {
    // Legacy boolean wrapper around poll(TaskListener).
    return poll(listener).hasChanges();
}
/**
* Checks if there's any update in SCM, and returns true if any is found.
*
* <p>
* The implementation is responsible for ensuring mutual exclusion between polling and builds
* if necessary.
*
* @since 1.345
*/
public PollingResult poll( TaskListener listener ) {
    // Short-circuit conditions under which polling is pointless or vetoed.
    SCM scm = getScm();
    if (scm==null) {
        listener.getLogger().println(Messages.AbstractProject_NoSCM());
        return NO_CHANGES;
    }
    if (!isBuildable()) {
        listener.getLogger().println(Messages.AbstractProject_Disabled());
        return NO_CHANGES;
    }
    SCMDecisionHandler veto = SCMDecisionHandler.firstShouldPollVeto(this);
    if (veto != null) {
        listener.getLogger().println(Messages.AbstractProject_PollingVetoed(veto));
        return NO_CHANGES;
    }
    // With no build at all there is nothing to compare against: build now unless already queued.
    R lb = getLastBuild();
    if (lb==null) {
        listener.getLogger().println(Messages.AbstractProject_NoBuilds());
        return isInQueue() ? NO_CHANGES : BUILD_NOW;
    }
    if (pollingBaseline==null) {
        // Walk back through builds looking for a persisted baseline action,
        // stopping at the last successful build.
        R success = getLastSuccessfulBuild(); // if we have a persisted baseline, we'll find it by this
        for (R r=lb; r!=null; r=r.getPreviousBuild()) {
            SCMRevisionState s = r.getAction(SCMRevisionState.class);
            if (s!=null) {
                pollingBaseline = s;
                break;
            }
            if (r==success) break; // searched far enough
        }
        // NOTE-NO-BASELINE:
        // if we don't have baseline yet, it means the data is built by old Hudson that doesn't set the baseline
        // as action, so we need to compute it. This happens later.
    }
    // Run the actual poll, notifying SCMPollListeners around it and translating
    // the various failure modes to NO_CHANGES (or rethrowing unchecked throwables).
    try {
        SCMPollListener.fireBeforePolling(this, listener);
        PollingResult r = _poll(listener, scm);
        SCMPollListener.firePollingSuccess(this,listener, r);
        return r;
    } catch (AbortException e) {
        listener.getLogger().println(e.getMessage());
        listener.fatalError(Messages.AbstractProject_Aborted());
        LOGGER.log(Level.FINE, "Polling "+this+" aborted",e);
        SCMPollListener.firePollingFailed(this, listener,e);
        return NO_CHANGES;
    } catch (IOException e) {
        Functions.printStackTrace(e, listener.fatalError(e.getMessage()));
        SCMPollListener.firePollingFailed(this, listener,e);
        return NO_CHANGES;
    } catch (InterruptedException e) {
        Functions.printStackTrace(e, listener.fatalError(Messages.AbstractProject_PollingABorted()));
        SCMPollListener.firePollingFailed(this, listener,e);
        return NO_CHANGES;
    } catch (RuntimeException e) {
        SCMPollListener.firePollingFailed(this, listener,e);
        throw e;
    } catch (Error e) {
        SCMPollListener.firePollingFailed(this, listener,e);
        throw e;
    }
}
/**
* {@link #poll(TaskListener)} method without the try/catch block that does listener notification and .
*/
private PollingResult _poll(TaskListener listener, SCM scm) throws IOException, InterruptedException {
    if (scm.requiresWorkspaceForPolling()) {
        // Prefer a build whose workspace still exists; fall back to the last build.
        // (poll() guarantees at least one build exists before calling us.)
        R b = getSomeBuildWithExistingWorkspace();
        if (b == null) b = getLastBuild();
        // lock the workspace for the given build
        FilePath ws=b.getWorkspace();
        WorkspaceOfflineReason workspaceOfflineReason = workspaceOffline( b );
        if ( workspaceOfflineReason != null ) {
            // workspace offline
            // A WorkspaceBrowser extension may still be able to provide a workspace.
            for (WorkspaceBrowser browser : ExtensionList.lookup(WorkspaceBrowser.class)) {
                ws = browser.getWorkspace(this);
                if (ws != null) {
                    return pollWithWorkspace(listener, scm, b, ws, browser.getWorkspaceList());
                }
            }
            // At this point we start thinking about triggering a build just to get a workspace,
            // because otherwise there's no way we can detect changes.
            // However, first there are some conditions in which we do not want to do so.
            // give time for agents to come online if we are right after reconnection (JENKINS-8408)
            long running = Jenkins.getInstance().getInjector().getInstance(Uptime.class).getUptime();
            long remaining = TimeUnit2.MINUTES.toMillis(10)-running;
            if (remaining>0 && /* this logic breaks tests of polling */!Functions.getIsUnitTest()) {
                listener.getLogger().print(Messages.AbstractProject_AwaitingWorkspaceToComeOnline(remaining/1000));
                listener.getLogger().println( " (" + workspaceOfflineReason.name() + ")");
                return NO_CHANGES;
            }
            // Do not trigger build, if no suitable agent is online
            if (workspaceOfflineReason.equals(WorkspaceOfflineReason.all_suitable_nodes_are_offline)) {
                // No suitable executor is online
                listener.getLogger().print(Messages.AbstractProject_AwaitingWorkspaceToComeOnline(running/1000));
                listener.getLogger().println( " (" + workspaceOfflineReason.name() + ")");
                return NO_CHANGES;
            }
            Label label = getAssignedLabel();
            if (label != null && label.isSelfLabel()) {
                // if the build is fixed on a node, then attempting a build will do us
                // no good. We should just wait for the agent to come back.
                listener.getLogger().print(Messages.AbstractProject_NoWorkspace());
                listener.getLogger().println( " (" + workspaceOfflineReason.name() + ")");
                return NO_CHANGES;
            }
            listener.getLogger().println( ws==null
                ? Messages.AbstractProject_WorkspaceOffline()
                : Messages.AbstractProject_NoWorkspace());
            if (isInQueue()) {
                listener.getLogger().println(Messages.AbstractProject_AwaitingBuildForWorkspace());
                return NO_CHANGES;
            }
            // build now, or nothing will ever be built
            listener.getLogger().print(Messages.AbstractProject_NewBuildForWorkspace());
            listener.getLogger().println( " (" + workspaceOfflineReason.name() + ")");
            return BUILD_NOW;
        } else {
            // Workspace is online: poll under the node's workspace lock.
            WorkspaceList l = b.getBuiltOn().toComputer().getWorkspaceList();
            return pollWithWorkspace(listener, scm, b, ws, l);
        }
    } else {
        // polling without workspace
        LOGGER.fine("Polling SCM changes of " + getName());
        if (pollingBaseline==null) // see NOTE-NO-BASELINE above
            calcPollingBaseline(getLastBuild(),null,listener);
        PollingResult r = scm.poll(this, null, null, listener, pollingBaseline);
        pollingBaseline = r.remote;
        return r;
    }
}
private PollingResult pollWithWorkspace(TaskListener listener, SCM scm, R lb, @Nonnull FilePath ws, WorkspaceList l) throws InterruptedException, IOException {
    // if doing non-concurrent build, acquire a workspace in a way that causes builds to block for this workspace.
    // this prevents multiple workspaces of the same job --- the behavior of Hudson < 1.319.
    //
    // OTOH, if a concurrent build is chosen, the user is willing to create a multiple workspace,
    // so better throughput is achieved over time (modulo the initial cost of creating that many workspaces)
    // by having multiple workspaces
    Node node = lb.getBuiltOn();
    Launcher launcher = ws.createLauncher(listener).decorateByEnv(getEnvironment(node,listener));
    WorkspaceList.Lease lease = l.acquire(ws, !concurrentBuild);
    try {
        String nodeName = node != null ? node.getSelfLabel().getName() : "[node_unavailable]";
        listener.getLogger().println("Polling SCM changes on " + nodeName);
        LOGGER.fine("Polling SCM changes of " + getName());
        if (pollingBaseline==null) // see NOTE-NO-BASELINE above
            calcPollingBaseline(lb,launcher,listener);
        PollingResult r = scm.poll(this, launcher, ws, listener, pollingBaseline);
        pollingBaseline = r.remote;
        return r;
    } finally {
        // Always release the workspace lease, even when polling throws.
        lease.release();
    }
}
// Why polling found no usable workspace; names are printed into the polling log.
enum WorkspaceOfflineReason {
    nonexisting_workspace,
    builton_node_gone,
    builton_node_no_executors,
    all_suitable_nodes_are_offline,
    use_ondemand_slave
}
/**
* Returns true if all suitable nodes for the job are offline.
*
*/
/**
 * Returns true if all suitable nodes for the job are offline.
 * <p>
 * With an assigned label: false when the label matches no nodes (an invalid
 * label should sit in the queue so the administrator notices), otherwise the
 * label's offline status. Without a label: false as soon as any normal-mode
 * online node accepting tasks is found; when roaming, an EXCLUSIVE master
 * counts as "all offline".
 *
 * @param build unused here; kept for signature compatibility with callers
 */
private boolean isAllSuitableNodesOffline(R build) {
    Label label = getAssignedLabel();
    // Fix: dropped the unused local that duplicated the getNodes() call below.
    if (label != null) {
        //Invalid label. Put in queue to make administrator fix
        if(label.getNodes().isEmpty()) {
            return false;
        }
        //Returns true, if all suitable nodes are offline
        return label.isOffline();
    } else {
        if(canRoam) {
            for (Node n : Jenkins.getInstance().getNodes()) {
                Computer c = n.toComputer();
                if (c != null && c.isOnline() && c.isAcceptingTasks() && n.getMode() == Mode.NORMAL) {
                    // Some executor is online that is ready and this job can run anywhere
                    return false;
                }
            }
            //We can roam, check that the master is set to be used as much as possible, and not tied jobs only.
            if(Jenkins.getInstance().getMode() == Mode.EXCLUSIVE) {
                return true;
            } else {
                return false;
            }
        }
    }
    return true;
}
// Classifies why the build's workspace is unusable for polling; null when it is usable.
private WorkspaceOfflineReason workspaceOffline(R build) throws IOException, InterruptedException {
    FilePath ws = build.getWorkspace();
    Label label = getAssignedLabel();
    if (isAllSuitableNodesOffline(build)) {
        // With clouds available we can provision an agent on demand instead of just waiting.
        Collection<Cloud> applicableClouds = label == null ? Jenkins.getInstance().clouds : label.getClouds();
        return applicableClouds.isEmpty() ? WorkspaceOfflineReason.all_suitable_nodes_are_offline : WorkspaceOfflineReason.use_ondemand_slave;
    }
    if (ws==null || !ws.exists()) {
        return WorkspaceOfflineReason.nonexisting_workspace;
    }
    Node builtOn = build.getBuiltOn();
    if (builtOn == null) { // node built-on doesn't exist anymore
        return WorkspaceOfflineReason.builton_node_gone;
    }
    if (builtOn.toComputer() == null) { // node still exists, but has 0 executors - o.s.l.t.
        return WorkspaceOfflineReason.builton_node_no_executors;
    }
    return null;
}
/**
* Returns true if this user has made a commit to this project.
*
* @since 1.191
*/
/**
 * Whether the given user has made a commit recorded by any build of this project.
 * Walks builds from newest to oldest and stops at the first match.
 */
public boolean hasParticipant(User user) {
    R build = getLastBuild();
    while (build != null) {
        if (build.hasParticipant(user)) {
            return true;
        }
        build = build.getPreviousBuild();
    }
    return false;
}
@Exported
public SCM getScm() {
    // The configured SCM, or null when none is set.
    return scm;
}
// Overwrites the SCM configuration and persists it.
public void setScm(SCM scm) throws IOException {
    this.scm = scm;
    save();
}
/**
* Adds a new {@link Trigger} to this {@link Project} if not active yet.
*/
// Adds (or replaces, by descriptor) a trigger and persists the change.
public void addTrigger(Trigger<?> trigger) throws IOException {
    addToList(trigger,triggers());
}
// Removes the trigger matching the given descriptor, if present, and persists.
public void removeTrigger(TriggerDescriptor trigger) throws IOException {
    removeFromList(trigger,triggers());
}
protected final synchronized <T extends Describable<T>>
void addToList( T item, List<T> collection ) throws IOException {
    //No support to replace item in position, remove then add
    removeFromList(item.getDescriptor(), collection);
    collection.add(item);
    save();
    // Items in these lists may contribute actions; refresh the cache.
    updateTransientActions();
}
protected final synchronized <T extends Describable<T>>
void removeFromList(Descriptor<T> item, List<T> collection) throws IOException {
    // Remove at most one element whose descriptor matches, then persist.
    final Iterator<T> iCollection = collection.iterator();
    while(iCollection.hasNext()) {
        final T next = iCollection.next();
        if(next.getDescriptor()==item) {
            // found it
            iCollection.remove();
            save();
            updateTransientActions();
            return;
        }
    }
}
@SuppressWarnings("unchecked")
@Override public Map<TriggerDescriptor,Trigger<?>> getTriggers() {
    // Descriptor-keyed view of the configured triggers.
    return triggers().toMap();
}
/**
* Gets the specific trigger, or null if the property is not configured for this job.
*/
/**
 * The first configured trigger assignable to the given class,
 * or null when no such trigger exists.
 */
public <T extends Trigger> T getTrigger(Class<T> clazz) {
    for (Trigger candidate : triggers()) {
        if (clazz.isInstance(candidate)) {
            return clazz.cast(candidate);
        }
    }
    return null;
}
//
//
// fingerprint related
//
//
/**
* True if the builds of this project produces {@link Fingerprint} records.
*/
public abstract boolean isFingerprintConfigured(); // whether builds record Fingerprint data
/**
* Gets the other {@link AbstractProject}s that should be built
* when a build of this project is completed.
*/
@Exported
public final List<AbstractProject> getDownstreamProjects() {
    // Direct downstream projects per the global dependency graph.
    return Jenkins.getInstance().getDependencyGraph().getDownstream(this);
}
@Exported
public final List<AbstractProject> getUpstreamProjects() {
    // Direct upstream projects per the global dependency graph.
    return Jenkins.getInstance().getDependencyGraph().getUpstream(this);
}
/**
* Returns only those upstream projects that defines {@link BuildTrigger} to this project.
* This is a subset of {@link #getUpstreamProjects()}
* <p>No longer used in the UI.
* @return A List of upstream projects that has a {@link BuildTrigger} to this project.
*/
/**
 * Upstream projects that explicitly list this project as a child via a
 * {@link BuildTrigger} publisher — a subset of {@link #getUpstreamProjects()}.
 */
public final List<AbstractProject> getBuildTriggerUpstreamProjects() {
    List<AbstractProject> matches = new ArrayList<AbstractProject>();
    for (AbstractProject<?,?> upstream : getUpstreamProjects()) {
        BuildTrigger trigger = upstream.getPublishersList().get(BuildTrigger.class);
        if (trigger != null && trigger.getChildProjects(upstream).contains(this)) {
            matches.add(upstream);
        }
    }
    return matches;
}
/**
* Gets all the upstream projects including transitive upstream projects.
*
* @since 1.138
*/
// All upstream projects, including transitive ones, per the dependency graph.
public final Set<AbstractProject> getTransitiveUpstreamProjects() {
    return Jenkins.getInstance().getDependencyGraph().getTransitiveUpstream(this);
}
/**
* Gets all the downstream projects including transitive downstream projects.
*
* @since 1.138
*/
// All downstream projects, including transitive ones, per the dependency graph.
public final Set<AbstractProject> getTransitiveDownstreamProjects() {
    return Jenkins.getInstance().getDependencyGraph().getTransitiveDownstream(this);
}
/**
* Gets the dependency relationship map between this project (as the source)
* and that project (as the sink.)
*
* @return
* can be empty but not null. build number of this project to the build
* numbers of that project.
*/
public SortedMap<Integer, RangeSet> getRelationship(AbstractProject that) {
    // Newest builds first, hence the reverse integer comparator.
    TreeMap<Integer,RangeSet> r = new TreeMap<Integer,RangeSet>(REVERSE_INTEGER_COMPARATOR);
    checkAndRecord(that, r, this.getBuilds());
    // checkAndRecord(that, r, that.getBuilds());
    return r;
}
/**
* Helper method for getDownstreamRelationship.
*
* For each given build, find the build number range of the given project and put that into the map.
*/
private void checkAndRecord(AbstractProject that, TreeMap<Integer, RangeSet> r, Collection<R> builds) {
    // For each build, record its downstream build-number range for 'that';
    // merge ranges when the same build number appears more than once.
    for (R build : builds) {
        RangeSet rs = build.getDownstreamRelationship(that);
        if(rs==null || rs.isEmpty())
            continue;
        int n = build.getNumber();
        RangeSet value = r.get(n);
        if(value==null)
            r.put(n,rs);
        else
            value.add(rs);
    }
}
/**
* Builds the dependency graph.
* Since 1.558, not abstract and by default includes dependencies contributed by {@link #triggers()}.
*/
// Default implementation contributes dependencies declared by the triggers.
protected void buildDependencyGraph(DependencyGraph graph) {
    triggers().buildDependencyGraph(this, graph);
}
@Override
protected SearchIndexBuilder makeSearchIndex() {
    // Extends the base index with parameterized-job entries (e.g. "build").
    return getParameterizedJobMixIn().extendSearchIndex(super.makeSearchIndex());
}
@Override
protected HistoryWidget createHistoryWidget() {
    // Build-history sidebar widget comes from the build mix-in.
    return buildMixIn.createHistoryWidget();
}
// True when the project defines build parameters.
public boolean isParameterized() {
    return getParameterizedJobMixIn().isParameterized();
}
//
//
// actions
//
//
/**
 * Schedules a new build command.
 * HTTP endpoint; permission checks and scheduling are handled by the
 * parameterized-job mix-in.
 */
public void doBuild( StaplerRequest req, StaplerResponse rsp, @QueryParameter TimeDuration delay ) throws IOException, ServletException {
getParameterizedJobMixIn().doBuild(req, rsp, delay);
}
/** @deprecated use {@link #doBuild(StaplerRequest, StaplerResponse, TimeDuration)} */
@Deprecated
public void doBuild(StaplerRequest req, StaplerResponse rsp) throws IOException, ServletException {
// Legacy entry point: parses the optional "delay" request parameter itself.
doBuild(req, rsp, TimeDuration.fromString(req.getParameter("delay")));
}
/**
 * Computes the build delay by taking the default quiet period and the
 * {@code delay} request parameter override into account.
 *
 * @deprecated as of 1.489
 *      Inject {@link TimeDuration}.
 */
@Deprecated
public int getDelay(StaplerRequest req) throws ServletException {
    String value = req.getParameter("delay");
    if (value == null) {
        return getQuietPeriod();
    }
    // TODO: more unit handling
    String number = value;
    if (number.endsWith("sec")) {
        number = number.substring(0, number.length() - 3);
    }
    if (number.endsWith("secs")) {
        number = number.substring(0, number.length() - 4);
    }
    try {
        return Integer.parseInt(number);
    } catch (NumberFormatException e) {
        throw new ServletException("Invalid delay parameter value: " + number);
    }
}
/**
 * Supports build trigger with parameters via an HTTP GET or POST.
 * Currently only String parameters are supported.
 */
public void doBuildWithParameters(StaplerRequest req, StaplerResponse rsp, @QueryParameter TimeDuration delay) throws IOException, ServletException {
// Delegated: the mix-in reads parameter values from the request and schedules the build.
getParameterizedJobMixIn().doBuildWithParameters(req, rsp, delay);
}
/** @deprecated use {@link #doBuildWithParameters(StaplerRequest, StaplerResponse, TimeDuration)} */
@Deprecated
public void doBuildWithParameters(StaplerRequest req, StaplerResponse rsp) throws IOException, ServletException {
// Legacy entry point: parses the optional "delay" request parameter itself.
doBuildWithParameters(req, rsp, TimeDuration.fromString(req.getParameter("delay")));
}
/**
 * Schedules a new SCM polling command.
 */
public void doPolling( StaplerRequest req, StaplerResponse rsp ) throws IOException, ServletException {
// Authorizes via the legacy build token (falling back to the normal permission check).
BuildAuthorizationToken.checkPermission((Job) this, authToken, req, rsp);
schedulePolling();
rsp.sendRedirect(".");
}
/**
 * Cancels a scheduled build.
 */
@RequirePOST
public void doCancelQueue( StaplerRequest req, StaplerResponse rsp ) throws IOException, ServletException {
// Delegated; the mix-in removes this job's pending item from the build queue.
getParameterizedJobMixIn().doCancelQueue(req, rsp);
}
/**
 * Rebinds this project's configuration from the submitted config form.
 * Order matters: JSON fields are read first, then the SCM is parsed, and
 * finally the old triggers are stopped before the new ones are started.
 */
@Override
protected void submit(StaplerRequest req, StaplerResponse rsp) throws IOException, ServletException, FormException {
super.submit(req,rsp);
JSONObject json = req.getSubmittedForm();
makeDisabled(json.optBoolean("disable"));
jdk = json.optString("jdk", null);
// For the optional blocks below, null means "inherit the system default".
if(json.optBoolean("hasCustomQuietPeriod", json.has("quiet_period"))) {
quietPeriod = json.optInt("quiet_period");
} else {
quietPeriod = null;
}
if(json.optBoolean("hasCustomScmCheckoutRetryCount", json.has("scmCheckoutRetryCount"))) {
scmCheckoutRetryCount = json.optInt("scmCheckoutRetryCount");
} else {
scmCheckoutRetryCount = null;
}
blockBuildWhenDownstreamBuilding = json.optBoolean("blockBuildWhenDownstreamBuilding");
blockBuildWhenUpstreamBuilding = json.optBoolean("blockBuildWhenUpstreamBuilding");
if(req.hasParameter("customWorkspace.directory")) {
// Workaround for JENKINS-25221 while plugins are being updated.
LOGGER.log(Level.WARNING, "label assignment is using legacy 'customWorkspace.directory'");
customWorkspace = Util.fixEmptyAndTrim(req.getParameter("customWorkspace.directory"));
} else if(json.optBoolean("hasCustomWorkspace", json.has("customWorkspace"))) {
customWorkspace = Util.fixEmptyAndTrim(json.optString("customWorkspace"));
} else {
customWorkspace = null;
}
if (json.has("scmCheckoutStrategy"))
scmCheckoutStrategy = req.bindJSON(SCMCheckoutStrategy.class,
json.getJSONObject("scmCheckoutStrategy"));
else
scmCheckoutStrategy = null;
if(json.optBoolean("hasSlaveAffinity", json.has("label"))) {
assignedNode = Util.fixEmptyAndTrim(json.optString("label"));
} else if(req.hasParameter("_.assignedLabelString")) {
// Workaround for JENKINS-25372 while plugin is being updated.
// Keep this condition second for JENKINS-25533
LOGGER.log(Level.WARNING, "label assignment is using legacy '_.assignedLabelString'");
assignedNode = Util.fixEmptyAndTrim(req.getParameter("_.assignedLabelString"));
} else {
assignedNode = null;
}
// No explicit label means the build may roam to any node.
canRoam = assignedNode==null;
keepDependencies = json.has("keepDependencies");
concurrentBuild = json.optBoolean("concurrentBuild");
authToken = BuildAuthorizationToken.create(req);
setScm(SCMS.parseSCM(req,this));
// Stop old trigger threads before swapping in the newly configured ones.
for (Trigger t : triggers())
t.stop();
triggers.replaceBy(buildDescribable(req, Trigger.for_(this)));
for (Trigger t : triggers())
t.start(this,true);
}
/**
 * @deprecated
 *      As of 1.261. Use {@link #buildDescribable(StaplerRequest, List)} instead.
 */
@Deprecated
protected final <T extends Describable<T>> List<T> buildDescribable(StaplerRequest req, List<? extends Descriptor<T>> descriptors, String prefix) throws FormException, ServletException {
// The prefix argument is no longer used; forwards to the two-argument form.
return buildDescribable(req,descriptors);
}
/**
 * Instantiates one describable object per descriptor that was enabled in
 * the submitted form, binding each from its section of the form JSON.
 */
protected final <T extends Describable<T>> List<T> buildDescribable(StaplerRequest req, List<? extends Descriptor<T>> descriptors)
    throws FormException, ServletException {
    JSONObject formData = req.getSubmittedForm();
    List<T> result = new Vector<T>();
    for (Descriptor<T> descriptor : descriptors) {
        String jsonName = descriptor.getJsonSafeClassName();
        if (req.getParameter(jsonName) == null) {
            continue; // this descriptor was not enabled in the form
        }
        result.add(descriptor.newInstance(req, formData.getJSONObject(jsonName)));
    }
    return result;
}
/**
 * Serves the workspace files.
 */
public DirectoryBrowserSupport doWs( StaplerRequest req, StaplerResponse rsp ) throws IOException, ServletException, InterruptedException {
    checkPermission(Item.WORKSPACE);
    FilePath ws = getSomeWorkspace();
    if (ws == null || !ws.exists()) {
        // if there's no workspace, report a nice error message
        // Would be good if when asked for *plain*, do something else!
        // (E.g. return 404, or send empty doc.)
        // Not critical; client can just check if content type is not text/plain,
        // which also serves to detect old versions of Hudson.
        req.getView(this,"noWorkspace.jelly").forward(req,rsp);
        return null;
    }
    Computer computer = ws.toComputer();
    String title = (computer == null)
        ? Messages.AbstractProject_WorkspaceTitle(getDisplayName())
        : Messages.AbstractProject_WorkspaceTitleOnComputer(getDisplayName(), computer.getDisplayName());
    return new DirectoryBrowserSupport(this, ws, title, "folder.png", true);
}
/**
 * Wipes out the workspace.
 */
@RequirePOST
public HttpResponse doDoWipeOutWorkspace() throws IOException, ServletException, InterruptedException {
checkPermission(Functions.isWipeOutPermissionEnabled() ? WIPEOUT : BUILD);
R b = getSomeBuildWithWorkspace();
FilePath ws = b!=null ? b.getWorkspace() : null;
// The SCM is consulted first and may veto the deletion.
if (ws!=null && getScm().processWorkspaceBeforeDeletion(this, ws, b.getBuiltOn())) {
ws.deleteRecursive();
for (WorkspaceListener wl : WorkspaceListener.all()) {
wl.afterDelete(this);
}
return new HttpRedirect(".");
} else {
// If we get here, that means the SCM blocked the workspace deletion.
return new ForwardToView(this,"wipeOutWorkspaceBlocked.jelly");
}
}
// Disables the project (no new builds); exposed both over HTTP (POST) and the CLI.
@CLIMethod(name="disable-job")
@RequirePOST
public HttpResponse doDisable() throws IOException, ServletException {
checkPermission(CONFIGURE);
makeDisabled(true);
return new HttpRedirect(".");
}
// Re-enables a disabled project; exposed both over HTTP (POST) and the CLI.
@CLIMethod(name="enable-job")
@RequirePOST
public HttpResponse doEnable() throws IOException, ServletException {
checkPermission(CONFIGURE);
makeDisabled(false);
return new HttpRedirect(".");
}
/**
 * RSS feed for changes in this project.
 * Iterates builds newest-first and emits one feed item per changelog entry.
 */
public void doRssChangelog( StaplerRequest req, StaplerResponse rsp ) throws IOException, ServletException {
// Pairs a changelog entry with its index within the build, so each entry
// gets a stable anchor URL ("changes#detail<idx>").
class FeedItem {
ChangeLogSet.Entry e;
int idx;
public FeedItem(Entry e, int idx) {
this.e = e;
this.idx = idx;
}
AbstractBuild<?,?> getBuild() {
return e.getParent().build;
}
}
List<FeedItem> entries = new ArrayList<FeedItem>();
for(R r=getLastBuild(); r!=null; r=r.getPreviousBuild()) {
int idx=0;
for( ChangeLogSet.Entry e : r.getChangeSet())
entries.add(new FeedItem(e,idx++));
}
// The adapter maps each FeedItem onto the fields of an RSS entry.
RSS.forwardToRss(
getDisplayName()+' '+getScm().getDescriptor().getDisplayName()+" changes",
getUrl()+"changes",
entries, new FeedAdapter<FeedItem>() {
public String getEntryTitle(FeedItem item) {
return "#"+item.getBuild().number+' '+item.e.getMsg()+" ("+item.e.getAuthor()+")";
}
public String getEntryUrl(FeedItem item) {
return item.getBuild().getUrl()+"changes#detail"+item.idx;
}
public String getEntryID(FeedItem item) {
return getEntryUrl(item);
}
public String getEntryDescription(FeedItem item) {
// One affected file path per line.
StringBuilder buf = new StringBuilder();
for(String path : item.e.getAffectedPaths())
buf.append(path).append('\n');
return buf.toString();
}
public Calendar getEntryTimestamp(FeedItem item) {
return item.getBuild().getTimestamp();
}
public String getEntryAuthor(FeedItem entry) {
return JenkinsLocationConfiguration.get().getAdminAddress();
}
},
req, rsp );
}
/**
 * {@link AbstractProject} subtypes should implement this base class as a descriptor.
 *
 * @since 1.294
 */
public static abstract class AbstractProjectDescriptor extends TopLevelItemDescriptor {
/**
 * {@link AbstractProject} subtypes can override this method to veto some {@link Descriptor}s
 * from showing up on their configuration screen. This is often useful when you are building
 * a workflow/company specific project type, where you want to limit the number of choices
 * given to the users.
 *
 * <p>
 * Some {@link Descriptor}s define their own schemes for controlling applicability
 * (such as {@link BuildStepDescriptor#isApplicable(Class)}),
 * This method works like AND in conjunction with them;
 * Both this method and that method need to return true in order for a given {@link Descriptor}
 * to show up for the given {@link Project}.
 *
 * <p>
 * The default implementation returns true for everything.
 *
 * @see BuildStepDescriptor#isApplicable(Class)
 * @see BuildWrapperDescriptor#isApplicable(AbstractProject)
 * @see TriggerDescriptor#isApplicable(Item)
 */
@Override
public boolean isApplicable(Descriptor descriptor) {
return true;
}
@Restricted(DoNotUse.class)
public FormValidation doCheckAssignedLabelString(@AncestorInPath AbstractProject<?,?> project,
@QueryParameter String value) {
// Provide a legacy interface in case plugins are not going through p:config-assignedLabel
// see: JENKINS-25372
LOGGER.log(Level.WARNING, "checking label via legacy '_.assignedLabelString'");
return doCheckLabel(project, value);
}
// Form validation for the label expression field.
public FormValidation doCheckLabel(@AncestorInPath AbstractProject<?,?> project,
@QueryParameter String value) {
return validateLabelExpression(value, project);
}
/**
 * Validate label expression string.
 *
 * @param project May be specified to perform project specific validation.
 * @since 1.590
 */
public static @Nonnull FormValidation validateLabelExpression(String value, @CheckForNull AbstractProject<?, ?> project) {
if (Util.fixEmpty(value)==null)
return FormValidation.ok(); // nothing typed yet
// Syntax check first: reject malformed boolean label expressions.
try {
Label.parseExpression(value);
} catch (ANTLRException e) {
return FormValidation.error(e,
Messages.AbstractProject_AssignedLabelString_InvalidBooleanExpression(e.getMessage()));
}
// NOTE(review): Jenkins.getInstance() is @CheckForNull in later cores — confirm j cannot be null on this path.
Jenkins j = Jenkins.getInstance();
Label l = j.getLabel(value);
// Expression parses but matches no node: warn, suggesting a near-miss atom if possible.
if (l.isEmpty()) {
for (LabelAtom a : l.listAtoms()) {
if (a.isEmpty()) {
LabelAtom nearest = LabelAtom.findNearest(a.getName());
return FormValidation.warning(Messages.AbstractProject_AssignedLabelString_NoMatch_DidYouMean(a.getName(),nearest.getDisplayName()));
}
}
return FormValidation.warning(Messages.AbstractProject_AssignedLabelString_NoMatch());
}
// Let LabelValidator extensions apply project-specific restrictions.
if (project != null) {
for (AbstractProject.LabelValidator v : j
.getExtensionList(AbstractProject.LabelValidator.class)) {
FormValidation result = v.check(project, l);
if (!FormValidation.Kind.OK.equals(result.kind)) {
return result;
}
}
}
return FormValidation.okWithMarkup(Messages.AbstractProject_LabelLink(
j.getRootUrl(), Util.escape(l.getName()), l.getUrl(), l.getNodes().size(), l.getClouds().size())
);
}
// A custom workspace, once enabled, must not be blank.
public FormValidation doCheckCustomWorkspace(@QueryParameter String customWorkspace){
if(Util.fixEmptyAndTrim(customWorkspace)==null)
return FormValidation.error(Messages.AbstractProject_CustomWorkspaceEmpty());
else
return FormValidation.ok();
}
// Autocompletion: full names of readable jobs that start with the typed prefix.
public AutoCompletionCandidates doAutoCompleteUpstreamProjects(@QueryParameter String value) {
AutoCompletionCandidates candidates = new AutoCompletionCandidates();
List<Job> jobs = Jenkins.getInstance().getItems(Job.class);
for (Job job: jobs) {
if (job.getFullName().startsWith(value)) {
if (job.hasPermission(Item.READ)) {
candidates.add(job.getFullName());
}
}
}
return candidates;
}
@Restricted(DoNotUse.class)
public AutoCompletionCandidates doAutoCompleteAssignedLabelString(@QueryParameter String value) {
// Provide a legacy interface in case plugins are not going through p:config-assignedLabel
// see: JENKINS-25372
LOGGER.log(Level.WARNING, "autocompleting label via legacy '_.assignedLabelString'");
return doAutoCompleteLabel(value);
}
// Autocompletion for the label field: matches defined labels against each
// candidate search term extracted from the raw input.
public AutoCompletionCandidates doAutoCompleteLabel(@QueryParameter String value) {
AutoCompletionCandidates c = new AutoCompletionCandidates();
Set<Label> labels = Jenkins.getInstance().getLabels();
List<String> queries = new AutoCompleteSeeder(value).getSeeds();
for (String term : queries) {
for (Label l : labels) {
if (l.getName().startsWith(term)) {
c.add(l.getName());
}
}
}
return c;
}
public List<SCMCheckoutStrategyDescriptor> getApplicableSCMCheckoutStrategyDescriptors(AbstractProject p) {
return SCMCheckoutStrategyDescriptor._for(p);
}
/**
 * Utility class for taking the current input value and computing a list
 * of potential terms to match against the list of defined labels.
 */
static class AutoCompleteSeeder {
private String source;
AutoCompleteSeeder(String source) {
this.source = source;
}
// Returns candidate prefixes: "" means "suggest everything".
List<String> getSeeds() {
ArrayList<String> terms = new ArrayList<String>();
boolean trailingQuote = source.endsWith("\"");
boolean leadingQuote = source.startsWith("\"");
boolean trailingSpace = source.endsWith(" ");
if (trailingQuote || (trailingSpace && !leadingQuote)) {
// Just closed a quote or finished a word: start a fresh term.
terms.add("");
} else {
if (leadingQuote) {
int quote = source.lastIndexOf('"');
if (quote == 0) {
// Unterminated quote: everything after it is the term.
terms.add(source.substring(1));
} else {
terms.add("");
}
} else {
int space = source.lastIndexOf(' ');
if (space > -1) {
// Complete only the word after the last space.
terms.add(source.substring(space+1));
} else {
terms.add(source);
}
}
}
return terms;
}
}
}
/**
 * Finds a {@link AbstractProject} that has the name closest to the given name.
 * @see Items#findNearest
 */
public static @CheckForNull AbstractProject findNearest(String name) {
// Resolves relative to the Jenkins root item group.
return findNearest(name,Jenkins.getInstance());
}
/**
 * Finds a {@link AbstractProject} whose name (when referenced from the specified context) is closest to the given name.
 *
 * @since 1.419
 * @see Items#findNearest
 */
public static @CheckForNull AbstractProject findNearest(String name, ItemGroup context) {
// Fuzzy name matching is implemented centrally in Items.
return Items.findNearest(AbstractProject.class, name, context);
}
/**
 * Orders integers descending (newest build number first), used by
 * {@link #getRelationship}. Uses {@link Integer#compare} rather than
 * subtraction: {@code o2-o1} can overflow for operands of opposite sign
 * with large magnitude, yielding a wrong sign and violating the
 * {@link Comparator} contract.
 */
private static final Comparator<Integer> REVERSE_INTEGER_COMPARATOR = new Comparator<Integer>() {
    public int compare(Integer o1, Integer o2) {
        return Integer.compare(o2, o1);
    }
};
// Class-scoped logger, also used by AbstractProjectDescriptor above.
private static final Logger LOGGER = Logger.getLogger(AbstractProject.class.getName());
/**
 * @deprecated Just use {@link #CANCEL}.
 */
@Deprecated
public static final Permission ABORT = CANCEL;
/**
 * @deprecated Use {@link ParameterizedJobMixIn#BUILD_NOW_TEXT}.
 */
@Deprecated
public static final Message<AbstractProject> BUILD_NOW_TEXT = new Message<AbstractProject>();
/**
 * Used for CLI binding.
 * Resolves a NAME argument to a project, failing with a helpful
 * "did you mean" message when no exact match exists.
 */
@CLIResolver
public static AbstractProject resolveForCLI(
@Argument(required=true,metaVar="NAME",usage="Job name") String name) throws CmdLineException {
AbstractProject item = Jenkins.getInstance().getItemByFullName(name, AbstractProject.class);
if (item==null) {
// Suggest the closest-named project, if any, in the error message.
AbstractProject project = AbstractProject.findNearest(name);
throw new CmdLineException(null, project == null ? Messages.AbstractItem_NoSuchJobExistsWithoutSuggestion(name)
: Messages.AbstractItem_NoSuchJobExists(name, project.getFullName()));
}
return item;
}
// User-specified workspace path, or null when Jenkins assigns one; see setCustomWorkspace.
public String getCustomWorkspace() {
return customWorkspace;
}
/**
 * User-specified workspace directory, or null if it's up to Jenkins.
 *
 * <p>
 * Normally a project uses the workspace location assigned by its parent container,
 * but sometimes people have builds that have hard-coded paths.
 *
 * <p>
 * This is not {@link File} because it may have to hold a path representation on another OS.
 *
 * <p>
 * If this path is relative, it's resolved against {@link Node#getRootPath()} on the node where this workspace
 * is prepared.
 *
 * @since 1.410
 */
public void setCustomWorkspace(String customWorkspace) throws IOException {
// Empty / whitespace-only input is normalized to null (= use the default location).
this.customWorkspace= Util.fixEmptyAndTrim(customWorkspace);
save();
}
/**
 * Plugins may want to contribute additional restrictions on the use of specific labels for specific projects.
 * This extension point allows such restrictions.
 * Consulted by {@link AbstractProjectDescriptor#validateLabelExpression}.
 *
 * @since 1.540
 */
public static abstract class LabelValidator implements ExtensionPoint {
/**
 * Check the use of the label within the specified context.
 *
 * @param project the project that wants to restrict itself to the specified label.
 * @param label the label that the project wants to restrict itself to.
 * @return the {@link FormValidation} result.
 */
@Nonnull
public abstract FormValidation check(@Nonnull AbstractProject<?, ?> project, @Nonnull Label label);
}
}
| 84,413 | 35.574523 | 218 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/AbstractProject2.java
|
/*
* The MIT License
*
* Copyright (c) 2004-2011, Sun Microsystems, Inc., Kohsuke Kawaguchi,
* Brian Westrich, Erik Ramfelt, Ertan Deniz, Jean-Baptiste Quenot,
* Luca Domenico Milanesio, R. Tyler Ballance, Stephen Connolly, Tom Huybrechts,
* id:cactusman, Yahoo! Inc., Andrew Bayer, Manufacture Francaise des Pneumatiques
* Michelin, Romain Seguy
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package hudson.model;
import antlr.ANTLRException;
import com.infradna.tool.bridge_method_injector.WithBridgeMethods;
import hudson.AbortException;
import hudson.CopyOnWrite;
import hudson.EnvVars;
import hudson.ExtensionList;
import hudson.ExtensionPoint;
import hudson.FeedAdapter;
import hudson.FilePath;
import hudson.Functions;
import hudson.Launcher;
import hudson.Util;
import hudson.cli.declarative.CLIMethod;
import hudson.cli.declarative.CLIResolver;
import hudson.model.Cause.LegacyCodeCause;
import hudson.model.Descriptor.FormException;
import hudson.model.Fingerprint.RangeSet;
import hudson.model.Node.Mode;
import hudson.model.Queue.Executable;
import hudson.model.Queue.Task;
import hudson.model.labels.LabelAtom;
import hudson.model.labels.LabelExpression;
import hudson.model.listeners.ItemListener;
import hudson.model.listeners.SCMPollListener;
import hudson.model.queue.CauseOfBlockage;
import hudson.model.queue.QueueTaskFuture;
import hudson.model.queue.SubTask;
import hudson.model.queue.SubTaskContributor;
import hudson.scm.ChangeLogSet;
import hudson.scm.ChangeLogSet.Entry;
import hudson.scm.NullSCM;
import hudson.scm.PollingResult;
import static hudson.scm.PollingResult.*;
import hudson.scm.SCM;
import hudson.scm.SCMRevisionState;
import hudson.scm.SCMS;
import hudson.search.SearchIndexBuilder;
import hudson.security.ACL;
import hudson.security.Permission;
import hudson.slaves.Cloud;
import hudson.slaves.WorkspaceList;
import hudson.tasks.BuildStep;
import hudson.tasks.BuildStepDescriptor;
import hudson.tasks.BuildTrigger;
import hudson.tasks.BuildWrapperDescriptor;
import hudson.tasks.Publisher;
import hudson.triggers.SCMTrigger;
import hudson.triggers.Trigger;
import hudson.triggers.TriggerDescriptor;
import hudson.util.AlternativeUiTextProvider;
import hudson.util.AlternativeUiTextProvider.Message;
import hudson.util.DescribableList;
import hudson.util.FormValidation;
import hudson.util.TimeUnit2;
import hudson.widgets.HistoryWidget;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Calendar;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.Vector;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.annotation.CheckForNull;
import javax.annotation.Nonnull;
import javax.servlet.ServletException;
import jenkins.model.BlockedBecauseOfBuildInProgress;
import jenkins.model.Jenkins;
import jenkins.model.JenkinsLocationConfiguration;
import jenkins.model.ParameterizedJobMixIn;
import jenkins.model.Uptime;
import jenkins.model.lazy.LazyBuildMixIn;
import jenkins.scm.DefaultSCMCheckoutStrategyImpl;
import jenkins.scm.SCMCheckoutStrategy;
import jenkins.scm.SCMCheckoutStrategyDescriptor;
import jenkins.scm.SCMDecisionHandler;
import jenkins.util.TimeDuration;
import net.sf.json.JSONObject;
import org.acegisecurity.Authentication;
import org.jenkinsci.bytecode.AdaptField;
import org.kohsuke.accmod.Restricted;
import org.kohsuke.accmod.restrictions.DoNotUse;
import org.kohsuke.accmod.restrictions.NoExternalUse;
import org.kohsuke.args4j.Argument;
import org.kohsuke.args4j.CmdLineException;
import org.kohsuke.stapler.AncestorInPath;
import org.kohsuke.stapler.ForwardToView;
import org.kohsuke.stapler.HttpRedirect;
import org.kohsuke.stapler.HttpResponse;
import org.kohsuke.stapler.QueryParameter;
import org.kohsuke.stapler.StaplerRequest;
import org.kohsuke.stapler.StaplerResponse;
import org.kohsuke.stapler.export.Exported;
import org.kohsuke.stapler.interceptor.RequirePOST;
/**
* Base implementation of {@link Job}s that build software.
*
* For now this is primarily the common part of {@link Project} and MavenModule.
*
* @author Kohsuke Kawaguchi
* @see AbstractBuild
*/
@SuppressWarnings("rawtypes")
public abstract class AbstractProject<P extends AbstractProject<P,R>,R extends AbstractBuild<P,R>> extends Job<P,R> implements BuildableItem, LazyBuildMixIn.LazyLoadingJob<P,R>, ParameterizedJobMixIn.ParameterizedJob {
/**
* {@link SCM} associated with the project.
* To allow derived classes to link {@link SCM} config to elsewhere,
* access to this variable should always go through {@link #getScm()}.
*/
private volatile SCM scm = new NullSCM();
/**
* Controls how the checkout is done.
*/
private volatile SCMCheckoutStrategy scmCheckoutStrategy;
/**
* State returned from {@link SCM#poll(AbstractProject, Launcher, FilePath, TaskListener, SCMRevisionState)}.
*/
private volatile transient SCMRevisionState pollingBaseline = null;
private transient LazyBuildMixIn<P,R> buildMixIn;
/**
* All the builds keyed by their build number.
* Kept here for binary compatibility only; otherwise use {@link #buildMixIn}.
* External code should use {@link #getBuildByNumber(int)} or {@link #getLastBuild()} and traverse via
* {@link Run#getPreviousBuild()}
*/
@Restricted(NoExternalUse.class)
protected transient RunMap<R> builds;
/**
* The quiet period. Null to delegate to the system default.
*/
private volatile Integer quietPeriod = null;
/**
* The retry count. Null to delegate to the system default.
*/
private volatile Integer scmCheckoutRetryCount = null;
/**
* If this project is configured to be only built on a certain label,
* this value will be set to that label.
*
* For historical reasons, this is called 'assignedNode'. Also for
* a historical reason, null to indicate the affinity
* with the master node.
*
* @see #canRoam
*/
private String assignedNode;
/**
* True if this project can be built on any node.
*
* <p>
* This somewhat ugly flag combination is so that we can migrate
* existing Hudson installations nicely.
*/
private volatile boolean canRoam;
/**
* True to suspend new builds.
*/
protected volatile boolean disabled;
/**
* True to keep builds of this project in queue when downstream projects are
* building. False by default to keep from breaking existing behavior.
*/
protected volatile boolean blockBuildWhenDownstreamBuilding = false;
/**
* True to keep builds of this project in queue when upstream projects are
* building. False by default to keep from breaking existing behavior.
*/
protected volatile boolean blockBuildWhenUpstreamBuilding = false;
/**
* Identifies {@link JDK} to be used.
* Null if no explicit configuration is required.
*
* <p>
* Can't store {@link JDK} directly because {@link Jenkins} and {@link Project}
* are saved independently.
*
* @see Jenkins#getJDK(String)
*/
private volatile String jdk;
// Legacy per-job token authorizing remote build/polling requests; may be null.
private volatile BuildAuthorizationToken authToken = null;
/**
 * List of all {@link Trigger}s for this project.
 */
@AdaptField(was=List.class)
protected volatile DescribableList<Trigger<?>,TriggerDescriptor> triggers = new DescribableList<Trigger<?>,TriggerDescriptor>(this);
// Used by triggers() for a lock-free compare-and-set lazy initialization of the field above.
private static final AtomicReferenceFieldUpdater<AbstractProject,DescribableList> triggersUpdater
= AtomicReferenceFieldUpdater.newUpdater(AbstractProject.class,DescribableList.class,"triggers");
/**
 * {@link Action}s contributed from subsidiary objects associated with
 * {@link AbstractProject}, such as from triggers, builders, publishers, etc.
 *
 * We don't want to persist them separately, and these actions
 * come and go as configuration change, so it's kept separate.
 */
@CopyOnWrite
protected transient volatile List<Action> transientActions = new Vector<Action>();
// True if multiple builds of this project may run at the same time; see isConcurrentBuild().
private boolean concurrentBuild;
/**
 * See {@link #setCustomWorkspace(String)}.
 *
 * @since 1.410
 */
private String customWorkspace;
/**
 * Creates a project under the given parent. If the installation already has
 * agent nodes, the new project is allowed to roam to any node by default.
 */
protected AbstractProject(ItemGroup parent, String name) {
super(parent,name);
buildMixIn = createBuildMixIn();
builds = buildMixIn.getRunMap();
final Jenkins j = Jenkins.getInstance();
final List<Node> nodes = j != null ? j.getNodes() : null;
if(nodes!=null && !nodes.isEmpty()) {
// if a new job is configured with Hudson that already has agent nodes
// make it roamable by default
canRoam = true;
}
}
// Anonymous subclass binds the lazy-loading build mix-in back to this project instance.
private LazyBuildMixIn<P,R> createBuildMixIn() {
return new LazyBuildMixIn<P,R>() {
@SuppressWarnings("unchecked") // untypable
@Override protected P asJob() {
return (P) AbstractProject.this;
}
@Override protected Class<R> getBuildClass() {
return AbstractProject.this.getBuildClass();
}
};
}
// Exposes the mix-in managing lazily-loaded build records for this job.
@Override public LazyBuildMixIn<P,R> getLazyBuildMixIn() {
return buildMixIn;
}
// A fresh mix-in instance per call; it is stateless apart from the back-reference to this job.
private ParameterizedJobMixIn<P,R> getParameterizedJobMixIn() {
return new ParameterizedJobMixIn<P,R>() {
@SuppressWarnings("unchecked") // untypable
@Override protected P asJob() {
return (P) AbstractProject.this;
}
};
}
// Persists the configuration, then recomputes the transient actions which
// may depend on the just-saved configuration.
@Override
public synchronized void save() throws IOException {
super.save();
updateTransientActions();
}
// Lifecycle hook for brand-new (not loaded-from-disk) projects.
@Override
public void onCreatedFromScratch() {
super.onCreatedFromScratch();
buildMixIn.onCreatedFromScratch();
builds = buildMixIn.getRunMap();
// solicit initial contributions, especially from TransientProjectActionFactory
updateTransientActions();
}
/**
 * Restores runtime state after this project is loaded from disk:
 * re-creates the build mix-in if the persisted form predates it, rebinds
 * triggers to this instance and starts them, and repairs null fields.
 */
@Override
public void onLoad(ItemGroup<? extends Item> parent, String name) throws IOException {
super.onLoad(parent, name);
if (buildMixIn == null) {
buildMixIn = createBuildMixIn();
}
buildMixIn.onLoad(parent, name);
builds = buildMixIn.getRunMap();
triggers().setOwner(this);
for (Trigger t : triggers()) {
try {
t.start(this, Items.currentlyUpdatingByXml());
} catch (Throwable e) {
// One broken trigger must not prevent the project from loading.
LOGGER.log(Level.WARNING, "could not start trigger while loading project '" + getFullName() + "'", e);
}
}
if(scm==null)
scm = new NullSCM(); // perhaps it was pointing to a plugin that no longer exists.
if(transientActions==null)
transientActions = new Vector<Action>(); // happens when loaded from disk
updateTransientActions();
}
// Lazily initializes the triggers field via compare-and-set so concurrent
// callers never observe null and at most one instance wins.
@WithBridgeMethods(List.class)
protected DescribableList<Trigger<?>,TriggerDescriptor> triggers() {
if (triggers == null) {
triggersUpdater.compareAndSet(this,null,new DescribableList<Trigger<?>,TriggerDescriptor>(this));
}
return triggers;
}
/**
 * Adds JDK-related variables on top of the base job environment.
 * The configured JDK is re-resolved for the target node when one is given.
 */
@Override
public EnvVars getEnvironment(Node node, TaskListener listener) throws IOException, InterruptedException {
EnvVars env = super.getEnvironment(node, listener);
JDK jdkTool = getJDK();
if (jdkTool != null) {
if (node != null) { // just in case were not in a build
jdkTool = jdkTool.forNode(node, listener);
}
jdkTool.buildEnvVars(env);
} else if (!JDK.isDefaultName(jdk)) {
// A specific JDK was configured but is no longer defined; warn the build log.
listener.getLogger().println("No JDK named ‘" + jdk + "’ found");
}
return env;
}
// Disables the project first so no build starts mid-deletion, gives the SCM
// and the node's file system provisioner a chance to clean the workspace,
// then performs the normal item deletion.
@Override
protected void performDelete() throws IOException, InterruptedException {
// prevent a new build while a delete operation is in progress
makeDisabled(true);
FilePath ws = getWorkspace();
if(ws!=null) {
Node on = getLastBuiltOn();
getScm().processWorkspaceBeforeDeletion(this, ws, on);
if(on!=null)
on.getFileSystemProvisioner().discardWorkspace(this,ws);
}
super.performDelete();
}
/**
 * Does this project perform concurrent builds?
 * @since 1.319
 */
@Exported
public boolean isConcurrentBuild() {
return concurrentBuild;
}
// Toggles concurrent builds and persists the change immediately.
public void setConcurrentBuild(boolean b) throws IOException {
concurrentBuild = b;
save();
}
/**
 * If this project is configured to be always built on this node,
 * return that {@link Node}'s label. Otherwise null (free to roam).
 * A null assignedNode with roaming disabled means the master's self label.
 */
public @CheckForNull Label getAssignedLabel() {
    if (canRoam) {
        return null;
    }
    Jenkins j = Jenkins.getInstance();
    return assignedNode == null ? j.getSelfLabel() : j.getLabel(assignedNode);
}
/**
 * Set of labels relevant to this job.
 *
 * This method is used to determine what agents are relevant to jobs, for example by {@link View}s.
 * It does not affect the scheduling. This information is informational and the best-effort basis.
 *
 * @since 1.456
 * @return
 *      Minimally it should contain {@link #getAssignedLabel()}. The set can contain null element
 *      to correspond to the null return value from {@link #getAssignedLabel()}.
 */
public Set<Label> getRelevantLabels() {
// Default implementation: just the assigned label (possibly a singleton of null).
return Collections.singleton(getAssignedLabel());
}
/**
 * Gets the textual representation of the assigned label as it was entered by the user.
 */
public String getAssignedLabelString() {
if (canRoam || assignedNode==null) return null;
try {
// If the stored value is a valid expression, return it verbatim.
LabelExpression.parseExpression(assignedNode);
return assignedNode;
} catch (ANTLRException e) {
// must be old label or host name that includes whitespace or other unsafe chars
return LabelAtom.escape(assignedNode);
}
}
/**
 * Sets the assigned label. Null means the project may roam to any node;
 * the master's self label is stored as null with roaming disabled.
 * Persists the change.
 */
public void setAssignedLabel(Label l) throws IOException {
    if (l == null) {
        canRoam = true;
        assignedNode = null;
    } else {
        canRoam = false;
        assignedNode = (l == Jenkins.getInstance().getSelfLabel()) ? null : l.getExpression();
    }
    save();
}
/**
 * Assigns this job to the given node. A convenience method over {@link #setAssignedLabel(Label)}.
 */
public void setAssignedNode(Node l) throws IOException {
// A node's self label uniquely identifies that node.
setAssignedLabel(l.getSelfLabel());
}
/**
 * Get the term used in the UI to represent this kind of {@link AbstractProject}.
 * Must start with a capital letter.
 */
@Override
public String getPronoun() {
// Allows plugins/localization to override the default "Project" wording.
return AlternativeUiTextProvider.get(PRONOUN, this,Messages.AbstractProject_Pronoun());
}
/**
 * Gets the human readable display name to be rendered in the "Build Now" link.
 *
 * @since 1.401
 */
public String getBuildNowText() {
// For compatibility, still use the deprecated replacer if specified.
return AlternativeUiTextProvider.get(BUILD_NOW_TEXT, this, getParameterizedJobMixIn().getBuildNowText());
}
/**
 * Gets the nearest ancestor {@link TopLevelItem} that's also an {@link AbstractProject}.
 *
 * <p>
 * Some projects (such as matrix projects, Maven projects, or promotion processes) form a tree
 * of jobs that acts as a single unit. This method finds the top-most dominating job that
 * covers such a tree.
 *
 * @return never null.
 * @see AbstractBuild#getRootBuild()
 */
public AbstractProject<?,?> getRootProject() {
    if (!(this instanceof TopLevelItem)) {
        ItemGroup parent = this.getParent();
        if (parent instanceof AbstractProject) {
            // recurse up the ownership chain
            return ((AbstractProject) parent).getRootProject();
        }
    }
    return this;
}
/**
 * Gets the directory where the module is checked out.
 *
 * @return
 *      null if the workspace is on an agent that's not connected.
 * @deprecated as of 1.319
 *      To support concurrent builds of the same project, this method is moved to {@link AbstractBuild}.
 *      For backward compatibility, this method returns the right {@link AbstractBuild#getWorkspace()} if called
 *      from {@link Executor}, and otherwise the workspace of the last build.
 *
 *      <p>
 *      If you are calling this method during a build from an executor, switch it to {@link AbstractBuild#getWorkspace()}.
 *      If you are calling this method to serve a file from the workspace, doing a form validation, etc., then
 *      use {@link #getSomeWorkspace()}
 */
@Deprecated
public final FilePath getWorkspace() {
// resolve "the current build" the same way the other deprecated methods do
AbstractBuild b = getBuildForDeprecatedMethods();
return b != null ? b.getWorkspace() : null;
}
/**
 * Various deprecated methods in this class all need the 'current' build.
 * Prefers the build currently running on this executor (when it belongs to
 * this project), otherwise falls back to the last build.
 *
 * @return an AbstractBuild for deprecated methods to use, or null if none is available.
 */
private AbstractBuild getBuildForDeprecatedMethods() {
    Executor executor = Executor.currentExecutor();
    if (executor != null) {
        Executable current = executor.getCurrentExecutable();
        if (current instanceof AbstractBuild) {
            AbstractBuild candidate = (AbstractBuild) current;
            if (candidate.getProject() == this) {
                return candidate;
            }
        }
    }
    // not on an executor of this project: use the most recent build, if any
    return getLastBuild();
}
/**
 * Gets a workspace for some build of this project.
 *
 * <p>
 * Useful for form-field validation and similar cases where exactly which build
 * owned the workspace is unimportant. Makes a cursory effort: first a recent
 * build with a workspace, then any registered {@link WorkspaceBrowser}.
 *
 * @return
 *      null if there's no available workspace.
 * @since 1.319
 */
public final @CheckForNull FilePath getSomeWorkspace() {
    R build = getSomeBuildWithWorkspace();
    if (build != null) {
        return build.getWorkspace();
    }
    for (WorkspaceBrowser browser : ExtensionList.lookup(WorkspaceBrowser.class)) {
        FilePath candidate = browser.getWorkspace(this);
        if (candidate != null) {
            return candidate;
        }
    }
    return null;
}
/**
 * Gets some build that has a live workspace.
 *
 * <p>Scans at most the five most recent builds. (The original declared the
 * {@code cnt} counter and tested {@code cnt&lt;5} but never incremented it,
 * so the cap was dead code and the entire build history was walked.)
 *
 * @return null if no such build exists among the five most recent.
 */
public final R getSomeBuildWithWorkspace() {
    int cnt = 0;
    for (R b = getLastBuild(); cnt < 5 && b != null; b = b.getPreviousBuild(), cnt++) {
        FilePath ws = b.getWorkspace();
        if (ws != null) return b;
    }
    return null;
}
/**
 * Like {@link #getSomeBuildWithWorkspace()} but additionally requires the
 * workspace directory to still exist on disk.
 *
 * <p>Scans at most the five most recent builds; the original never
 * incremented {@code cnt}, so its intended cap was ineffective.
 *
 * @return null if no such build exists among the five most recent.
 */
private R getSomeBuildWithExistingWorkspace() throws IOException, InterruptedException {
    int cnt = 0;
    for (R b = getLastBuild(); cnt < 5 && b != null; b = b.getPreviousBuild(), cnt++) {
        FilePath ws = b.getWorkspace();
        if (ws != null && ws.exists()) return b;
    }
    return null;
}
/**
 * Returns the root directory of the checked-out module.
 * <p>
 * This is usually where <tt>pom.xml</tt>, <tt>build.xml</tt>
 * and so on exists.
 *
 * @deprecated as of 1.319
 *      See {@link #getWorkspace()} for a migration strategy.
 */
@Deprecated
public FilePath getModuleRoot() {
// same 'current build' resolution as the other deprecated accessors
AbstractBuild b = getBuildForDeprecatedMethods();
return b != null ? b.getModuleRoot() : null;
}
/**
 * Returns the root directories of all checked-out modules.
 * <p>
 * Some SCMs support checking out multiple modules into the same workspace.
 * In these cases, the returned array will have a length greater than one.
 * @return The roots of all modules checked out from the SCM, or null when no build is available.
 *
 * @deprecated as of 1.319
 *      See {@link #getWorkspace()} for a migration strategy.
 */
@Deprecated
public FilePath[] getModuleRoots() {
AbstractBuild b = getBuildForDeprecatedMethods();
return b != null ? b.getModuleRoots() : null;
}
/** Quiet period for this project in seconds; falls back to the global setting when not customized. */
public int getQuietPeriod() {
return quietPeriod!=null ? quietPeriod : Jenkins.getInstance().getQuietPeriod();
}
/** The configured SCM checkout strategy, or the default implementation when none is set. */
public SCMCheckoutStrategy getScmCheckoutStrategy() {
return scmCheckoutStrategy == null ? new DefaultSCMCheckoutStrategyImpl() : scmCheckoutStrategy;
}
/** Sets the SCM checkout strategy and persists the change. */
public void setScmCheckoutStrategy(SCMCheckoutStrategy scmCheckoutStrategy) throws IOException {
this.scmCheckoutStrategy = scmCheckoutStrategy;
save();
}
/** SCM checkout retry count; falls back to the global value when not customized. */
public int getScmCheckoutRetryCount() {
return scmCheckoutRetryCount !=null ? scmCheckoutRetryCount : Jenkins.getInstance().getScmCheckoutRetryCount();
}
// ugly name because of EL (the getter must be spellable as 'hasCustomQuietPeriod' in expressions)
/** Whether a project-specific quiet period is configured. */
public boolean getHasCustomQuietPeriod() {
return quietPeriod!=null;
}
/**
 * Sets the custom quiet period of this project, or reverts to the global default if null is given.
 *
 * @param seconds quiet period in seconds, or null to use the global default.
 */
public void setQuietPeriod(Integer seconds) throws IOException {
this.quietPeriod = seconds;
save();
}
/** Whether a project-specific SCM checkout retry count is configured. */
public boolean hasCustomScmCheckoutRetryCount(){
return scmCheckoutRetryCount != null;
}
/** A project is buildable unless it is disabled or still holding off builds until first save. */
@Override
public boolean isBuildable() {
return !isDisabled() && !isHoldOffBuildUntilSave();
}
/**
 * Used in <tt>sidepanel.jelly</tt> to decide whether to display
 * the config/delete/build links.
 *
 * @return true by default.
 */
public boolean isConfigurable() {
return true;
}
/** Whether builds of this project should block while downstream projects are building. */
public boolean blockBuildWhenDownstreamBuilding() {
return blockBuildWhenDownstreamBuilding;
}
/** Updates the downstream-blocking flag and persists the change. */
public void setBlockBuildWhenDownstreamBuilding(boolean b) throws IOException {
blockBuildWhenDownstreamBuilding = b;
save();
}
/** Whether builds of this project should block while upstream projects are building. */
public boolean blockBuildWhenUpstreamBuilding() {
return blockBuildWhenUpstreamBuilding;
}
/** Updates the upstream-blocking flag and persists the change. */
public void setBlockBuildWhenUpstreamBuilding(boolean b) throws IOException {
blockBuildWhenUpstreamBuilding = b;
save();
}
/** Whether this project is currently disabled (not buildable). */
public boolean isDisabled() {
return disabled;
}
/**
 * Form validation for the SCM checkout retry count field.
 * Blank input is accepted (the field is optional); otherwise only digits are allowed.
 */
public FormValidation doCheckRetryCount(@QueryParameter String value)throws IOException,ServletException{
    // retry count is optional so this is ok
    if (value == null || value.trim().equals("")) {
        return FormValidation.ok();
    }
    return value.matches("[0-9]*")
            ? FormValidation.ok()
            : FormValidation.error("Invalid retry count");
}
/**
 * Marks the build as disabled.
 * The method will ignore the disable command if {@link #supportsMakeDisabled()}
 * returns false. The enable command will be executed in any case.
 * @param b true - disable, false - enable
 * @since 1.585 Do not disable projects if {@link #supportsMakeDisabled()} returns false
 */
public void makeDisabled(boolean b) throws IOException {
if(disabled==b) return; // noop
if (b && !supportsMakeDisabled()) return; // do nothing if the disabling is unsupported
this.disabled = b;
if(b)
// drop any queued build of a freshly disabled project before persisting
Jenkins.getInstance().getQueue().cancel(this);
save();
ItemListener.fireOnUpdated(this);
}
/**
 * Specifies whether this project may be disabled by the user.
 * By default, it can be only if this is a {@link TopLevelItem};
 * would be false for matrix configurations, etc.
 * @return true if the GUI should allow {@link #doDisable} and the like
 * @since 1.475
 */
public boolean supportsMakeDisabled() {
return this instanceof TopLevelItem;
}
/** Disables this project; equivalent to {@code makeDisabled(true)}. */
public void disable() throws IOException {
makeDisabled(true);
}
/** Enables this project; equivalent to {@code makeDisabled(false)}. */
public void enable() throws IOException {
makeDisabled(false);
}
/** Shows the disabled ball (animated while building) when disabled; otherwise defers to the superclass. */
@Override
public BallColor getIconColor() {
    if (!isDisabled()) {
        return super.getIconColor();
    }
    return isBuilding() ? BallColor.DISABLED_ANIME : BallColor.DISABLED;
}
/**
 * Effectively deprecated: using updateTransientActions correctly in a concurrent
 * environment requires a lock that can too easily cause deadlocks.
 *
 * <p>
 * Override {@link #createTransientActions()} instead.
 */
protected void updateTransientActions() {
transientActions = createTransientActions();
}
/**
 * Builds the list of transient actions contributed by job properties and
 * {@link TransientProjectActionFactory} extensions. A factory that throws is
 * logged and skipped so one bad plugin cannot break the whole list.
 */
protected List<Action> createTransientActions() {
    Vector<Action> actions = new Vector<Action>();
    for (JobProperty<? super P> property : Util.fixNull(properties)) {
        actions.addAll(property.getJobActions((P) this));
    }
    for (TransientProjectActionFactory factory : TransientProjectActionFactory.all()) {
        try {
            // be defensive against null
            actions.addAll(Util.fixNull(factory.createFor(this)));
        } catch (Exception e) {
            LOGGER.log(Level.SEVERE, "Could not load actions from " + factory + " for " + this, e);
        }
    }
    return actions;
}
/**
 * Returns the live list of all {@link Publisher}s configured for this project;
 * mutations to the returned list are reflected in the project.
 *
 * <p>
 * This method couldn't be called <tt>getPublishers()</tt> because existing methods
 * in sub-classes return different inconsistent types.
 */
public abstract DescribableList<Publisher,Descriptor<Publisher>> getPublishersList();
/** {@inheritDoc} Also refreshes transient actions, since job properties contribute actions. */
@Override
public void addProperty(JobProperty<? super P> jobProp) throws IOException {
super.addProperty(jobProp);
updateTransientActions();
}
/** All {@link ProminentProjectAction}s attached to this project. */
public List<ProminentProjectAction> getProminentActions() {
return getActions(ProminentProjectAction.class);
}
/** Handles the configuration form submission, then refreshes transient actions, queue, and dependency graph. */
@Override
@RequirePOST
public void doConfigSubmit( StaplerRequest req, StaplerResponse rsp ) throws IOException, ServletException, FormException {
super.doConfigSubmit(req,rsp);
updateTransientActions();
// notify the queue as the project might be now tied to different node
Jenkins.getInstance().getQueue().scheduleMaintenance();
// this is to reflect the upstream build adjustments done above
Jenkins.getInstance().rebuildDependencyGraphAsync();
}
/**
 * @deprecated
 *    Use {@link #scheduleBuild(Cause)}. Since 1.283.
 *    Delegates to the parameterized-job mixin.
 */
@Deprecated
public boolean scheduleBuild() {
return getParameterizedJobMixIn().scheduleBuild();
}
/**
 * @deprecated
 *    Use {@link #scheduleBuild(int, Cause)}. Since 1.283.
 *    Delegates to the parameterized-job mixin.
 */
@Deprecated
public boolean scheduleBuild(int quietPeriod) {
return getParameterizedJobMixIn().scheduleBuild(quietPeriod);
}
/**
 * Schedules a build of this project.
 *
 * @param c the cause to record for the triggered build.
 * @return
 *      true if the project is added to the queue.
 *      false if the task was rejected from the queue (such as when the system is being shut down.)
 */
public boolean scheduleBuild(Cause c) {
return getParameterizedJobMixIn().scheduleBuild(c);
}
/** Schedules a build with a custom quiet period and the given cause. */
public boolean scheduleBuild(int quietPeriod, Cause c) {
return getParameterizedJobMixIn().scheduleBuild(quietPeriod, c);
}
/**
 * Schedules a build.
 *
 * Important: the actions should be persistable without outside references (e.g. don't store
 * references to this project). To provide parameters for a parameterized project, add a ParametersAction. If
 * no ParametersAction is provided for such a project, one will be created with the default parameter values.
 *
 * @param quietPeriod the quiet period to observe
 * @param c the cause for this build which should be recorded
 * @param actions a list of Actions that will be added to the build
 * @return whether the build was actually scheduled
 * @see #scheduleBuild2(int, Cause, Action...)
 */
public boolean scheduleBuild(int quietPeriod, Cause c, Action... actions) {
return scheduleBuild2(quietPeriod,c,actions)!=null;
}
/**
 * Schedules a build of this project, and returns a {@link Future} object
 * to wait for the completion of the build.
 *
 * @param actions
 *      For the convenience of the caller, this array can contain null, and those will be silently ignored.
 */
@WithBridgeMethods(Future.class)
public QueueTaskFuture<R> scheduleBuild2(int quietPeriod, Cause c, Action... actions) {
// normalize to the Collection overload
return scheduleBuild2(quietPeriod,c,Arrays.asList(actions));
}
/**
 * Schedules a build of this project, and returns a {@link Future} object
 * to wait for the completion of the build. The cause, if any, is appended
 * to the supplied actions as a {@link CauseAction}.
 *
 * @param actions
 *      For the convenience of the caller, this collection can contain null, and those will be silently ignored.
 * @since 1.383
 */
@SuppressWarnings("unchecked")
@WithBridgeMethods(Future.class)
public QueueTaskFuture<R> scheduleBuild2(int quietPeriod, Cause c, Collection<? extends Action> actions) {
    List<Action> all = new ArrayList<Action>(actions);
    if (c != null) {
        all.add(new CauseAction(c));
    }
    Action[] asArray = all.toArray(new Action[all.size()]);
    return getParameterizedJobMixIn().scheduleBuild2(quietPeriod, asArray);
}
/**
 * Schedules a build, and returns a {@link Future} object
 * to wait for the completion of the build.
 *
 * <p>
 * Production code shouldn't be using this, but for tests this is very convenient, so this isn't marked
 * as deprecated.
 */
@SuppressWarnings("deprecation")
@WithBridgeMethods(Future.class)
public QueueTaskFuture<R> scheduleBuild2(int quietPeriod) {
// records a LegacyCodeCause since no explicit cause is available here
return scheduleBuild2(quietPeriod, new LegacyCodeCause());
}
/**
 * Schedules a build of this project, and returns a {@link Future} object
 * to wait for the completion of the build. No extra actions are attached.
 */
@WithBridgeMethods(Future.class)
public QueueTaskFuture<R> scheduleBuild2(int quietPeriod, Cause c) {
return scheduleBuild2(quietPeriod, c, new Action[0]);
}
/**
 * Schedules a polling of this project.
 * No-op (returns false) when the project is disabled or has no {@link SCMTrigger}.
 */
public boolean schedulePolling() {
    if (isDisabled()) {
        return false;
    }
    SCMTrigger trigger = getTrigger(SCMTrigger.class);
    if (trigger == null) {
        return false;
    }
    trigger.run();
    return true;
}
/**
 * Returns true if a build of this project is currently in the queue.
 */
@Override
public boolean isInQueue() {
return Jenkins.getInstance().getQueue().contains(this);
}
/** Looks up this project's item in the build queue. */
@Override
public Queue.Item getQueueItem() {
return Jenkins.getInstance().getQueue().getItem(this);
}
/**
 * Gets the JDK that this project is configured with, or null.
 * Resolved by name against the globally configured JDK installations.
 */
public JDK getJDK() {
return Jenkins.getInstance().getJDK(jdk);
}
/**
 * Overwrites the JDK setting and persists the change.
 *
 * @param jdk the JDK to use; only its name is stored.
 */
public void setJDK(JDK jdk) throws IOException {
this.jdk = jdk.getName();
save();
}
/** The build authorization token configured for this project. */
public BuildAuthorizationToken getAuthToken() {
return authToken;
}
/** {@inheritDoc} Delegates to the build mix-in. */
@Override
public RunMap<R> _getRuns() {
return buildMixIn._getRuns();
}
/** {@inheritDoc} Delegates to the build mix-in. */
@Override
public void removeRun(R run) {
buildMixIn.removeRun(run);
}
/**
 * {@inheritDoc}
 *
 * More efficient implementation, delegating to the build mix-in.
 */
@Override
public R getBuild(String id) {
return buildMixIn.getBuild(id);
}
/**
 * {@inheritDoc}
 *
 * More efficient implementation, delegating to the build mix-in.
 */
@Override
public R getBuildByNumber(int n) {
return buildMixIn.getBuildByNumber(n);
}
/**
 * {@inheritDoc}
 *
 * More efficient implementation, delegating to the build mix-in.
 */
@Override
public R getFirstBuild() {
return buildMixIn.getFirstBuild();
}
/** {@inheritDoc} Delegates to the build mix-in. */
@Override
public @CheckForNull R getLastBuild() {
return buildMixIn.getLastBuild();
}
/** {@inheritDoc} Delegates to the build mix-in. */
@Override
public R getNearestBuild(int n) {
return buildMixIn.getNearestBuild(n);
}
/** {@inheritDoc} Delegates to the build mix-in. */
@Override
public R getNearestOldBuild(int n) {
return buildMixIn.getNearestOldBuild(n);
}
/**
 * Type token for the corresponding build type.
 * The build class must have two constructors:
 * one taking this project type;
 * and one taking this project type, then {@link File}.
 */
protected abstract Class<R> getBuildClass();
/**
 * Creates a new build of this project for immediate execution.
 * Delegates to the build mix-in.
 */
protected synchronized R newBuild() throws IOException {
return buildMixIn.newBuild();
}
/**
 * Loads an existing build record from disk.
 * Delegates to the build mix-in.
 */
protected R loadBuild(File dir) throws IOException {
return buildMixIn.loadBuild(dir);
}
/**
 * {@inheritDoc}
 *
 * <p>
 * Note that this method returns a read-only view of {@link Action}s: the
 * persistent actions from the superclass plus the transient ones. A plugin
 * attempting to add to the returned list will fail fast.
 * {@link BuildStep}s and others who want to add a project action
 * should do so by implementing {@link BuildStep#getProjectActions(AbstractProject)}.
 *
 * @see TransientProjectActionFactory
 */
@SuppressWarnings("deprecation")
@Override
public List<Action> getActions() {
    // combine persistent actions with the transient ones
    List<Action> all = new Vector<Action>(super.getActions());
    all.addAll(transientActions);
    // read-only so plugins that try to add an action here fail loudly
    return Collections.unmodifiableList(all);
}
// TODO implement addAction, addOrReplaceAction, removeAction, removeActions, replaceActions
/**
 * Gets the {@link Node} where this project was last built on.
 *
 * @return
 *      null if no information is available (for example,
 *      if no build was done yet.)
 */
public Node getLastBuiltOn() {
    // where was it built on?
    AbstractBuild build = getLastBuild();
    return build == null ? null : build.getBuiltOn();
}
/** The project itself serves as the constraint token, so member tasks nominating it can run on the same node. */
public Object getSameNodeConstraint() {
return this; // in this way, any member that wants to run with the main guy can nominate the project itself
}
/** This project acts as its own owner task. */
public final Task getOwnerTask() {
return this;
}
/** Authentication under which tasks of this project run; SYSTEM for backward compatibility. */
@Nonnull
public Authentication getDefaultAuthentication() {
// backward compatible behaviour.
return ACL.SYSTEM;
}
/** {@inheritDoc} Ignores the queue item and uses the project-level default. */
@Nonnull
@Override
public Authentication getDefaultAuthentication(Queue.Item item) {
return getDefaultAuthentication();
}
/**
 * {@inheritDoc}
 *
 * <p>
 * A project must be blocked if its own previous build is in progress,
 * or if the blockBuildWhenUpstreamBuilding option is true and an upstream
 * project is building, but derived classes can also check other conditions.
 *
 * @see #getCauseOfBlockage()
 */
@Override
public boolean isBuildBlocked() {
return getCauseOfBlockage()!=null;
}
/** Human-readable description of why this project is blocked, or null when it is not. */
public String getWhyBlocked() {
    CauseOfBlockage cause = getCauseOfBlockage();
    if (cause == null) {
        return null;
    }
    return cause.getShortDescription();
}
/**
 * @deprecated use {@link BlockedBecauseOfBuildInProgress} instead.
 * Kept only as a binary-compatible subclass that forwards to the replacement.
 */
@Deprecated
public static class BecauseOfBuildInProgress extends BlockedBecauseOfBuildInProgress {
/** @param build the build currently in progress */
public BecauseOfBuildInProgress(@Nonnull AbstractBuild<?, ?> build) {
super(build);
}
}
/**
 * Because the downstream build is in progress, and we are configured to wait for that.
 */
public static class BecauseOfDownstreamBuildInProgress extends CauseOfBlockage {
public final AbstractProject<?,?> up;
/** @param up the downstream project currently building */
public BecauseOfDownstreamBuildInProgress(AbstractProject<?,?> up) {
this.up = up;
}
@Override
public String getShortDescription() {
return Messages.AbstractProject_DownstreamBuildInProgress(up.getName());
}
}
/**
 * Because the upstream build is in progress, and we are configured to wait for that.
 */
public static class BecauseOfUpstreamBuildInProgress extends CauseOfBlockage {
public final AbstractProject<?,?> up;
/** @param up the upstream project currently building */
public BecauseOfUpstreamBuildInProgress(AbstractProject<?,?> up) {
this.up = up;
}
@Override
public String getShortDescription() {
return Messages.AbstractProject_UpstreamBuildInProgress(up.getName());
}
}
/**
 * {@inheritDoc}
 * Checks, in order: this project's own build still in progress (non-concurrent only),
 * blocking downstream builds, then blocking upstream builds.
 */
@Override
public CauseOfBlockage getCauseOfBlockage() {
// Block builds until they are done with post-production
if (isLogUpdated() && !isConcurrentBuild()) {
final R lastBuild = getLastBuild();
if (lastBuild != null) {
return new BlockedBecauseOfBuildInProgress(lastBuild);
} else {
// The build has been likely deleted after the isLogUpdated() call.
// Another cause may be an API implementation glitсh in the implementation for AbstractProject.
// Anyway, we should let the code go then.
LOGGER.log(Level.FINE, "The last build has been deleted during the non-concurrent cause creation. The build is not blocked anymore");
}
}
if (blockBuildWhenDownstreamBuilding()) {
AbstractProject<?,?> bup = getBuildingDownstream();
if (bup!=null)
return new BecauseOfDownstreamBuildInProgress(bup);
}
if (blockBuildWhenUpstreamBuilding()) {
AbstractProject<?,?> bup = getBuildingUpstream();
if (bup!=null)
return new BecauseOfUpstreamBuildInProgress(bup);
}
return null;
}
/**
 * Returns the project if any of the downstream projects is either
 * building, waiting, pending or buildable.
 * <p>
 * This means eventually there will be an automatic triggering of
 * the given project (provided that all builds went smoothly.)
 */
public AbstractProject getBuildingDownstream() {
    Set<Task> unblocked = Jenkins.getInstance().getQueue().getUnblockedTasks();
    for (AbstractProject downstream : getTransitiveDownstreamProjects()) {
        if (downstream == this) {
            continue;
        }
        if (downstream.isBuilding() || unblocked.contains(downstream)) {
            return downstream;
        }
    }
    return null;
}
/**
 * Returns the project if any of the upstream projects is either
 * building or is in the queue.
 * <p>
 * This means eventually there will be an automatic triggering of
 * the given project (provided that all builds went smoothly.)
 */
public AbstractProject getBuildingUpstream() {
    Set<Task> unblocked = Jenkins.getInstance().getQueue().getUnblockedTasks();
    for (AbstractProject upstream : getTransitiveUpstreamProjects()) {
        if (upstream == this) {
            continue;
        }
        if (upstream.isBuilding() || unblocked.contains(upstream)) {
            return upstream;
        }
    }
    return null;
}
/**
 * All subtasks that run together with this project: the project itself,
 * contributions from {@link SubTaskContributor} extensions, and those of
 * each job property.
 */
public List<SubTask> getSubTasks() {
    List<SubTask> tasks = new ArrayList<SubTask>();
    tasks.add(this);
    for (SubTaskContributor contributor : SubTaskContributor.all()) {
        tasks.addAll(contributor.forProject(this));
    }
    for (JobProperty<? super P> property : properties) {
        tasks.addAll(property.getSubTasks());
    }
    return tasks;
}
/** Creates a new build for execution, or returns null when the project is disabled. */
public @CheckForNull R createExecutable() throws IOException {
if(isDisabled()) return null;
return newBuild();
}
/** Requires the CANCEL permission to abort builds of this project. */
public void checkAbortPermission() {
checkPermission(CANCEL);
}
/** Whether the current user holds the CANCEL permission on this project. */
public boolean hasAbortPermission() {
return hasPermission(CANCEL);
}
/**
 * Gets the {@link Resource} that represents the workspace of this project.
 * Useful for locking and mutual exclusion control.
 *
 * @deprecated as of 1.319
 *      Projects no longer have a fixed workspace, ands builds will find an available workspace via
 *      {@link WorkspaceList} for each build (furthermore, that happens after a build is started.)
 *      So a {@link Resource} representation for a workspace at the project level no longer makes sense.
 *
 *      <p>
 *      If you need to lock a workspace while you do some computation, see the source code of
 *      {@link #pollSCMChanges(TaskListener)} for how to obtain a lock of a workspace through {@link WorkspaceList}.
 */
@Deprecated
public Resource getWorkspaceResource() {
return new Resource(getFullDisplayName()+" workspace");
}
/**
 * List of necessary resources to perform the build of this project,
 * the union of the resource lists of all child resource activities.
 */
public ResourceList getResourceList() {
    final Set<ResourceActivity> activities = getResourceActivities();
    final List<ResourceList> lists = new ArrayList<ResourceList>(1 + activities.size());
    for (ResourceActivity activity : activities) {
        // defensive infinite recursion and null check
        if (activity == this || activity == null) {
            continue;
        }
        lists.add(activity.getResourceList());
    }
    return ResourceList.union(lists);
}
/**
 * Set of child resource activities of the build of this project (override in child projects).
 * @return The set of child resource activities of the build of this project; empty by default.
 */
protected Set<ResourceActivity> getResourceActivities() {
return Collections.emptySet();
}
/**
 * Checks out the configured SCM into the build's workspace, creating the
 * workspace directory if needed.
 *
 * @return true on a successful checkout, or trivially true when no SCM is configured.
 */
public boolean checkout(AbstractBuild build, Launcher launcher, BuildListener listener, File changelogFile) throws IOException, InterruptedException {
SCM scm = getScm();
if(scm==null)
return true; // no SCM
FilePath workspace = build.getWorkspace();
workspace.mkdirs();
boolean r = scm.checkout(build, launcher, workspace, listener, changelogFile);
if (r) {
// Only calcRevisionsFromBuild if checkout was successful. Note that modern SCM implementations
// won't reach this line anyway, as they throw AbortExceptions on checkout failure.
calcPollingBaseline(build, launcher, listener);
}
return r;
}
/**
 * Pushes the baseline up to the newly checked out revision.
 * Prefers the {@link SCMRevisionState} already attached to the build;
 * otherwise asks the SCM to compute one and attaches it.
 */
private void calcPollingBaseline(AbstractBuild build, Launcher launcher, TaskListener listener) throws IOException, InterruptedException {
SCMRevisionState baseline = build.getAction(SCMRevisionState.class);
if (baseline==null) {
try {
baseline = getScm().calcRevisionsFromBuild(build, launcher, listener);
} catch (AbstractMethodError e) {
baseline = SCMRevisionState.NONE; // pre-1.345 SCM implementations, which doesn't use the baseline in polling
}
if (baseline!=null)
build.addAction(baseline);
}
// cache as the current polling baseline
pollingBaseline = baseline;
}
/**
 * Checks if there's any update in SCM, and returns true if any is found.
 *
 * @deprecated as of 1.346
 *      Use {@link #poll(TaskListener)} instead.
 */
@Deprecated
public boolean pollSCMChanges( TaskListener listener ) {
return poll(listener).hasChanges();
}
/**
 * Checks if there's any update in SCM, and returns true if any is found.
 *
 * <p>
 * The implementation is responsible for ensuring mutual exclusion between polling and builds
 * if necessary.
 *
 * @since 1.345
 */
public PollingResult poll( TaskListener listener ) {
// preconditions: an SCM, a buildable project, and no veto from a decision handler
SCM scm = getScm();
if (scm==null) {
listener.getLogger().println(Messages.AbstractProject_NoSCM());
return NO_CHANGES;
}
if (!isBuildable()) {
listener.getLogger().println(Messages.AbstractProject_Disabled());
return NO_CHANGES;
}
SCMDecisionHandler veto = SCMDecisionHandler.firstShouldPollVeto(this);
if (veto != null) {
listener.getLogger().println(Messages.AbstractProject_PollingVetoed(veto));
return NO_CHANGES;
}
R lb = getLastBuild();
if (lb==null) {
listener.getLogger().println(Messages.AbstractProject_NoBuilds());
return isInQueue() ? NO_CHANGES : BUILD_NOW;
}
// recover a persisted baseline from recent builds if we don't have one in memory
if (pollingBaseline==null) {
R success = getLastSuccessfulBuild(); // if we have a persisted baseline, we'll find it by this
for (R r=lb; r!=null; r=r.getPreviousBuild()) {
SCMRevisionState s = r.getAction(SCMRevisionState.class);
if (s!=null) {
pollingBaseline = s;
break;
}
if (r==success) break; // searched far enough
}
// NOTE-NO-BASELINE:
// if we don't have baseline yet, it means the data is built by old Hudson that doesn't set the baseline
// as action, so we need to compute it. This happens later.
}
// notify SCMPollListeners around the actual polling so both success and failure are observable
try {
SCMPollListener.fireBeforePolling(this, listener);
PollingResult r = _poll(listener, scm);
SCMPollListener.firePollingSuccess(this,listener, r);
return r;
} catch (AbortException e) {
listener.getLogger().println(e.getMessage());
listener.fatalError(Messages.AbstractProject_Aborted());
LOGGER.log(Level.FINE, "Polling "+this+" aborted",e);
SCMPollListener.firePollingFailed(this, listener,e);
return NO_CHANGES;
} catch (IOException e) {
Functions.printStackTrace(e, listener.fatalError(e.getMessage()));
SCMPollListener.firePollingFailed(this, listener,e);
return NO_CHANGES;
} catch (InterruptedException e) {
Functions.printStackTrace(e, listener.fatalError(Messages.AbstractProject_PollingABorted()));
SCMPollListener.firePollingFailed(this, listener,e);
return NO_CHANGES;
} catch (RuntimeException e) {
SCMPollListener.firePollingFailed(this, listener,e);
throw e;
} catch (Error e) {
SCMPollListener.firePollingFailed(this, listener,e);
throw e;
}
}
/**
 * {@link #poll(TaskListener)} method without the try/catch block that does listener notification.
 * Handles the two main cases: SCMs that need a workspace (possibly triggering a build to obtain
 * one) and SCMs that can poll without one.
 */
private PollingResult _poll(TaskListener listener, SCM scm) throws IOException, InterruptedException {
if (scm.requiresWorkspaceForPolling()) {
R b = getSomeBuildWithExistingWorkspace();
// fall back to the last build if no build with an existing workspace remains
if (b == null) b = getLastBuild();
// lock the workspace for the given build
FilePath ws=b.getWorkspace();
WorkspaceOfflineReason workspaceOfflineReason = workspaceOffline( b );
if ( workspaceOfflineReason != null ) {
// workspace offline
for (WorkspaceBrowser browser : ExtensionList.lookup(WorkspaceBrowser.class)) {
ws = browser.getWorkspace(this);
if (ws != null) {
return pollWithWorkspace(listener, scm, b, ws, browser.getWorkspaceList());
}
}
// At this point we start thinking about triggering a build just to get a workspace,
// because otherwise there's no way we can detect changes.
// However, first there are some conditions in which we do not want to do so.
// give time for agents to come online if we are right after reconnection (JENKINS-8408)
long running = Jenkins.getInstance().getInjector().getInstance(Uptime.class).getUptime();
long remaining = TimeUnit2.MINUTES.toMillis(10)-running;
if (remaining>0 && /* this logic breaks tests of polling */!Functions.getIsUnitTest()) {
listener.getLogger().print(Messages.AbstractProject_AwaitingWorkspaceToComeOnline(remaining/1000));
listener.getLogger().println( " (" + workspaceOfflineReason.name() + ")");
return NO_CHANGES;
}
// Do not trigger build, if no suitable agent is online
if (workspaceOfflineReason.equals(WorkspaceOfflineReason.all_suitable_nodes_are_offline)) {
// No suitable executor is online
listener.getLogger().print(Messages.AbstractProject_AwaitingWorkspaceToComeOnline(running/1000));
listener.getLogger().println( " (" + workspaceOfflineReason.name() + ")");
return NO_CHANGES;
}
Label label = getAssignedLabel();
if (label != null && label.isSelfLabel()) {
// if the build is fixed on a node, then attempting a build will do us
// no good. We should just wait for the agent to come back.
listener.getLogger().print(Messages.AbstractProject_NoWorkspace());
listener.getLogger().println( " (" + workspaceOfflineReason.name() + ")");
return NO_CHANGES;
}
listener.getLogger().println( ws==null
? Messages.AbstractProject_WorkspaceOffline()
: Messages.AbstractProject_NoWorkspace());
if (isInQueue()) {
listener.getLogger().println(Messages.AbstractProject_AwaitingBuildForWorkspace());
return NO_CHANGES;
}
// build now, or nothing will ever be built
listener.getLogger().print(Messages.AbstractProject_NewBuildForWorkspace());
listener.getLogger().println( " (" + workspaceOfflineReason.name() + ")");
return BUILD_NOW;
} else {
// workspace is available: poll inside a workspace lease
WorkspaceList l = b.getBuiltOn().toComputer().getWorkspaceList();
return pollWithWorkspace(listener, scm, b, ws, l);
}
} else {
// polling without workspace
LOGGER.fine("Polling SCM changes of " + getName());
if (pollingBaseline==null) // see NOTE-NO-BASELINE above
calcPollingBaseline(getLastBuild(),null,listener);
PollingResult r = scm.poll(this, null, null, listener, pollingBaseline);
pollingBaseline = r.remote;
return r;
}
}
/**
 * Polls the SCM inside a lease on the given workspace, updating the polling
 * baseline to the remote state reported by the SCM.
 */
private PollingResult pollWithWorkspace(TaskListener listener, SCM scm, R lb, @Nonnull FilePath ws, WorkspaceList l) throws InterruptedException, IOException {
// if doing non-concurrent build, acquire a workspace in a way that causes builds to block for this workspace.
// this prevents multiple workspaces of the same job --- the behavior of Hudson < 1.319.
//
// OTOH, if a concurrent build is chosen, the user is willing to create a multiple workspace,
// so better throughput is achieved over time (modulo the initial cost of creating that many workspaces)
// by having multiple workspaces
Node node = lb.getBuiltOn();
Launcher launcher = ws.createLauncher(listener).decorateByEnv(getEnvironment(node,listener));
WorkspaceList.Lease lease = l.acquire(ws, !concurrentBuild);
try {
String nodeName = node != null ? node.getSelfLabel().getName() : "[node_unavailable]";
listener.getLogger().println("Polling SCM changes on " + nodeName);
LOGGER.fine("Polling SCM changes of " + getName());
if (pollingBaseline==null) // see NOTE-NO-BASELINE above
calcPollingBaseline(lb,launcher,listener);
PollingResult r = scm.poll(this, launcher, ws, listener, pollingBaseline);
pollingBaseline = r.remote;
return r;
} finally {
// always release the lease, even if polling failed
lease.release();
}
}
/** Reasons why a usable workspace could not be found during polling. */
enum WorkspaceOfflineReason {
nonexisting_workspace,
builton_node_gone,
builton_node_no_executors,
all_suitable_nodes_are_offline,
use_ondemand_slave
}
/**
 * Returns true if all suitable nodes for the job are offline.
 *
 * <p>The node list is fetched once and reused; the original fetched it into a
 * local that was never read and then re-fetched the same list inside the loop.
 */
private boolean isAllSuitableNodesOffline(R build) {
    Label label = getAssignedLabel();
    List<Node> allNodes = Jenkins.getInstance().getNodes();
    if (label != null) {
        // Invalid label. Put in queue to make administrator fix
        if (label.getNodes().isEmpty()) {
            return false;
        }
        // Returns true, if all suitable nodes are offline
        return label.isOffline();
    } else {
        if (canRoam) {
            for (Node n : allNodes) {
                Computer c = n.toComputer();
                if (c != null && c.isOnline() && c.isAcceptingTasks() && n.getMode() == Mode.NORMAL) {
                    // Some executor is online that is ready and this job can run anywhere
                    return false;
                }
            }
            // We can roam, check that the master is set to be used as much as possible, and not tied jobs only.
            if (Jenkins.getInstance().getMode() == Mode.EXCLUSIVE) {
                return true;
            } else {
                return false;
            }
        }
    }
    // not roaming and no label assigned: treat as no suitable node being available
    return true;
}
/**
 * Determines why the workspace of the given build is unavailable,
 * or returns null when the workspace is usable for polling.
 */
private WorkspaceOfflineReason workspaceOffline(R build) throws IOException, InterruptedException {
FilePath ws = build.getWorkspace();
Label label = getAssignedLabel();
if (isAllSuitableNodesOffline(build)) {
// with a cloud configured, an on-demand agent may still be provisioned
Collection<Cloud> applicableClouds = label == null ? Jenkins.getInstance().clouds : label.getClouds();
return applicableClouds.isEmpty() ? WorkspaceOfflineReason.all_suitable_nodes_are_offline : WorkspaceOfflineReason.use_ondemand_slave;
}
if (ws==null || !ws.exists()) {
return WorkspaceOfflineReason.nonexisting_workspace;
}
Node builtOn = build.getBuiltOn();
if (builtOn == null) { // node built-on doesn't exist anymore
return WorkspaceOfflineReason.builton_node_gone;
}
if (builtOn.toComputer() == null) { // node still exists, but has 0 executors - o.s.l.t.
return WorkspaceOfflineReason.builton_node_no_executors;
}
return null;
}
/**
 * Returns true if this user has made a commit to this project,
 * scanning the whole build history from the most recent build backwards.
 *
 * @since 1.191
 */
public boolean hasParticipant(User user) {
    for (R build = getLastBuild(); build != null; build = build.getPreviousBuild()) {
        if (build.hasParticipant(user)) {
            return true;
        }
    }
    return false;
}
@Exported
public SCM getScm() {
return scm;
}
public void setScm(SCM scm) throws IOException {
this.scm = scm;
save();
}
/**
* Adds a new {@link Trigger} to this {@link Project} if not active yet.
*/
public void addTrigger(Trigger<?> trigger) throws IOException {
addToList(trigger,triggers());
}
public void removeTrigger(TriggerDescriptor trigger) throws IOException {
removeFromList(trigger,triggers());
}
    /**
     * Adds {@code item} to {@code collection}, replacing any existing entry
     * with the same descriptor, then persists the configuration.
     */
    protected final synchronized <T extends Describable<T>>
    void addToList( T item, List<T> collection ) throws IOException {
        //No support to replace item in position, remove then add
        removeFromList(item.getDescriptor(), collection);
        collection.add(item);
        save();
        updateTransientActions();
    }
protected final synchronized <T extends Describable<T>>
void removeFromList(Descriptor<T> item, List<T> collection) throws IOException {
final Iterator<T> iCollection = collection.iterator();
while(iCollection.hasNext()) {
final T next = iCollection.next();
if(next.getDescriptor()==item) {
// found it
iCollection.remove();
save();
updateTransientActions();
return;
}
}
}
@SuppressWarnings("unchecked")
@Override public Map<TriggerDescriptor,Trigger<?>> getTriggers() {
return triggers().toMap();
}
/**
* Gets the specific trigger, or null if the property is not configured for this job.
*/
public <T extends Trigger> T getTrigger(Class<T> clazz) {
for (Trigger p : triggers()) {
if(clazz.isInstance(p))
return clazz.cast(p);
}
return null;
}
//
//
// fingerprint related
//
//
/**
* True if the builds of this project produces {@link Fingerprint} records.
*/
public abstract boolean isFingerprintConfigured();
/**
* Gets the other {@link AbstractProject}s that should be built
* when a build of this project is completed.
*/
public final List<AbstractProject> getDownstreamProjects() {
return Jenkins.getInstance().getDependencyGraph().getDownstream(this);
}
@Exported(name="downstreamProjects")
@Restricted(DoNotUse.class) // only for exporting
public List<AbstractProject> getDownstreamProjectsForApi() {
List<AbstractProject> r = new ArrayList<>();
for (AbstractProject p : getDownstreamProjects()) {
if (p.hasPermission(Item.READ)) {
r.add(p);
}
}
return r;
}
public final List<AbstractProject> getUpstreamProjects() {
return Jenkins.getInstance().getDependencyGraph().getUpstream(this);
}
@Exported(name="upstreamProjects")
@Restricted(DoNotUse.class) // only for exporting
public List<AbstractProject> getUpstreamProjectsForApi() {
List<AbstractProject> r = new ArrayList<>();
for (AbstractProject p : getUpstreamProjects()) {
if (p.hasPermission(Item.READ)) {
r.add(p);
}
}
return r;
}
/**
* Returns only those upstream projects that defines {@link BuildTrigger} to this project.
* This is a subset of {@link #getUpstreamProjects()}
* <p>No longer used in the UI.
* @return A List of upstream projects that has a {@link BuildTrigger} to this project.
*/
public final List<AbstractProject> getBuildTriggerUpstreamProjects() {
ArrayList<AbstractProject> result = new ArrayList<AbstractProject>();
for (AbstractProject<?,?> ap : getUpstreamProjects()) {
BuildTrigger buildTrigger = ap.getPublishersList().get(BuildTrigger.class);
if (buildTrigger != null)
if (buildTrigger.getChildProjects(ap).contains(this))
result.add(ap);
}
return result;
}
/**
* Gets all the upstream projects including transitive upstream projects.
*
* @since 1.138
*/
public final Set<AbstractProject> getTransitiveUpstreamProjects() {
return Jenkins.getInstance().getDependencyGraph().getTransitiveUpstream(this);
}
/**
* Gets all the downstream projects including transitive downstream projects.
*
* @since 1.138
*/
public final Set<AbstractProject> getTransitiveDownstreamProjects() {
return Jenkins.getInstance().getDependencyGraph().getTransitiveDownstream(this);
}
/**
* Gets the dependency relationship map between this project (as the source)
* and that project (as the sink.)
*
* @return
* can be empty but not null. build number of this project to the build
* numbers of that project.
*/
public SortedMap<Integer, RangeSet> getRelationship(AbstractProject that) {
TreeMap<Integer,RangeSet> r = new TreeMap<Integer,RangeSet>(REVERSE_INTEGER_COMPARATOR);
checkAndRecord(that, r, this.getBuilds());
// checkAndRecord(that, r, that.getBuilds());
return r;
}
    /**
     * Helper method for getDownstreamRelationship.
     *
     * For each given build, find the build number range of the given project and put that into the map.
     *
     * @param that   the downstream project whose build ranges are collected.
     * @param r      accumulator mapping this project's build numbers to ranges of {@code that}'s builds.
     * @param builds the builds of this project to examine.
     */
    private void checkAndRecord(AbstractProject that, TreeMap<Integer, RangeSet> r, Collection<R> builds) {
        for (R build : builds) {
            RangeSet rs = build.getDownstreamRelationship(that);
            if(rs==null || rs.isEmpty())
                continue;
            int n = build.getNumber();
            // Merge with any range already recorded for this build number.
            RangeSet value = r.get(n);
            if(value==null)
                r.put(n,rs);
            else
                value.add(rs);
        }
    }
/**
* Builds the dependency graph.
* Since 1.558, not abstract and by default includes dependencies contributed by {@link #triggers()}.
*/
protected void buildDependencyGraph(DependencyGraph graph) {
triggers().buildDependencyGraph(this, graph);
}
@Override
protected SearchIndexBuilder makeSearchIndex() {
return getParameterizedJobMixIn().extendSearchIndex(super.makeSearchIndex());
}
@Override
protected HistoryWidget createHistoryWidget() {
return buildMixIn.createHistoryWidget();
}
public boolean isParameterized() {
return getParameterizedJobMixIn().isParameterized();
}
//
//
// actions
//
//
/**
* Schedules a new build command.
*/
public void doBuild( StaplerRequest req, StaplerResponse rsp, @QueryParameter TimeDuration delay ) throws IOException, ServletException {
getParameterizedJobMixIn().doBuild(req, rsp, delay);
}
/** @deprecated use {@link #doBuild(StaplerRequest, StaplerResponse, TimeDuration)} */
@Deprecated
public void doBuild(StaplerRequest req, StaplerResponse rsp) throws IOException, ServletException {
doBuild(req, rsp, TimeDuration.fromString(req.getParameter("delay")));
}
    /**
     * Computes the delay by taking the default value and the override in the request parameter into the account.
     *
     * @return the parsed "delay" request parameter, falling back to
     *         {@link #getQuietPeriod()} when the parameter is absent.
     * @throws ServletException if the "delay" parameter is not a valid number.
     * @deprecated as of 1.489
     *      Inject {@link TimeDuration}.
     */
    @Deprecated
    public int getDelay(StaplerRequest req) throws ServletException {
        String delay = req.getParameter("delay");
        if (delay==null) return getQuietPeriod();
        try {
            // TODO: more unit handling
            // Accept an optional trailing "sec"/"secs" unit suffix.
            if(delay.endsWith("sec")) delay=delay.substring(0,delay.length()-3);
            if(delay.endsWith("secs")) delay=delay.substring(0,delay.length()-4);
            return Integer.parseInt(delay);
        } catch (NumberFormatException e) {
            throw new ServletException("Invalid delay parameter value: "+delay);
        }
    }
/**
* Supports build trigger with parameters via an HTTP GET or POST.
* Currently only String parameters are supported.
*/
public void doBuildWithParameters(StaplerRequest req, StaplerResponse rsp, @QueryParameter TimeDuration delay) throws IOException, ServletException {
getParameterizedJobMixIn().doBuildWithParameters(req, rsp, delay);
}
/** @deprecated use {@link #doBuildWithParameters(StaplerRequest, StaplerResponse, TimeDuration)} */
@Deprecated
public void doBuildWithParameters(StaplerRequest req, StaplerResponse rsp) throws IOException, ServletException {
doBuildWithParameters(req, rsp, TimeDuration.fromString(req.getParameter("delay")));
}
/**
* Schedules a new SCM polling command.
*/
public void doPolling( StaplerRequest req, StaplerResponse rsp ) throws IOException, ServletException {
BuildAuthorizationToken.checkPermission((Job) this, authToken, req, rsp);
schedulePolling();
rsp.sendRedirect(".");
}
/**
* Cancels a scheduled build.
*/
@RequirePOST
public void doCancelQueue( StaplerRequest req, StaplerResponse rsp ) throws IOException, ServletException {
getParameterizedJobMixIn().doCancelQueue(req, rsp);
}
    /**
     * Reconfigures this project from the submitted configuration form:
     * disabled flag, JDK, quiet period, SCM retry count, blocking flags,
     * custom workspace, checkout strategy, label assignment, SCM and triggers.
     */
    @Override
    protected void submit(StaplerRequest req, StaplerResponse rsp) throws IOException, ServletException, FormException {
        super.submit(req,rsp);
        JSONObject json = req.getSubmittedForm();
        makeDisabled(json.optBoolean("disable"));
        jdk = json.optString("jdk", null);
        // Optional fields reset to null (meaning "use the default") when the
        // corresponding checkbox is not ticked in the form.
        if(json.optBoolean("hasCustomQuietPeriod", json.has("quiet_period"))) {
            quietPeriod = json.optInt("quiet_period");
        } else {
            quietPeriod = null;
        }
        if(json.optBoolean("hasCustomScmCheckoutRetryCount", json.has("scmCheckoutRetryCount"))) {
            scmCheckoutRetryCount = json.optInt("scmCheckoutRetryCount");
        } else {
            scmCheckoutRetryCount = null;
        }
        blockBuildWhenDownstreamBuilding = json.optBoolean("blockBuildWhenDownstreamBuilding");
        blockBuildWhenUpstreamBuilding = json.optBoolean("blockBuildWhenUpstreamBuilding");
        if(req.hasParameter("customWorkspace.directory")) {
            // Workaround for JENKINS-25221 while plugins are being updated.
            LOGGER.log(Level.WARNING, "label assignment is using legacy 'customWorkspace.directory'");
            customWorkspace = Util.fixEmptyAndTrim(req.getParameter("customWorkspace.directory"));
        } else if(json.optBoolean("hasCustomWorkspace", json.has("customWorkspace"))) {
            customWorkspace = Util.fixEmptyAndTrim(json.optString("customWorkspace"));
        } else {
            customWorkspace = null;
        }
        if (json.has("scmCheckoutStrategy"))
            scmCheckoutStrategy = req.bindJSON(SCMCheckoutStrategy.class,
                json.getJSONObject("scmCheckoutStrategy"));
        else
            scmCheckoutStrategy = null;
        if(json.optBoolean("hasSlaveAffinity", json.has("label"))) {
            assignedNode = Util.fixEmptyAndTrim(json.optString("label"));
        } else if(req.hasParameter("_.assignedLabelString")) {
            // Workaround for JENKINS-25372 while plugin is being updated.
            // Keep this condition second for JENKINS-25533
            LOGGER.log(Level.WARNING, "label assignment is using legacy '_.assignedLabelString'");
            assignedNode = Util.fixEmptyAndTrim(req.getParameter("_.assignedLabelString"));
        } else {
            assignedNode = null;
        }
        canRoam = assignedNode==null;
        keepDependencies = json.has("keepDependencies");
        concurrentBuild = json.optBoolean("concurrentBuild");
        authToken = BuildAuthorizationToken.create(req);
        setScm(SCMS.parseSCM(req,this));
        // Stop the previously configured triggers before swapping in the new set.
        for (Trigger t : triggers())
            t.stop();
        triggers.replaceBy(buildDescribable(req, Trigger.for_(this)));
        for (Trigger t : triggers())
            t.start(this,true);
    }
/**
* @deprecated
* As of 1.261. Use {@link #buildDescribable(StaplerRequest, List)} instead.
*/
@Deprecated
protected final <T extends Describable<T>> List<T> buildDescribable(StaplerRequest req, List<? extends Descriptor<T>> descriptors, String prefix) throws FormException, ServletException {
return buildDescribable(req,descriptors);
}
protected final <T extends Describable<T>> List<T> buildDescribable(StaplerRequest req, List<? extends Descriptor<T>> descriptors)
throws FormException, ServletException {
JSONObject data = req.getSubmittedForm();
List<T> r = new Vector<T>();
for (Descriptor<T> d : descriptors) {
String safeName = d.getJsonSafeClassName();
if (req.getParameter(safeName) != null) {
T instance = d.newInstance(req, data.getJSONObject(safeName));
r.add(instance);
}
}
return r;
}
    /**
     * Serves the workspace files.
     *
     * @return a directory browser over the workspace, or null (after forwarding
     *         to an explanatory view) when no workspace exists.
     */
    public DirectoryBrowserSupport doWs( StaplerRequest req, StaplerResponse rsp ) throws IOException, ServletException, InterruptedException {
        checkPermission(Item.WORKSPACE);
        FilePath ws = getSomeWorkspace();
        if ((ws == null) || (!ws.exists())) {
            // if there's no workspace, report a nice error message
            // Would be good if when asked for *plain*, do something else!
            // (E.g. return 404, or send empty doc.)
            // Not critical; client can just check if content type is not text/plain,
            // which also serves to detect old versions of Hudson.
            req.getView(this,"noWorkspace.jelly").forward(req,rsp);
            return null;
        } else {
            Computer c = ws.toComputer();
            // Include the computer name in the title when the workspace's node is known.
            String title;
            if (c == null) {
                title = Messages.AbstractProject_WorkspaceTitle(getDisplayName());
            } else {
                title = Messages.AbstractProject_WorkspaceTitleOnComputer(getDisplayName(), c.getDisplayName());
            }
            return new DirectoryBrowserSupport(this, ws, title, "folder.png", true);
        }
    }
    /**
     * Wipes out the workspace.
     *
     * @return a redirect back to the project page on success, or a forward to
     *         a "blocked" view when the SCM vetoes the deletion.
     */
    @RequirePOST
    public HttpResponse doDoWipeOutWorkspace() throws IOException, ServletException, InterruptedException {
        checkPermission(Functions.isWipeOutPermissionEnabled() ? WIPEOUT : BUILD);
        R b = getSomeBuildWithWorkspace();
        FilePath ws = b!=null ? b.getWorkspace() : null;
        // processWorkspaceBeforeDeletion gives the SCM a chance to veto the wipe-out.
        if (ws!=null && getScm().processWorkspaceBeforeDeletion(this, ws, b.getBuiltOn())) {
            ws.deleteRecursive();
            for (WorkspaceListener wl : WorkspaceListener.all()) {
                wl.afterDelete(this);
            }
            return new HttpRedirect(".");
        } else {
            // If we get here, that means the SCM blocked the workspace deletion.
            return new ForwardToView(this,"wipeOutWorkspaceBlocked.jelly");
        }
    }
@CLIMethod(name="disable-job")
@RequirePOST
public HttpResponse doDisable() throws IOException, ServletException {
checkPermission(CONFIGURE);
makeDisabled(true);
return new HttpRedirect(".");
}
@CLIMethod(name="enable-job")
@RequirePOST
public HttpResponse doEnable() throws IOException, ServletException {
checkPermission(CONFIGURE);
makeDisabled(false);
return new HttpRedirect(".");
}
/**
* RSS feed for changes in this project.
*/
public void doRssChangelog( StaplerRequest req, StaplerResponse rsp ) throws IOException, ServletException {
class FeedItem {
ChangeLogSet.Entry e;
int idx;
public FeedItem(Entry e, int idx) {
this.e = e;
this.idx = idx;
}
AbstractBuild<?,?> getBuild() {
return e.getParent().build;
}
}
List<FeedItem> entries = new ArrayList<FeedItem>();
for(R r=getLastBuild(); r!=null; r=r.getPreviousBuild()) {
int idx=0;
for( ChangeLogSet.Entry e : r.getChangeSet())
entries.add(new FeedItem(e,idx++));
}
RSS.forwardToRss(
getDisplayName()+' '+getScm().getDescriptor().getDisplayName()+" changes",
getUrl()+"changes",
entries, new FeedAdapter<FeedItem>() {
public String getEntryTitle(FeedItem item) {
return "#"+item.getBuild().number+' '+item.e.getMsg()+" ("+item.e.getAuthor()+")";
}
public String getEntryUrl(FeedItem item) {
return item.getBuild().getUrl()+"changes#detail"+item.idx;
}
public String getEntryID(FeedItem item) {
return getEntryUrl(item);
}
public String getEntryDescription(FeedItem item) {
StringBuilder buf = new StringBuilder();
for(String path : item.e.getAffectedPaths())
buf.append(path).append('\n');
return buf.toString();
}
public Calendar getEntryTimestamp(FeedItem item) {
return item.getBuild().getTimestamp();
}
public String getEntryAuthor(FeedItem entry) {
return JenkinsLocationConfiguration.get().getAdminAddress();
}
},
req, rsp );
}
/**
* {@link AbstractProject} subtypes should implement this base class as a descriptor.
*
* @since 1.294
*/
public static abstract class AbstractProjectDescriptor extends TopLevelItemDescriptor {
/**
* {@link AbstractProject} subtypes can override this method to veto some {@link Descriptor}s
* from showing up on their configuration screen. This is often useful when you are building
* a workflow/company specific project type, where you want to limit the number of choices
* given to the users.
*
* <p>
* Some {@link Descriptor}s define their own schemes for controlling applicability
* (such as {@link BuildStepDescriptor#isApplicable(Class)}),
* This method works like AND in conjunction with them;
* Both this method and that method need to return true in order for a given {@link Descriptor}
* to show up for the given {@link Project}.
*
* <p>
* The default implementation returns true for everything.
*
* @see BuildStepDescriptor#isApplicable(Class)
* @see BuildWrapperDescriptor#isApplicable(AbstractProject)
* @see TriggerDescriptor#isApplicable(Item)
*/
@Override
public boolean isApplicable(Descriptor descriptor) {
return true;
}
@Restricted(DoNotUse.class)
public FormValidation doCheckAssignedLabelString(@AncestorInPath AbstractProject<?,?> project,
@QueryParameter String value) {
// Provide a legacy interface in case plugins are not going through p:config-assignedLabel
// see: JENKINS-25372
LOGGER.log(Level.WARNING, "checking label via legacy '_.assignedLabelString'");
return doCheckLabel(project, value);
}
public FormValidation doCheckLabel(@AncestorInPath AbstractProject<?,?> project,
@QueryParameter String value) {
return validateLabelExpression(value, project);
}
        /**
         * Validate label expression string.
         *
         * @param value   the label expression typed by the user.
         * @param project May be specified to perform project specific validation.
         * @return ok/warning/error; never null.
         * @since 1.590
         */
        public static @Nonnull FormValidation validateLabelExpression(String value, @CheckForNull AbstractProject<?, ?> project) {
            if (Util.fixEmpty(value)==null)
                return FormValidation.ok(); // nothing typed yet
            try {
                Label.parseExpression(value);
            } catch (ANTLRException e) {
                // Not a syntactically valid boolean label expression.
                return FormValidation.error(e,
                        Messages.AbstractProject_AssignedLabelString_InvalidBooleanExpression(e.getMessage()));
            }
            Jenkins j = Jenkins.getInstance();
            Label l = j.getLabel(value);
            if (l.isEmpty()) {
                // No node matches the whole expression; suggest the closest
                // existing label for the first atom that matched nothing.
                for (LabelAtom a : l.listAtoms()) {
                    if (a.isEmpty()) {
                        LabelAtom nearest = LabelAtom.findNearest(a.getName());
                        return FormValidation.warning(Messages.AbstractProject_AssignedLabelString_NoMatch_DidYouMean(a.getName(),nearest.getDisplayName()));
                    }
                }
                return FormValidation.warning(Messages.AbstractProject_AssignedLabelString_NoMatch());
            }
            if (project != null) {
                // Let plugin-contributed LabelValidator extensions veto the label.
                for (AbstractProject.LabelValidator v : j
                        .getExtensionList(AbstractProject.LabelValidator.class)) {
                    FormValidation result = v.check(project, l);
                    if (!FormValidation.Kind.OK.equals(result.kind)) {
                        return result;
                    }
                }
            }
            return FormValidation.okWithMarkup(Messages.AbstractProject_LabelLink(
                    j.getRootUrl(), Util.escape(l.getName()), l.getUrl(), l.getNodes().size(), l.getClouds().size())
            );
        }
public FormValidation doCheckCustomWorkspace(@QueryParameter String customWorkspace){
if(Util.fixEmptyAndTrim(customWorkspace)==null)
return FormValidation.error(Messages.AbstractProject_CustomWorkspaceEmpty());
else
return FormValidation.ok();
}
public AutoCompletionCandidates doAutoCompleteUpstreamProjects(@QueryParameter String value) {
AutoCompletionCandidates candidates = new AutoCompletionCandidates();
List<Job> jobs = Jenkins.getInstance().getItems(Job.class);
for (Job job: jobs) {
if (job.getFullName().startsWith(value)) {
if (job.hasPermission(Item.READ)) {
candidates.add(job.getFullName());
}
}
}
return candidates;
}
@Restricted(DoNotUse.class)
public AutoCompletionCandidates doAutoCompleteAssignedLabelString(@QueryParameter String value) {
// Provide a legacy interface in case plugins are not going through p:config-assignedLabel
// see: JENKINS-25372
LOGGER.log(Level.WARNING, "autocompleting label via legacy '_.assignedLabelString'");
return doAutoCompleteLabel(value);
}
public AutoCompletionCandidates doAutoCompleteLabel(@QueryParameter String value) {
AutoCompletionCandidates c = new AutoCompletionCandidates();
Set<Label> labels = Jenkins.getInstance().getLabels();
List<String> queries = new AutoCompleteSeeder(value).getSeeds();
for (String term : queries) {
for (Label l : labels) {
if (l.getName().startsWith(term)) {
c.add(l.getName());
}
}
}
return c;
}
public List<SCMCheckoutStrategyDescriptor> getApplicableSCMCheckoutStrategyDescriptors(AbstractProject p) {
return SCMCheckoutStrategyDescriptor._for(p);
}
/**
* Utility class for taking the current input value and computing a list
* of potential terms to match against the list of defined labels.
*/
static class AutoCompleteSeeder {
private String source;
AutoCompleteSeeder(String source) {
this.source = source;
}
List<String> getSeeds() {
ArrayList<String> terms = new ArrayList<String>();
boolean trailingQuote = source.endsWith("\"");
boolean leadingQuote = source.startsWith("\"");
boolean trailingSpace = source.endsWith(" ");
if (trailingQuote || (trailingSpace && !leadingQuote)) {
terms.add("");
} else {
if (leadingQuote) {
int quote = source.lastIndexOf('"');
if (quote == 0) {
terms.add(source.substring(1));
} else {
terms.add("");
}
} else {
int space = source.lastIndexOf(' ');
if (space > -1) {
terms.add(source.substring(space+1));
} else {
terms.add(source);
}
}
}
return terms;
}
}
}
/**
* Finds a {@link AbstractProject} that has the name closest to the given name.
* @see Items#findNearest
*/
public static @CheckForNull AbstractProject findNearest(String name) {
return findNearest(name,Jenkins.getInstance());
}
/**
* Finds a {@link AbstractProject} whose name (when referenced from the specified context) is closest to the given name.
*
* @since 1.419
* @see Items#findNearest
*/
public static @CheckForNull AbstractProject findNearest(String name, ItemGroup context) {
return Items.findNearest(AbstractProject.class, name, context);
}
private static final Comparator<Integer> REVERSE_INTEGER_COMPARATOR = new Comparator<Integer>() {
public int compare(Integer o1, Integer o2) {
return o2-o1;
}
};
private static final Logger LOGGER = Logger.getLogger(AbstractProject.class.getName());
/**
* @deprecated Just use {@link #CANCEL}.
*/
@Deprecated
public static final Permission ABORT = CANCEL;
/**
* @deprecated Use {@link ParameterizedJobMixIn#BUILD_NOW_TEXT}.
*/
@Deprecated
public static final Message<AbstractProject> BUILD_NOW_TEXT = new Message<AbstractProject>();
/**
* Used for CLI binding.
*/
@CLIResolver
public static AbstractProject resolveForCLI(
@Argument(required=true,metaVar="NAME",usage="Job name") String name) throws CmdLineException {
AbstractProject item = Jenkins.getInstance().getItemByFullName(name, AbstractProject.class);
if (item==null) {
AbstractProject project = AbstractProject.findNearest(name);
throw new CmdLineException(null, project == null ? Messages.AbstractItem_NoSuchJobExistsWithoutSuggestion(name)
: Messages.AbstractItem_NoSuchJobExists(name, project.getFullName()));
}
return item;
}
public String getCustomWorkspace() {
return customWorkspace;
}
/**
* User-specified workspace directory, or null if it's up to Jenkins.
*
* <p>
* Normally a project uses the workspace location assigned by its parent container,
* but sometimes people have builds that have hard-coded paths.
*
* <p>
* This is not {@link File} because it may have to hold a path representation on another OS.
*
* <p>
* If this path is relative, it's resolved against {@link Node#getRootPath()} on the node where this workspace
* is prepared.
*
* @since 1.410
*/
public void setCustomWorkspace(String customWorkspace) throws IOException {
this.customWorkspace= Util.fixEmptyAndTrim(customWorkspace);
save();
}
/**
* Plugins may want to contribute additional restrictions on the use of specific labels for specific projects.
* This extension point allows such restrictions.
*
* @since 1.540
*/
public static abstract class LabelValidator implements ExtensionPoint {
/**
* Check the use of the label within the specified context.
*
* @param project the project that wants to restrict itself to the specified label.
* @param label the label that the project wants to restrict itself to.
* @return the {@link FormValidation} result.
*/
@Nonnull
public abstract FormValidation check(@Nonnull AbstractProject<?, ?> project, @Nonnull Label label);
}
}
| 85,167 | 35.55279 | 218 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/AccountInstance.java
|
package org.telegram.messenger;
import org.telegram.tgnet.ConnectionsManager;
/**
 * Per-account facade giving access to the service singletons
 * (messages, contacts, notifications, ...) for one account slot.
 */
public class AccountInstance {
    // Index of the account slot this instance serves.
    private int currentAccount;
    // Lazily-populated instance per account slot, bounded by UserConfig.MAX_ACCOUNT_COUNT.
    private static volatile AccountInstance[] Instance = new AccountInstance[UserConfig.MAX_ACCOUNT_COUNT];
    /**
     * Returns the instance for account slot {@code num}, creating it on first use.
     * NOTE(review): this is double-checked locking on an array *element*; the
     * volatile modifier covers only the array reference, not its slots, so the
     * unsynchronized first read is not formally safe under the Java memory
     * model — consider AtomicReferenceArray. TODO confirm.
     */
    public static AccountInstance getInstance(int num) {
        AccountInstance localInstance = Instance[num];
        if (localInstance == null) {
            synchronized (AccountInstance.class) {
                localInstance = Instance[num];
                if (localInstance == null) {
                    Instance[num] = localInstance = new AccountInstance(num);
                }
            }
        }
        return localInstance;
    }
    public AccountInstance(int instance) {
        currentAccount = instance;
    }
    // The accessors below delegate to each service's own per-account singleton.
    public MessagesController getMessagesController() {
        return MessagesController.getInstance(currentAccount);
    }
    public MessagesStorage getMessagesStorage() {
        return MessagesStorage.getInstance(currentAccount);
    }
    public ContactsController getContactsController() {
        return ContactsController.getInstance(currentAccount);
    }
    public DataQuery getDataQuery() {
        return DataQuery.getInstance(currentAccount);
    }
    public ConnectionsManager getConnectionsManager() {
        return ConnectionsManager.getInstance(currentAccount);
    }
    public NotificationsController getNotificationsController() {
        return NotificationsController.getInstance(currentAccount);
    }
    public NotificationCenter getNotificationCenter() {
        return NotificationCenter.getInstance(currentAccount);
    }
    public UserConfig getUserConfig() {
        return UserConfig.getInstance(currentAccount);
    }
}
| 1,750 | 29.189655 | 107 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/AccountInstance2.java
|
package org.telegram.messenger;
import android.content.SharedPreferences;
import org.telegram.tgnet.ConnectionsManager;
/**
 * Per-account facade giving access to the service singletons
 * (messages, media, notifications, files, ...) for one account slot.
 */
public class AccountInstance {
    // Index of the account slot this instance serves.
    private int currentAccount;
    // Lazily-populated instance per account slot, bounded by UserConfig.MAX_ACCOUNT_COUNT.
    private static volatile AccountInstance[] Instance = new AccountInstance[UserConfig.MAX_ACCOUNT_COUNT];
    /**
     * Returns the instance for account slot {@code num}, creating it on first use.
     * NOTE(review): this is double-checked locking on an array *element*; the
     * volatile modifier covers only the array reference, not its slots, so the
     * unsynchronized first read is not formally safe under the Java memory
     * model — consider AtomicReferenceArray. TODO confirm.
     */
    public static AccountInstance getInstance(int num) {
        AccountInstance localInstance = Instance[num];
        if (localInstance == null) {
            synchronized (AccountInstance.class) {
                localInstance = Instance[num];
                if (localInstance == null) {
                    Instance[num] = localInstance = new AccountInstance(num);
                }
            }
        }
        return localInstance;
    }
    public AccountInstance(int instance) {
        currentAccount = instance;
    }
    // The accessors below delegate to each service's own per-account singleton.
    public MessagesController getMessagesController() {
        return MessagesController.getInstance(currentAccount);
    }
    public MessagesStorage getMessagesStorage() {
        return MessagesStorage.getInstance(currentAccount);
    }
    public ContactsController getContactsController() {
        return ContactsController.getInstance(currentAccount);
    }
    public MediaDataController getMediaDataController() {
        return MediaDataController.getInstance(currentAccount);
    }
    public ConnectionsManager getConnectionsManager() {
        return ConnectionsManager.getInstance(currentAccount);
    }
    public NotificationsController getNotificationsController() {
        return NotificationsController.getInstance(currentAccount);
    }
    public NotificationCenter getNotificationCenter() {
        return NotificationCenter.getInstance(currentAccount);
    }
    public LocationController getLocationController() {
        return LocationController.getInstance(currentAccount);
    }
    public UserConfig getUserConfig() {
        return UserConfig.getInstance(currentAccount);
    }
    public DownloadController getDownloadController() {
        return DownloadController.getInstance(currentAccount);
    }
    public SendMessagesHelper getSendMessagesHelper() {
        return SendMessagesHelper.getInstance(currentAccount);
    }
    public SecretChatHelper getSecretChatHelper() {
        return SecretChatHelper.getInstance(currentAccount);
    }
    public StatsController getStatsController() {
        return StatsController.getInstance(currentAccount);
    }
    public FileLoader getFileLoader() {
        return FileLoader.getInstance(currentAccount);
    }
    public FileRefController getFileRefController() {
        return FileRefController.getInstance(currentAccount);
    }
    // Notification settings are stored per account by MessagesController.
    public SharedPreferences getNotificationsSettings() {
        return MessagesController.getNotificationsSettings(currentAccount);
    }
}
| 2,804 | 29.48913 | 107 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/DefaultSubscriptionRegistry1.java
|
/*
* Copyright 2002-2018 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.messaging.simp.broker;
import java.util.Collection;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CopyOnWriteArraySet;
import org.springframework.expression.EvaluationContext;
import org.springframework.expression.Expression;
import org.springframework.expression.ExpressionParser;
import org.springframework.expression.PropertyAccessor;
import org.springframework.expression.TypedValue;
import org.springframework.expression.spel.SpelEvaluationException;
import org.springframework.expression.spel.standard.SpelExpressionParser;
import org.springframework.expression.spel.support.SimpleEvaluationContext;
import org.springframework.messaging.Message;
import org.springframework.messaging.MessageHeaders;
import org.springframework.messaging.simp.SimpMessageHeaderAccessor;
import org.springframework.messaging.support.MessageHeaderAccessor;
import org.springframework.util.AntPathMatcher;
import org.springframework.util.Assert;
import org.springframework.util.LinkedMultiValueMap;
import org.springframework.util.MultiValueMap;
import org.springframework.util.PathMatcher;
import org.springframework.util.StringUtils;
/**
* Implementation of {@link SubscriptionRegistry} that stores subscriptions
* in memory and uses a {@link org.springframework.util.PathMatcher PathMatcher}
* for matching destinations.
*
* <p>As of 4.2, this class supports a {@link #setSelectorHeaderName selector}
* header on subscription messages with Spring EL expressions evaluated against
* the headers to filter out messages in addition to destination matching.
*
* @author Rossen Stoyanchev
* @author Sebastien Deleuze
* @author Juergen Hoeller
* @since 4.0
*/
public class DefaultSubscriptionRegistry extends AbstractSubscriptionRegistry {
/** Default maximum number of entries for the destination cache: 1024 */
public static final int DEFAULT_CACHE_LIMIT = 1024;
/** Static evaluation context to reuse */
private static EvaluationContext messageEvalContext =
SimpleEvaluationContext.forPropertyAccessors(new SimpMessageHeaderPropertyAccessor()).build();
private PathMatcher pathMatcher = new AntPathMatcher();
private volatile int cacheLimit = DEFAULT_CACHE_LIMIT;
private String selectorHeaderName = "selector";
private volatile boolean selectorHeaderInUse = false;
private final ExpressionParser expressionParser = new SpelExpressionParser();
private final DestinationCache destinationCache = new DestinationCache();
private final SessionSubscriptionRegistry subscriptionRegistry = new SessionSubscriptionRegistry();
/**
* Specify the {@link PathMatcher} to use.
*/
public void setPathMatcher(PathMatcher pathMatcher) {
this.pathMatcher = pathMatcher;
}
/**
* Return the configured {@link PathMatcher}.
*/
public PathMatcher getPathMatcher() {
return this.pathMatcher;
}
/**
* Specify the maximum number of entries for the resolved destination cache.
* Default is 1024.
*/
public void setCacheLimit(int cacheLimit) {
this.cacheLimit = cacheLimit;
}
/**
* Return the maximum number of entries for the resolved destination cache.
*/
public int getCacheLimit() {
return this.cacheLimit;
}
/**
* Configure the name of a header that a subscription message can have for
* the purpose of filtering messages matched to the subscription. The header
* value is expected to be a Spring EL boolean expression to be applied to
* the headers of messages matched to the subscription.
* <p>For example:
* <pre>
* headers.foo == 'bar'
* </pre>
* <p>By default this is set to "selector". You can set it to a different
* name, or to {@code null} to turn off support for a selector header.
* @param selectorHeaderName the name to use for a selector header
* @since 4.2
*/
public void setSelectorHeaderName(String selectorHeaderName) {
this.selectorHeaderName = StringUtils.hasText(selectorHeaderName) ? selectorHeaderName : null;
}
/**
* Return the name for the selector header name.
* @since 4.2
*/
public String getSelectorHeaderName() {
return this.selectorHeaderName;
}
@Override
protected void addSubscriptionInternal(
String sessionId, String subsId, String destination, Message<?> message) {
Expression expression = getSelectorExpression(message.getHeaders());
this.subscriptionRegistry.addSubscription(sessionId, subsId, destination, expression);
this.destinationCache.updateAfterNewSubscription(destination, sessionId, subsId);
}
private Expression getSelectorExpression(MessageHeaders headers) {
Expression expression = null;
if (getSelectorHeaderName() != null) {
String selector = SimpMessageHeaderAccessor.getFirstNativeHeader(getSelectorHeaderName(), headers);
if (selector != null) {
try {
expression = this.expressionParser.parseExpression(selector);
this.selectorHeaderInUse = true;
if (logger.isTraceEnabled()) {
logger.trace("Subscription selector: [" + selector + "]");
}
}
catch (Throwable ex) {
if (logger.isDebugEnabled()) {
logger.debug("Failed to parse selector: " + selector, ex);
}
}
}
}
return expression;
}
@Override
protected void removeSubscriptionInternal(String sessionId, String subsId, Message<?> message) {
SessionSubscriptionInfo info = this.subscriptionRegistry.getSubscriptions(sessionId);
if (info != null) {
String destination = info.removeSubscription(subsId);
if (destination != null) {
this.destinationCache.updateAfterRemovedSubscription(sessionId, subsId);
}
}
}
@Override
public void unregisterAllSubscriptions(String sessionId) {
SessionSubscriptionInfo info = this.subscriptionRegistry.removeSubscriptions(sessionId);
if (info != null) {
this.destinationCache.updateAfterRemovedSession(info);
}
}
@Override
protected MultiValueMap<String, String> findSubscriptionsInternal(String destination, Message<?> message) {
MultiValueMap<String, String> result = this.destinationCache.getSubscriptions(destination, message);
return filterSubscriptions(result, message);
}
private MultiValueMap<String, String> filterSubscriptions(
MultiValueMap<String, String> allMatches, Message<?> message) {
if (!this.selectorHeaderInUse) {
return allMatches;
}
MultiValueMap<String, String> result = new LinkedMultiValueMap<String, String>(allMatches.size());
for (String sessionId : allMatches.keySet()) {
for (String subId : allMatches.get(sessionId)) {
SessionSubscriptionInfo info = this.subscriptionRegistry.getSubscriptions(sessionId);
if (info == null) {
continue;
}
Subscription sub = info.getSubscription(subId);
if (sub == null) {
continue;
}
Expression expression = sub.getSelectorExpression();
if (expression == null) {
result.add(sessionId, subId);
continue;
}
try {
if (Boolean.TRUE.equals(expression.getValue(messageEvalContext, message, Boolean.class))) {
result.add(sessionId, subId);
}
}
catch (SpelEvaluationException ex) {
if (logger.isDebugEnabled()) {
logger.debug("Failed to evaluate selector: " + ex.getMessage());
}
}
catch (Throwable ex) {
logger.debug("Failed to evaluate selector", ex);
}
}
}
return result;
}
@Override
public String toString() {
return "DefaultSubscriptionRegistry[" + this.destinationCache + ", " + this.subscriptionRegistry + "]";
}
/**
* A cache for destinations previously resolved via
* {@link DefaultSubscriptionRegistry#findSubscriptionsInternal(String, Message)}
*/
private class DestinationCache {
/** Map from destination -> <sessionId, subscriptionId> for fast look-ups */
private final Map<String, LinkedMultiValueMap<String, String>> accessCache =
new ConcurrentHashMap<String, LinkedMultiValueMap<String, String>>(DEFAULT_CACHE_LIMIT);
/** Map from destination -> <sessionId, subscriptionId> with locking */
@SuppressWarnings("serial")
private final Map<String, LinkedMultiValueMap<String, String>> updateCache =
new LinkedHashMap<String, LinkedMultiValueMap<String, String>>(DEFAULT_CACHE_LIMIT, 0.75f, true) {
@Override
protected boolean removeEldestEntry(Map.Entry<String, LinkedMultiValueMap<String, String>> eldest) {
if (size() > getCacheLimit()) {
accessCache.remove(eldest.getKey());
return true;
}
else {
return false;
}
}
};
public LinkedMultiValueMap<String, String> getSubscriptions(String destination, Message<?> message) {
LinkedMultiValueMap<String, String> result = this.accessCache.get(destination);
if (result == null) {
synchronized (this.updateCache) {
result = new LinkedMultiValueMap<String, String>();
for (SessionSubscriptionInfo info : subscriptionRegistry.getAllSubscriptions()) {
for (String destinationPattern : info.getDestinations()) {
if (getPathMatcher().match(destinationPattern, destination)) {
for (Subscription subscription : info.getSubscriptions(destinationPattern)) {
result.add(info.sessionId, subscription.getId());
}
}
}
}
if (!result.isEmpty()) {
this.updateCache.put(destination, result.deepCopy());
this.accessCache.put(destination, result);
}
}
}
return result;
}
public void updateAfterNewSubscription(String destination, String sessionId, String subsId) {
synchronized (this.updateCache) {
for (Map.Entry<String, LinkedMultiValueMap<String, String>> entry : this.updateCache.entrySet()) {
String cachedDestination = entry.getKey();
if (getPathMatcher().match(destination, cachedDestination)) {
LinkedMultiValueMap<String, String> subs = entry.getValue();
// Subscription id's may also be populated via getSubscriptions()
List<String> subsForSession = subs.get(sessionId);
if (subsForSession == null || !subsForSession.contains(subsId)) {
subs.add(sessionId, subsId);
this.accessCache.put(cachedDestination, subs.deepCopy());
}
}
}
}
}
public void updateAfterRemovedSubscription(String sessionId, String subsId) {
synchronized (this.updateCache) {
Set<String> destinationsToRemove = new HashSet<String>();
for (Map.Entry<String, LinkedMultiValueMap<String, String>> entry : this.updateCache.entrySet()) {
String destination = entry.getKey();
LinkedMultiValueMap<String, String> sessionMap = entry.getValue();
List<String> subscriptions = sessionMap.get(sessionId);
if (subscriptions != null) {
subscriptions.remove(subsId);
if (subscriptions.isEmpty()) {
sessionMap.remove(sessionId);
}
if (sessionMap.isEmpty()) {
destinationsToRemove.add(destination);
}
else {
this.accessCache.put(destination, sessionMap.deepCopy());
}
}
}
for (String destination : destinationsToRemove) {
this.updateCache.remove(destination);
this.accessCache.remove(destination);
}
}
}
public void updateAfterRemovedSession(SessionSubscriptionInfo info) {
synchronized (this.updateCache) {
Set<String> destinationsToRemove = new HashSet<String>();
for (Map.Entry<String, LinkedMultiValueMap<String, String>> entry : this.updateCache.entrySet()) {
String destination = entry.getKey();
LinkedMultiValueMap<String, String> sessionMap = entry.getValue();
if (sessionMap.remove(info.getSessionId()) != null) {
if (sessionMap.isEmpty()) {
destinationsToRemove.add(destination);
}
else {
this.accessCache.put(destination, sessionMap.deepCopy());
}
}
}
for (String destination : destinationsToRemove) {
this.updateCache.remove(destination);
this.accessCache.remove(destination);
}
}
}
@Override
public String toString() {
return "cache[" + this.accessCache.size() + " destination(s)]";
}
}
/**
* Provide access to session subscriptions by sessionId.
*/
private static class SessionSubscriptionRegistry {
// sessionId -> SessionSubscriptionInfo
private final ConcurrentMap<String, SessionSubscriptionInfo> sessions =
new ConcurrentHashMap<String, SessionSubscriptionInfo>();
public SessionSubscriptionInfo getSubscriptions(String sessionId) {
return this.sessions.get(sessionId);
}
public Collection<SessionSubscriptionInfo> getAllSubscriptions() {
return this.sessions.values();
}
public SessionSubscriptionInfo addSubscription(String sessionId, String subscriptionId,
String destination, Expression selectorExpression) {
SessionSubscriptionInfo info = this.sessions.get(sessionId);
if (info == null) {
info = new SessionSubscriptionInfo(sessionId);
SessionSubscriptionInfo value = this.sessions.putIfAbsent(sessionId, info);
if (value != null) {
info = value;
}
}
info.addSubscription(destination, subscriptionId, selectorExpression);
return info;
}
public SessionSubscriptionInfo removeSubscriptions(String sessionId) {
return this.sessions.remove(sessionId);
}
@Override
public String toString() {
return "registry[" + this.sessions.size() + " sessions]";
}
}
/**
* Hold subscriptions for a session.
*/
private static class SessionSubscriptionInfo {
private final String sessionId;
// destination -> subscriptions
private final Map<String, Set<Subscription>> destinationLookup =
new ConcurrentHashMap<String, Set<Subscription>>(4);
public SessionSubscriptionInfo(String sessionId) {
Assert.notNull(sessionId, "'sessionId' must not be null");
this.sessionId = sessionId;
}
public String getSessionId() {
return this.sessionId;
}
public Set<String> getDestinations() {
return this.destinationLookup.keySet();
}
public Set<Subscription> getSubscriptions(String destination) {
return this.destinationLookup.get(destination);
}
public Subscription getSubscription(String subscriptionId) {
for (Map.Entry<String, Set<DefaultSubscriptionRegistry.Subscription>> destinationEntry : this.destinationLookup.entrySet()) {
Set<Subscription> subs = destinationEntry.getValue();
if (subs != null) {
for (Subscription sub : subs) {
if (sub.getId().equalsIgnoreCase(subscriptionId)) {
return sub;
}
}
}
}
return null;
}
public void addSubscription(String destination, String subscriptionId, Expression selectorExpression) {
Set<Subscription> subs = this.destinationLookup.get(destination);
if (subs == null) {
synchronized (this.destinationLookup) {
subs = this.destinationLookup.get(destination);
if (subs == null) {
subs = new CopyOnWriteArraySet<Subscription>();
this.destinationLookup.put(destination, subs);
}
}
}
subs.add(new Subscription(subscriptionId, selectorExpression));
}
public String removeSubscription(String subscriptionId) {
for (Map.Entry<String, Set<DefaultSubscriptionRegistry.Subscription>> destinationEntry : this.destinationLookup.entrySet()) {
Set<Subscription> subs = destinationEntry.getValue();
if (subs != null) {
for (Subscription sub : subs) {
if (sub.getId().equals(subscriptionId) && subs.remove(sub)) {
synchronized (this.destinationLookup) {
if (subs.isEmpty()) {
this.destinationLookup.remove(destinationEntry.getKey());
}
}
return destinationEntry.getKey();
}
}
}
}
return null;
}
@Override
public String toString() {
return "[sessionId=" + this.sessionId + ", subscriptions=" + this.destinationLookup + "]";
}
}
private static final class Subscription {
private final String id;
private final Expression selectorExpression;
public Subscription(String id, Expression selector) {
Assert.notNull(id, "Subscription id must not be null");
this.id = id;
this.selectorExpression = selector;
}
public String getId() {
return this.id;
}
public Expression getSelectorExpression() {
return this.selectorExpression;
}
@Override
public boolean equals(Object other) {
return (this == other || (other instanceof Subscription && this.id.equals(((Subscription) other).id)));
}
@Override
public int hashCode() {
return this.id.hashCode();
}
@Override
public String toString() {
return "subscription(id=" + this.id + ")";
}
}
private static class SimpMessageHeaderPropertyAccessor implements PropertyAccessor {
@Override
public Class<?>[] getSpecificTargetClasses() {
return new Class<?>[] {Message.class, MessageHeaders.class};
}
@Override
public boolean canRead(EvaluationContext context, Object target, String name) {
return true;
}
@Override
public TypedValue read(EvaluationContext context, Object target, String name) {
Object value;
if (target instanceof Message) {
value = name.equals("headers") ? ((Message) target).getHeaders() : null;
}
else if (target instanceof MessageHeaders) {
MessageHeaders headers = (MessageHeaders) target;
SimpMessageHeaderAccessor accessor =
MessageHeaderAccessor.getAccessor(headers, SimpMessageHeaderAccessor.class);
Assert.state(accessor != null, "No SimpMessageHeaderAccessor");
if ("destination".equalsIgnoreCase(name)) {
value = accessor.getDestination();
}
else {
value = accessor.getFirstNativeHeader(name);
if (value == null) {
value = headers.get(name);
}
}
}
else {
// Should never happen...
throw new IllegalStateException("Expected Message or MessageHeaders.");
}
return new TypedValue(value);
}
@Override
public boolean canWrite(EvaluationContext context, Object target, String name) {
return false;
}
@Override
public void write(EvaluationContext context, Object target, String name, Object value) {
}
}
}
| 18,696 | 31.180723 | 128 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/DefaultSubscriptionRegistry2.java
|
/*
* Copyright 2002-2017 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.messaging.simp.broker;
import java.util.Collection;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CopyOnWriteArraySet;
import org.springframework.expression.EvaluationContext;
import org.springframework.expression.Expression;
import org.springframework.expression.ExpressionParser;
import org.springframework.expression.PropertyAccessor;
import org.springframework.expression.TypedValue;
import org.springframework.expression.spel.SpelEvaluationException;
import org.springframework.expression.spel.standard.SpelExpressionParser;
import org.springframework.expression.spel.support.SimpleEvaluationContext;
import org.springframework.messaging.Message;
import org.springframework.messaging.MessageHeaders;
import org.springframework.messaging.simp.SimpMessageHeaderAccessor;
import org.springframework.messaging.support.MessageHeaderAccessor;
import org.springframework.util.AntPathMatcher;
import org.springframework.util.Assert;
import org.springframework.util.LinkedMultiValueMap;
import org.springframework.util.MultiValueMap;
import org.springframework.util.PathMatcher;
/**
* Implementation of {@link SubscriptionRegistry} that stores subscriptions
* in memory and uses a {@link org.springframework.util.PathMatcher PathMatcher}
* for matching destinations.
*
* <p>As of 4.2, this class supports a {@link #setSelectorHeaderName selector}
* header on subscription messages with Spring EL expressions evaluated against
* the headers to filter out messages in addition to destination matching.
*
* @author Rossen Stoyanchev
* @author Sebastien Deleuze
* @author Juergen Hoeller
* @since 4.0
*/
public class DefaultSubscriptionRegistry extends AbstractSubscriptionRegistry {
/** Default maximum number of entries for the destination cache: 1024 */
public static final int DEFAULT_CACHE_LIMIT = 1024;
/** Static evaluation context to reuse */
private static EvaluationContext messageEvalContext =
SimpleEvaluationContext.forPropertyAccessors(new SimpMessageHeaderPropertyAccessor()).build();
private PathMatcher pathMatcher = new AntPathMatcher();
private volatile int cacheLimit = DEFAULT_CACHE_LIMIT;
private String selectorHeaderName = "selector";
private volatile boolean selectorHeaderInUse = false;
private final ExpressionParser expressionParser = new SpelExpressionParser();
private final DestinationCache destinationCache = new DestinationCache();
private final SessionSubscriptionRegistry subscriptionRegistry = new SessionSubscriptionRegistry();
/**
* Specify the {@link PathMatcher} to use.
*/
public void setPathMatcher(PathMatcher pathMatcher) {
this.pathMatcher = pathMatcher;
}
/**
* Return the configured {@link PathMatcher}.
*/
public PathMatcher getPathMatcher() {
return this.pathMatcher;
}
/**
* Specify the maximum number of entries for the resolved destination cache.
* Default is 1024.
*/
public void setCacheLimit(int cacheLimit) {
this.cacheLimit = cacheLimit;
}
/**
* Return the maximum number of entries for the resolved destination cache.
*/
public int getCacheLimit() {
return this.cacheLimit;
}
/**
* Configure the name of a selector header that a subscription message can
* have in order to filter messages based on their headers. The value of the
* header can use Spring EL expressions against message headers.
* <p>For example the following expression expects a header called "foo" to
* have the value "bar":
* <pre>
* headers.foo == 'bar'
* </pre>
* <p>By default this is set to "selector".
* @since 4.2
*/
public void setSelectorHeaderName(String selectorHeaderName) {
Assert.notNull(selectorHeaderName, "'selectorHeaderName' must not be null");
this.selectorHeaderName = selectorHeaderName;
}
/**
* Return the name for the selector header.
* @since 4.2
*/
public String getSelectorHeaderName() {
return this.selectorHeaderName;
}
@Override
protected void addSubscriptionInternal(
String sessionId, String subsId, String destination, Message<?> message) {
Expression expression = null;
MessageHeaders headers = message.getHeaders();
String selector = SimpMessageHeaderAccessor.getFirstNativeHeader(getSelectorHeaderName(), headers);
if (selector != null) {
try {
expression = this.expressionParser.parseExpression(selector);
this.selectorHeaderInUse = true;
if (logger.isTraceEnabled()) {
logger.trace("Subscription selector: [" + selector + "]");
}
}
catch (Throwable ex) {
if (logger.isDebugEnabled()) {
logger.debug("Failed to parse selector: " + selector, ex);
}
}
}
this.subscriptionRegistry.addSubscription(sessionId, subsId, destination, expression);
this.destinationCache.updateAfterNewSubscription(destination, sessionId, subsId);
}
@Override
protected void removeSubscriptionInternal(String sessionId, String subsId, Message<?> message) {
SessionSubscriptionInfo info = this.subscriptionRegistry.getSubscriptions(sessionId);
if (info != null) {
String destination = info.removeSubscription(subsId);
if (destination != null) {
this.destinationCache.updateAfterRemovedSubscription(sessionId, subsId);
}
}
}
@Override
public void unregisterAllSubscriptions(String sessionId) {
SessionSubscriptionInfo info = this.subscriptionRegistry.removeSubscriptions(sessionId);
if (info != null) {
this.destinationCache.updateAfterRemovedSession(info);
}
}
@Override
protected MultiValueMap<String, String> findSubscriptionsInternal(String destination, Message<?> message) {
MultiValueMap<String, String> result = this.destinationCache.getSubscriptions(destination, message);
return filterSubscriptions(result, message);
}
private MultiValueMap<String, String> filterSubscriptions(
MultiValueMap<String, String> allMatches, Message<?> message) {
if (!this.selectorHeaderInUse) {
return allMatches;
}
MultiValueMap<String, String> result = new LinkedMultiValueMap<String, String>(allMatches.size());
for (String sessionId : allMatches.keySet()) {
for (String subId : allMatches.get(sessionId)) {
SessionSubscriptionInfo info = this.subscriptionRegistry.getSubscriptions(sessionId);
if (info == null) {
continue;
}
Subscription sub = info.getSubscription(subId);
if (sub == null) {
continue;
}
Expression expression = sub.getSelectorExpression();
if (expression == null) {
result.add(sessionId, subId);
continue;
}
try {
if (Boolean.TRUE.equals(expression.getValue(messageEvalContext, message, Boolean.class))) {
result.add(sessionId, subId);
}
}
catch (SpelEvaluationException ex) {
if (logger.isDebugEnabled()) {
logger.debug("Failed to evaluate selector: " + ex.getMessage());
}
}
catch (Throwable ex) {
logger.debug("Failed to evaluate selector", ex);
}
}
}
return result;
}
@Override
public String toString() {
return "DefaultSubscriptionRegistry[" + this.destinationCache + ", " + this.subscriptionRegistry + "]";
}
/**
* A cache for destinations previously resolved via
* {@link DefaultSubscriptionRegistry#findSubscriptionsInternal(String, Message)}
*/
private class DestinationCache {
/** Map from destination -> <sessionId, subscriptionId> for fast look-ups */
private final Map<String, LinkedMultiValueMap<String, String>> accessCache =
new ConcurrentHashMap<String, LinkedMultiValueMap<String, String>>(DEFAULT_CACHE_LIMIT);
/** Map from destination -> <sessionId, subscriptionId> with locking */
@SuppressWarnings("serial")
private final Map<String, LinkedMultiValueMap<String, String>> updateCache =
new LinkedHashMap<String, LinkedMultiValueMap<String, String>>(DEFAULT_CACHE_LIMIT, 0.75f, true) {
@Override
protected boolean removeEldestEntry(Map.Entry<String, LinkedMultiValueMap<String, String>> eldest) {
if (size() > getCacheLimit()) {
accessCache.remove(eldest.getKey());
return true;
}
else {
return false;
}
}
};
public LinkedMultiValueMap<String, String> getSubscriptions(String destination, Message<?> message) {
LinkedMultiValueMap<String, String> result = this.accessCache.get(destination);
if (result == null) {
synchronized (this.updateCache) {
result = new LinkedMultiValueMap<String, String>();
for (SessionSubscriptionInfo info : subscriptionRegistry.getAllSubscriptions()) {
for (String destinationPattern : info.getDestinations()) {
if (getPathMatcher().match(destinationPattern, destination)) {
for (Subscription subscription : info.getSubscriptions(destinationPattern)) {
result.add(info.sessionId, subscription.getId());
}
}
}
}
if (!result.isEmpty()) {
this.updateCache.put(destination, result.deepCopy());
this.accessCache.put(destination, result);
}
}
}
return result;
}
public void updateAfterNewSubscription(String destination, String sessionId, String subsId) {
synchronized (this.updateCache) {
for (Map.Entry<String, LinkedMultiValueMap<String, String>> entry : this.updateCache.entrySet()) {
String cachedDestination = entry.getKey();
if (getPathMatcher().match(destination, cachedDestination)) {
LinkedMultiValueMap<String, String> subs = entry.getValue();
// Subscription id's may also be populated via getSubscriptions()
List<String> subsForSession = subs.get(sessionId);
if (subsForSession == null || !subsForSession.contains(subsId)) {
subs.add(sessionId, subsId);
this.accessCache.put(cachedDestination, subs.deepCopy());
}
}
}
}
}
public void updateAfterRemovedSubscription(String sessionId, String subsId) {
synchronized (this.updateCache) {
Set<String> destinationsToRemove = new HashSet<String>();
for (Map.Entry<String, LinkedMultiValueMap<String, String>> entry : this.updateCache.entrySet()) {
String destination = entry.getKey();
LinkedMultiValueMap<String, String> sessionMap = entry.getValue();
List<String> subscriptions = sessionMap.get(sessionId);
if (subscriptions != null) {
subscriptions.remove(subsId);
if (subscriptions.isEmpty()) {
sessionMap.remove(sessionId);
}
if (sessionMap.isEmpty()) {
destinationsToRemove.add(destination);
}
else {
this.accessCache.put(destination, sessionMap.deepCopy());
}
}
}
for (String destination : destinationsToRemove) {
this.updateCache.remove(destination);
this.accessCache.remove(destination);
}
}
}
public void updateAfterRemovedSession(SessionSubscriptionInfo info) {
synchronized (this.updateCache) {
Set<String> destinationsToRemove = new HashSet<String>();
for (Map.Entry<String, LinkedMultiValueMap<String, String>> entry : this.updateCache.entrySet()) {
String destination = entry.getKey();
LinkedMultiValueMap<String, String> sessionMap = entry.getValue();
if (sessionMap.remove(info.getSessionId()) != null) {
if (sessionMap.isEmpty()) {
destinationsToRemove.add(destination);
}
else {
this.accessCache.put(destination, sessionMap.deepCopy());
}
}
}
for (String destination : destinationsToRemove) {
this.updateCache.remove(destination);
this.accessCache.remove(destination);
}
}
}
@Override
public String toString() {
return "cache[" + this.accessCache.size() + " destination(s)]";
}
}
/**
* Provide access to session subscriptions by sessionId.
*/
private static class SessionSubscriptionRegistry {
// sessionId -> SessionSubscriptionInfo
private final ConcurrentMap<String, SessionSubscriptionInfo> sessions =
new ConcurrentHashMap<String, SessionSubscriptionInfo>();
public SessionSubscriptionInfo getSubscriptions(String sessionId) {
return this.sessions.get(sessionId);
}
public Collection<SessionSubscriptionInfo> getAllSubscriptions() {
return this.sessions.values();
}
public SessionSubscriptionInfo addSubscription(String sessionId, String subscriptionId,
String destination, Expression selectorExpression) {
SessionSubscriptionInfo info = this.sessions.get(sessionId);
if (info == null) {
info = new SessionSubscriptionInfo(sessionId);
SessionSubscriptionInfo value = this.sessions.putIfAbsent(sessionId, info);
if (value != null) {
info = value;
}
}
info.addSubscription(destination, subscriptionId, selectorExpression);
return info;
}
public SessionSubscriptionInfo removeSubscriptions(String sessionId) {
return this.sessions.remove(sessionId);
}
@Override
public String toString() {
return "registry[" + this.sessions.size() + " sessions]";
}
}
/**
* Hold subscriptions for a session.
*/
private static class SessionSubscriptionInfo {
private final String sessionId;
// destination -> subscriptions
private final Map<String, Set<Subscription>> destinationLookup =
new ConcurrentHashMap<String, Set<Subscription>>(4);
public SessionSubscriptionInfo(String sessionId) {
Assert.notNull(sessionId, "'sessionId' must not be null");
this.sessionId = sessionId;
}
public String getSessionId() {
return this.sessionId;
}
public Set<String> getDestinations() {
return this.destinationLookup.keySet();
}
public Set<Subscription> getSubscriptions(String destination) {
return this.destinationLookup.get(destination);
}
public Subscription getSubscription(String subscriptionId) {
for (Map.Entry<String, Set<DefaultSubscriptionRegistry.Subscription>> destinationEntry : this.destinationLookup.entrySet()) {
Set<Subscription> subs = destinationEntry.getValue();
if (subs != null) {
for (Subscription sub : subs) {
if (sub.getId().equalsIgnoreCase(subscriptionId)) {
return sub;
}
}
}
}
return null;
}
public void addSubscription(String destination, String subscriptionId, Expression selectorExpression) {
Set<Subscription> subs = this.destinationLookup.get(destination);
if (subs == null) {
synchronized (this.destinationLookup) {
subs = this.destinationLookup.get(destination);
if (subs == null) {
subs = new CopyOnWriteArraySet<Subscription>();
this.destinationLookup.put(destination, subs);
}
}
}
subs.add(new Subscription(subscriptionId, selectorExpression));
}
public String removeSubscription(String subscriptionId) {
for (Map.Entry<String, Set<DefaultSubscriptionRegistry.Subscription>> destinationEntry : this.destinationLookup.entrySet()) {
Set<Subscription> subs = destinationEntry.getValue();
if (subs != null) {
for (Subscription sub : subs) {
if (sub.getId().equals(subscriptionId) && subs.remove(sub)) {
synchronized (this.destinationLookup) {
if (subs.isEmpty()) {
this.destinationLookup.remove(destinationEntry.getKey());
}
}
return destinationEntry.getKey();
}
}
}
}
return null;
}
@Override
public String toString() {
return "[sessionId=" + this.sessionId + ", subscriptions=" + this.destinationLookup + "]";
}
}
private static final class Subscription {
private final String id;
private final Expression selectorExpression;
public Subscription(String id, Expression selector) {
Assert.notNull(id, "Subscription id must not be null");
this.id = id;
this.selectorExpression = selector;
}
public String getId() {
return this.id;
}
public Expression getSelectorExpression() {
return this.selectorExpression;
}
@Override
public boolean equals(Object other) {
return (this == other || (other instanceof Subscription && this.id.equals(((Subscription) other).id)));
}
@Override
public int hashCode() {
return this.id.hashCode();
}
@Override
public String toString() {
return "subscription(id=" + this.id + ")";
}
}
private static class SimpMessageHeaderPropertyAccessor implements PropertyAccessor {
@Override
public Class<?>[] getSpecificTargetClasses() {
return new Class<?>[] {Message.class, MessageHeaders.class};
}
@Override
public boolean canRead(EvaluationContext context, Object target, String name) {
return true;
}
@Override
public TypedValue read(EvaluationContext context, Object target, String name) {
Object value;
if (target instanceof Message) {
value = name.equals("headers") ? ((Message) target).getHeaders() : null;
}
else if (target instanceof MessageHeaders) {
MessageHeaders headers = (MessageHeaders) target;
SimpMessageHeaderAccessor accessor =
MessageHeaderAccessor.getAccessor(headers, SimpMessageHeaderAccessor.class);
Assert.state(accessor != null, "No SimpMessageHeaderAccessor");
if ("destination".equalsIgnoreCase(name)) {
value = accessor.getDestination();
}
else {
value = accessor.getFirstNativeHeader(name);
if (value == null) {
value = headers.get(name);
}
}
}
else {
// Should never happen...
throw new IllegalStateException("Expected Message or MessageHeaders.");
}
return new TypedValue(value);
}
@Override
public boolean canWrite(EvaluationContext context, Object target, String name) {
return false;
}
@Override
public void write(EvaluationContext context, Object target, String name, Object value) {
}
}
}
| 18,347 | 31.020942 | 128 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/Downsampler.java
|
package com.bumptech.glide.load.resource.bitmap;
import android.annotation.TargetApi;
import android.graphics.Bitmap;
import android.graphics.Bitmap.Config;
import android.graphics.BitmapFactory;
import android.graphics.ColorSpace;
import android.os.Build;
import android.util.DisplayMetrics;
import android.util.Log;
import androidx.annotation.Nullable;
import com.bumptech.glide.load.DecodeFormat;
import com.bumptech.glide.load.ImageHeaderParser;
import com.bumptech.glide.load.ImageHeaderParser.ImageType;
import com.bumptech.glide.load.ImageHeaderParserUtils;
import com.bumptech.glide.load.Option;
import com.bumptech.glide.load.Options;
import com.bumptech.glide.load.PreferredColorSpace;
import com.bumptech.glide.load.engine.Resource;
import com.bumptech.glide.load.engine.bitmap_recycle.ArrayPool;
import com.bumptech.glide.load.engine.bitmap_recycle.BitmapPool;
import com.bumptech.glide.load.resource.bitmap.DownsampleStrategy.SampleSizeRounding;
import com.bumptech.glide.request.RequestOptions;
import com.bumptech.glide.request.target.Target;
import com.bumptech.glide.util.LogTime;
import com.bumptech.glide.util.Preconditions;
import com.bumptech.glide.util.Util;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.List;
import java.util.Queue;
import java.util.Set;
/**
* Downsamples, decodes, and rotates images according to their exif orientation using {@link
* BitmapFactory}.
*/
public final class Downsampler {
  static final String TAG = "Downsampler";
  /**
   * Indicates the {@link com.bumptech.glide.load.DecodeFormat} that will be used in conjunction
   * with the image format to determine the {@link android.graphics.Bitmap.Config} to provide to
   * {@link android.graphics.BitmapFactory.Options#inPreferredConfig} when decoding the image.
   */
  public static final Option<DecodeFormat> DECODE_FORMAT =
      Option.memory(
          "com.bumptech.glide.load.resource.bitmap.Downsampler.DecodeFormat", DecodeFormat.DEFAULT);
  /**
   * Sets the {@link PreferredColorSpace} that will be used along with the version of Android and
   * color space of the requested image to determine the final color space used to decode the image.
   *
   * <p>Refer to {@link PreferredColorSpace} for details on how this option works and its various
   * limitations.
   */
  public static final Option<PreferredColorSpace> PREFERRED_COLOR_SPACE =
      Option.memory(
          "com.bumptech.glide.load.resource.bitmap.Downsampler.PreferredColorSpace",
          PreferredColorSpace.SRGB);
  /**
   * Indicates the {@link com.bumptech.glide.load.resource.bitmap.DownsampleStrategy} option that
   * will be used to calculate the sample size to use to downsample an image given the original and
   * target dimensions of the image.
   *
   * @deprecated Use {@link DownsampleStrategy#OPTION} directly instead.
   */
  @Deprecated
  public static final Option<DownsampleStrategy> DOWNSAMPLE_STRATEGY = DownsampleStrategy.OPTION;
  /**
   * Ensure that the size of the bitmap is fixed to the requested width and height of the resource
   * from the caller. The final resource dimensions may differ from the requested width and height,
   * and thus setting this to true may result in the bitmap size differing from the resource
   * dimensions.
   *
   * <p>This can be used as a performance optimization for KitKat and above by fixing the size of
   * the bitmap for a collection of requested resources so that the bitmap pool will not need to
   * allocate new bitmaps for images of different sizes.
   */
  // Public API
  @SuppressWarnings("WeakerAccess")
  public static final Option<Boolean> FIX_BITMAP_SIZE_TO_REQUESTED_DIMENSIONS =
      Option.memory("com.bumptech.glide.load.resource.bitmap.Downsampler.FixBitmapSize", false);
  /**
   * Indicates that it's safe or unsafe to decode {@link Bitmap}s with {@link
   * Bitmap.Config#HARDWARE}.
   *
   * <p>Callers should almost never set this value to {@code true} manually. Glide will already do
   * so when Glide believes it's safe to do (when no transformations are applied). Instead, callers
   * can set this value to {@code false} to prevent Glide from decoding hardware bitmaps if Glide is
   * unable to detect that hardware bitmaps are unsafe. For example, you should set this to {@code
   * false} if you plan to draw it to a software {@link android.graphics.Canvas} or if you plan to
   * inspect the {@link Bitmap}s pixels with {@link Bitmap#getPixel(int, int)} or {@link
   * Bitmap#getPixels(int[], int, int, int, int, int, int)}.
   *
   * <p>Callers can disable hardware {@link Bitmap}s for all loads using {@link
   * com.bumptech.glide.GlideBuilder#setDefaultRequestOptions(RequestOptions)}.
   *
   * <p>This option is ignored unless we're on Android O+.
   */
  public static final Option<Boolean> ALLOW_HARDWARE_CONFIG =
      Option.memory(
          "com.bumptech.glide.load.resource.bitmap.Downsampler.AllowHardwareDecode", false);
  private static final String WBMP_MIME_TYPE = "image/vnd.wap.wbmp";
  private static final String ICO_MIME_TYPE = "image/x-ico";
  // Formats that BitmapFactory cannot downsample on older platforms; see the Build.VERSION check
  // in calculateScaling below.
  private static final Set<String> NO_DOWNSAMPLE_PRE_N_MIME_TYPES =
      Collections.unmodifiableSet(new HashSet<>(Arrays.asList(WBMP_MIME_TYPE, ICO_MIME_TYPE)));
  // No-op callbacks used by the convenience decode overload that takes no callbacks.
  private static final DecodeCallbacks EMPTY_CALLBACKS =
      new DecodeCallbacks() {
        @Override
        public void onObtainBounds() {
          // Do nothing.
        }
        @Override
        public void onDecodeComplete(BitmapPool bitmapPool, Bitmap downsampled) {
          // Do nothing.
        }
      };
  // Image types whose Bitmaps may be reused from the pool prior to KitKat; see shouldUsePool.
  private static final Set<ImageHeaderParser.ImageType> TYPES_THAT_USE_POOL_PRE_KITKAT =
      Collections.unmodifiableSet(
          EnumSet.of(
              ImageHeaderParser.ImageType.JPEG,
              ImageHeaderParser.ImageType.PNG_A,
              ImageHeaderParser.ImageType.PNG));
  // Pool of reusable BitmapFactory.Options instances, guarded by synchronizing on the queue.
  private static final Queue<BitmapFactory.Options> OPTIONS_QUEUE = Util.createQueue(0);
  // 10MB. This is the max image header size we can handle, we preallocate a much smaller buffer
  // but will resize up to this amount if necessary.
  private static final int MARK_POSITION = 10 * 1024 * 1024;
  private final BitmapPool bitmapPool;
  private final DisplayMetrics displayMetrics;
  private final ArrayPool byteArrayPool;
  private final List<ImageHeaderParser> parsers;
  private final HardwareConfigState hardwareConfigState = HardwareConfigState.getInstance();
  /**
   * @param parsers Parsers used to read image headers (type and EXIF orientation).
   * @param displayMetrics Provides the density applied to decoded Bitmaps.
   * @param bitmapPool Pool used to obtain reusable Bitmaps for decoding.
   * @param byteArrayPool Pool used to obtain temporary byte buffers for decode scratch space.
   */
  public Downsampler(
      List<ImageHeaderParser> parsers,
      DisplayMetrics displayMetrics,
      BitmapPool bitmapPool,
      ArrayPool byteArrayPool) {
    this.parsers = parsers;
    this.displayMetrics = Preconditions.checkNotNull(displayMetrics);
    this.bitmapPool = Preconditions.checkNotNull(bitmapPool);
    this.byteArrayPool = Preconditions.checkNotNull(byteArrayPool);
  }
  public boolean handles(@SuppressWarnings("unused") InputStream is) {
    // We expect Downsampler to handle any available type Android supports.
    return true;
  }
  public boolean handles(@SuppressWarnings("unused") ByteBuffer byteBuffer) {
    // We expect downsampler to handle any available type Android supports.
    return true;
  }
  /**
   * Returns a Bitmap decoded from the given {@link InputStream} that is rotated to match any EXIF
   * data present in the stream and that is downsampled according to the given dimensions and any
   * provided {@link com.bumptech.glide.load.resource.bitmap.DownsampleStrategy} option.
   *
   * @see #decode(InputStream, int, int, Options, DecodeCallbacks)
   */
  public Resource<Bitmap> decode(InputStream is, int outWidth, int outHeight, Options options)
      throws IOException {
    return decode(is, outWidth, outHeight, options, EMPTY_CALLBACKS);
  }
  /**
   * Returns a Bitmap decoded from the given {@link InputStream} that is rotated to match any EXIF
   * data present in the stream and that is downsampled according to the given dimensions and any
   * provided {@link com.bumptech.glide.load.resource.bitmap.DownsampleStrategy} option.
   *
   * <p>If a Bitmap is present in the {@link
   * com.bumptech.glide.load.engine.bitmap_recycle.BitmapPool} whose dimensions exactly match those
   * of the image for the given InputStream is available, the operation is much less expensive in
   * terms of memory.
   *
   * <p>The provided {@link java.io.InputStream} must return <code>true</code> from {@link
   * java.io.InputStream#markSupported()} and is expected to support a reasonably large mark limit
   * to accommodate reading large image headers (~5MB).
   *
   * @param is An {@link InputStream} to the data for the image.
   * @param requestedWidth The width the final image should be close to.
   * @param requestedHeight The height the final image should be close to.
   * @param options A set of options that may contain one or more supported options that influence
   *     how a Bitmap will be decoded from the given stream.
   * @param callbacks A set of callbacks allowing callers to optionally respond to various
   *     significant events during the decode process.
   * @return A new bitmap containing the image from the given InputStream, or recycle if recycle is
   *     not null.
   */
  @SuppressWarnings({"resource", "deprecation"})
  public Resource<Bitmap> decode(
      InputStream is,
      int requestedWidth,
      int requestedHeight,
      Options options,
      DecodeCallbacks callbacks)
      throws IOException {
    Preconditions.checkArgument(
        is.markSupported(), "You must provide an InputStream that supports" + " mark()");
    // Scratch buffer for BitmapFactory; returned to the pool in the finally block below.
    byte[] bytesForOptions = byteArrayPool.get(ArrayPool.STANDARD_BUFFER_SIZE_BYTES, byte[].class);
    BitmapFactory.Options bitmapFactoryOptions = getDefaultOptions();
    bitmapFactoryOptions.inTempStorage = bytesForOptions;
    DecodeFormat decodeFormat = options.get(DECODE_FORMAT);
    PreferredColorSpace preferredColorSpace = options.get(PREFERRED_COLOR_SPACE);
    DownsampleStrategy downsampleStrategy = options.get(DownsampleStrategy.OPTION);
    boolean fixBitmapToRequestedDimensions = options.get(FIX_BITMAP_SIZE_TO_REQUESTED_DIMENSIONS);
    boolean isHardwareConfigAllowed =
        options.get(ALLOW_HARDWARE_CONFIG) != null && options.get(ALLOW_HARDWARE_CONFIG);
    try {
      Bitmap result =
          decodeFromWrappedStreams(
              is,
              bitmapFactoryOptions,
              downsampleStrategy,
              decodeFormat,
              preferredColorSpace,
              isHardwareConfigAllowed,
              requestedWidth,
              requestedHeight,
              fixBitmapToRequestedDimensions,
              callbacks);
      return BitmapResource.obtain(result, bitmapPool);
    } finally {
      // Always return pooled resources, even if the decode throws.
      releaseOptions(bitmapFactoryOptions);
      byteArrayPool.put(bytesForOptions);
    }
  }
  /**
   * Performs the actual decode: reads source dimensions and EXIF orientation, configures scaling,
   * Bitmap config and (on O+/P+) color space on {@code options}, decodes the stream, and finally
   * rotates the result to match its EXIF orientation.
   */
  private Bitmap decodeFromWrappedStreams(
      InputStream is,
      BitmapFactory.Options options,
      DownsampleStrategy downsampleStrategy,
      DecodeFormat decodeFormat,
      PreferredColorSpace preferredColorSpace,
      boolean isHardwareConfigAllowed,
      int requestedWidth,
      int requestedHeight,
      boolean fixBitmapToRequestedDimensions,
      DecodeCallbacks callbacks)
      throws IOException {
    long startTime = LogTime.getLogTime();
    int[] sourceDimensions = getDimensions(is, options, callbacks, bitmapPool);
    int sourceWidth = sourceDimensions[0];
    int sourceHeight = sourceDimensions[1];
    String sourceMimeType = options.outMimeType;
    // If we failed to obtain the image dimensions, we may end up with an incorrectly sized Bitmap,
    // so we want to use a mutable Bitmap type. One way this can happen is if the image header is so
    // large (10mb+) that our attempt to use inJustDecodeBounds fails and we're forced to decode the
    // full size image.
    if (sourceWidth == -1 || sourceHeight == -1) {
      isHardwareConfigAllowed = false;
    }
    int orientation = ImageHeaderParserUtils.getOrientation(parsers, is, byteArrayPool);
    int degreesToRotate = TransformationUtils.getExifOrientationDegrees(orientation);
    boolean isExifOrientationRequired = TransformationUtils.isExifOrientationRequired(orientation);
    // SIZE_ORIGINAL means "keep the source dimension" on that axis.
    int targetWidth = requestedWidth == Target.SIZE_ORIGINAL ? sourceWidth : requestedWidth;
    int targetHeight = requestedHeight == Target.SIZE_ORIGINAL ? sourceHeight : requestedHeight;
    ImageType imageType = ImageHeaderParserUtils.getType(parsers, is, byteArrayPool);
    calculateScaling(
        imageType,
        is,
        callbacks,
        bitmapPool,
        downsampleStrategy,
        degreesToRotate,
        sourceWidth,
        sourceHeight,
        targetWidth,
        targetHeight,
        options);
    calculateConfig(
        is,
        decodeFormat,
        isHardwareConfigAllowed,
        isExifOrientationRequired,
        options,
        targetWidth,
        targetHeight);
    boolean isKitKatOrGreater = Build.VERSION.SDK_INT >= Build.VERSION_CODES.KITKAT;
    // Prior to KitKat, the inBitmap size must exactly match the size of the bitmap we're decoding.
    if ((options.inSampleSize == 1 || isKitKatOrGreater) && shouldUsePool(imageType)) {
      int expectedWidth;
      int expectedHeight;
      if (sourceWidth >= 0
          && sourceHeight >= 0
          && fixBitmapToRequestedDimensions
          && isKitKatOrGreater) {
        expectedWidth = targetWidth;
        expectedHeight = targetHeight;
      } else {
        // Predict the decoded size from the sample size and density scaling so we can request a
        // correctly sized reusable Bitmap from the pool.
        float densityMultiplier =
            isScaling(options) ? (float) options.inTargetDensity / options.inDensity : 1f;
        int sampleSize = options.inSampleSize;
        int downsampledWidth = (int) Math.ceil(sourceWidth / (float) sampleSize);
        int downsampledHeight = (int) Math.ceil(sourceHeight / (float) sampleSize);
        expectedWidth = Math.round(downsampledWidth * densityMultiplier);
        expectedHeight = Math.round(downsampledHeight * densityMultiplier);
        if (Log.isLoggable(TAG, Log.VERBOSE)) {
          Log.v(
              TAG,
              "Calculated target ["
                  + expectedWidth
                  + "x"
                  + expectedHeight
                  + "] for source"
                  + " ["
                  + sourceWidth
                  + "x"
                  + sourceHeight
                  + "]"
                  + ", sampleSize: "
                  + sampleSize
                  + ", targetDensity: "
                  + options.inTargetDensity
                  + ", density: "
                  + options.inDensity
                  + ", density multiplier: "
                  + densityMultiplier);
        }
      }
      // If this isn't an image, or BitmapFactory was unable to parse the size, width and height
      // will be -1 here.
      if (expectedWidth > 0 && expectedHeight > 0) {
        setInBitmap(options, bitmapPool, expectedWidth, expectedHeight);
      }
    }
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.P) {
      // Only use Display P3 when explicitly preferred and the source is itself wide gamut.
      boolean isP3Eligible =
          preferredColorSpace == PreferredColorSpace.DISPLAY_P3
              && options.outColorSpace != null
              && options.outColorSpace.isWideGamut();
      options.inPreferredColorSpace =
          ColorSpace.get(isP3Eligible ? ColorSpace.Named.DISPLAY_P3 : ColorSpace.Named.SRGB);
    } else if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
      options.inPreferredColorSpace = ColorSpace.get(ColorSpace.Named.SRGB);
    }
    Bitmap downsampled = decodeStream(is, options, callbacks, bitmapPool);
    callbacks.onDecodeComplete(bitmapPool, downsampled);
    if (Log.isLoggable(TAG, Log.VERBOSE)) {
      logDecode(
          sourceWidth,
          sourceHeight,
          sourceMimeType,
          options,
          downsampled,
          requestedWidth,
          requestedHeight,
          startTime);
    }
    Bitmap rotated = null;
    if (downsampled != null) {
      // If we scaled, the Bitmap density will be our inTargetDensity. Here we correct it back to
      // the expected density dpi.
      downsampled.setDensity(displayMetrics.densityDpi);
      rotated = TransformationUtils.rotateImageExif(bitmapPool, downsampled, orientation);
      if (!downsampled.equals(rotated)) {
        bitmapPool.put(downsampled);
      }
    }
    return rotated;
  }
  /**
   * Populates the scaling-related fields of {@code options} (inSampleSize, inScaled, inDensity,
   * inTargetDensity) so that the decoded Bitmap approximates the target dimensions for the given
   * {@link DownsampleStrategy}, taking EXIF rotation and per-format rounding quirks into account.
   */
  private static void calculateScaling(
      ImageType imageType,
      InputStream is,
      DecodeCallbacks decodeCallbacks,
      BitmapPool bitmapPool,
      DownsampleStrategy downsampleStrategy,
      int degreesToRotate,
      int sourceWidth,
      int sourceHeight,
      int targetWidth,
      int targetHeight,
      BitmapFactory.Options options)
      throws IOException {
    // We can't downsample source content if we can't determine its dimensions.
    if (sourceWidth <= 0 || sourceHeight <= 0) {
      if (Log.isLoggable(TAG, Log.DEBUG)) {
        Log.d(
            TAG,
            "Unable to determine dimensions for: "
                + imageType
                + " with target ["
                + targetWidth
                + "x"
                + targetHeight
                + "]");
      }
      return;
    }
    int orientedSourceWidth = sourceWidth;
    int orientedSourceHeight = sourceHeight;
    // If we're rotating the image +-90 degrees, we need to downsample accordingly so the image
    // width is decreased to near our target's height and the image height is decreased to near
    // our target width.
    //noinspection SuspiciousNameCombination
    if (degreesToRotate == 90 || degreesToRotate == 270) {
      orientedSourceWidth = sourceHeight;
      orientedSourceHeight = sourceWidth;
    }
    final float exactScaleFactor =
        downsampleStrategy.getScaleFactor(
            orientedSourceWidth, orientedSourceHeight, targetWidth, targetHeight);
    if (exactScaleFactor <= 0f) {
      throw new IllegalArgumentException(
          "Cannot scale with factor: "
              + exactScaleFactor
              + " from: "
              + downsampleStrategy
              + ", source: ["
              + sourceWidth
              + "x"
              + sourceHeight
              + "]"
              + ", target: ["
              + targetWidth
              + "x"
              + targetHeight
              + "]");
    }
    SampleSizeRounding rounding =
        downsampleStrategy.getSampleSizeRounding(
            orientedSourceWidth, orientedSourceHeight, targetWidth, targetHeight);
    if (rounding == null) {
      throw new IllegalArgumentException("Cannot round with null rounding");
    }
    int outWidth = round(exactScaleFactor * orientedSourceWidth);
    int outHeight = round(exactScaleFactor * orientedSourceHeight);
    // NOTE(review): for a sufficiently small exactScaleFactor, round(...) above can produce 0,
    // which would make these divisions throw ArithmeticException - confirm callers can never
    // request such extreme scales.
    int widthScaleFactor = orientedSourceWidth / outWidth;
    int heightScaleFactor = orientedSourceHeight / outHeight;
    // TODO: This isn't really right for both CenterOutside and CenterInside. Consider allowing
    // DownsampleStrategy to pick, or trying to do something more sophisticated like picking the
    // scale factor that leads to an exact match.
    int scaleFactor =
        rounding == SampleSizeRounding.MEMORY
            ? Math.max(widthScaleFactor, heightScaleFactor)
            : Math.min(widthScaleFactor, heightScaleFactor);
    int powerOfTwoSampleSize;
    // BitmapFactory does not support downsampling wbmp files on platforms <= M. See b/27305903.
    if (Build.VERSION.SDK_INT <= 23
        && NO_DOWNSAMPLE_PRE_N_MIME_TYPES.contains(options.outMimeType)) {
      powerOfTwoSampleSize = 1;
    } else {
      powerOfTwoSampleSize = Math.max(1, Integer.highestOneBit(scaleFactor));
      if (rounding == SampleSizeRounding.MEMORY
          && powerOfTwoSampleSize < (1.f / exactScaleFactor)) {
        powerOfTwoSampleSize = powerOfTwoSampleSize << 1;
      }
    }
    // Here we mimic framework logic for determining how inSampleSize division is rounded on various
    // versions of Android. The logic here has been tested on emulators for Android versions 15-26.
    // PNG - Always uses floor
    // JPEG - Always uses ceiling
    // Webp - Prior to N, always uses floor. At and after N, always uses round.
    options.inSampleSize = powerOfTwoSampleSize;
    int powerOfTwoWidth;
    int powerOfTwoHeight;
    if (imageType == ImageType.JPEG) {
      // libjpegturbo can downsample up to a sample size of 8. libjpegturbo uses ceiling to round.
      // After libjpegturbo's native rounding, skia does a secondary scale using floor
      // (integer division). Here we replicate that logic.
      int nativeScaling = Math.min(powerOfTwoSampleSize, 8);
      powerOfTwoWidth = (int) Math.ceil(orientedSourceWidth / (float) nativeScaling);
      powerOfTwoHeight = (int) Math.ceil(orientedSourceHeight / (float) nativeScaling);
      int secondaryScaling = powerOfTwoSampleSize / 8;
      if (secondaryScaling > 0) {
        powerOfTwoWidth = powerOfTwoWidth / secondaryScaling;
        powerOfTwoHeight = powerOfTwoHeight / secondaryScaling;
      }
    } else if (imageType == ImageType.PNG || imageType == ImageType.PNG_A) {
      powerOfTwoWidth = (int) Math.floor(orientedSourceWidth / (float) powerOfTwoSampleSize);
      powerOfTwoHeight = (int) Math.floor(orientedSourceHeight / (float) powerOfTwoSampleSize);
    } else if (imageType == ImageType.WEBP || imageType == ImageType.WEBP_A) {
      if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.N) {
        powerOfTwoWidth = Math.round(orientedSourceWidth / (float) powerOfTwoSampleSize);
        powerOfTwoHeight = Math.round(orientedSourceHeight / (float) powerOfTwoSampleSize);
      } else {
        powerOfTwoWidth = (int) Math.floor(orientedSourceWidth / (float) powerOfTwoSampleSize);
        powerOfTwoHeight = (int) Math.floor(orientedSourceHeight / (float) powerOfTwoSampleSize);
      }
    } else if (orientedSourceWidth % powerOfTwoSampleSize != 0
        || orientedSourceHeight % powerOfTwoSampleSize != 0) {
      // If we're not confident the image is in one of our types, fall back to checking the
      // dimensions again. inJustDecodeBounds decodes do obey inSampleSize.
      int[] dimensions = getDimensions(is, options, decodeCallbacks, bitmapPool);
      // Power of two downsampling in BitmapFactory uses a variety of random factors to determine
      // rounding that we can't reliably replicate for all image formats. Use ceiling here to make
      // sure that we at least provide a Bitmap that's large enough to fit the content we're going
      // to load.
      powerOfTwoWidth = dimensions[0];
      powerOfTwoHeight = dimensions[1];
    } else {
      powerOfTwoWidth = orientedSourceWidth / powerOfTwoSampleSize;
      powerOfTwoHeight = orientedSourceHeight / powerOfTwoSampleSize;
    }
    double adjustedScaleFactor =
        downsampleStrategy.getScaleFactor(
            powerOfTwoWidth, powerOfTwoHeight, targetWidth, targetHeight);
    // Density scaling is only supported if inBitmap is null prior to KitKat. Avoid setting
    // densities here so we calculate the final Bitmap size correctly.
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.KITKAT) {
      options.inTargetDensity = adjustTargetDensityForError(adjustedScaleFactor);
      options.inDensity = getDensityMultiplier(adjustedScaleFactor);
    }
    if (isScaling(options)) {
      options.inScaled = true;
    } else {
      options.inDensity = options.inTargetDensity = 0;
    }
    if (Log.isLoggable(TAG, Log.VERBOSE)) {
      Log.v(
          TAG,
          "Calculate scaling"
              + ", source: ["
              + sourceWidth
              + "x"
              + sourceHeight
              + "]"
              + ", degreesToRotate: "
              + degreesToRotate
              + ", target: ["
              + targetWidth
              + "x"
              + targetHeight
              + "]"
              + ", power of two scaled: ["
              + powerOfTwoWidth
              + "x"
              + powerOfTwoHeight
              + "]"
              + ", exact scale factor: "
              + exactScaleFactor
              + ", power of 2 sample size: "
              + powerOfTwoSampleSize
              + ", adjusted scale factor: "
              + adjustedScaleFactor
              + ", target density: "
              + options.inTargetDensity
              + ", density: "
              + options.inDensity);
    }
  }
  /**
   * BitmapFactory calculates the density scale factor as a float. This introduces some non-trivial
   * error. This method attempts to account for that error by adjusting the inTargetDensity so that
   * the final scale factor is as close to our target as possible.
   */
  private static int adjustTargetDensityForError(double adjustedScaleFactor) {
    int densityMultiplier = getDensityMultiplier(adjustedScaleFactor);
    int targetDensity = round(densityMultiplier * adjustedScaleFactor);
    float scaleFactorWithError = targetDensity / (float) densityMultiplier;
    double difference = adjustedScaleFactor / scaleFactorWithError;
    return round(difference * targetDensity);
  }
  /**
   * Returns a large integer denominator for expressing the given scale factor as an
   * inDensity/inTargetDensity ratio, scaled so the corresponding numerator fits in an int.
   */
  private static int getDensityMultiplier(double adjustedScaleFactor) {
    return (int)
        Math.round(
            Integer.MAX_VALUE
                * (adjustedScaleFactor <= 1D ? adjustedScaleFactor : 1 / adjustedScaleFactor));
  }
  // This is weird, but it matches the logic in a bunch of Android views/framework classes for
  // rounding.
  private static int round(double value) {
    return (int) (value + 0.5d);
  }
  // Returns true if it's safe to try to reuse a pooled Bitmap when decoding this image type.
  private boolean shouldUsePool(ImageType imageType) {
    // On KitKat+, any bitmap (of a given config) can be used to decode any other bitmap
    // (with the same config).
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.KITKAT) {
      return true;
    }
    // We cannot reuse bitmaps when decoding images that are not PNG or JPG prior to KitKat.
    // See: https://groups.google.com/forum/#!msg/android-developers/Mp0MFVFi1Fo/e8ZQ9FGdWdEJ
    return TYPES_THAT_USE_POOL_PRE_KITKAT.contains(imageType);
  }
  /**
   * Chooses {@link BitmapFactory.Options#inPreferredConfig} for the decode: a hardware config when
   * allowed, otherwise ARGB_8888 or RGB_565 depending on the requested {@link DecodeFormat} and
   * whether the image header reports an alpha channel.
   */
  @SuppressWarnings("deprecation")
  private void calculateConfig(
      InputStream is,
      DecodeFormat format,
      boolean isHardwareConfigAllowed,
      boolean isExifOrientationRequired,
      BitmapFactory.Options optionsWithScaling,
      int targetWidth,
      int targetHeight) {
    if (hardwareConfigState.setHardwareConfigIfAllowed(
        targetWidth,
        targetHeight,
        optionsWithScaling,
        isHardwareConfigAllowed,
        isExifOrientationRequired)) {
      return;
    }
    // Changing configs can cause skewing on 4.1, see issue #128.
    if (format == DecodeFormat.PREFER_ARGB_8888
        || Build.VERSION.SDK_INT == Build.VERSION_CODES.JELLY_BEAN) {
      optionsWithScaling.inPreferredConfig = Bitmap.Config.ARGB_8888;
      return;
    }
    boolean hasAlpha = false;
    try {
      hasAlpha = ImageHeaderParserUtils.getType(parsers, is, byteArrayPool).hasAlpha();
    } catch (IOException e) {
      // Header parse failure is non-fatal; fall back to assuming no alpha (RGB_565).
      if (Log.isLoggable(TAG, Log.DEBUG)) {
        Log.d(
            TAG,
            "Cannot determine whether the image has alpha or not from header"
                + ", format "
                + format,
            e);
      }
    }
    optionsWithScaling.inPreferredConfig =
        hasAlpha ? Bitmap.Config.ARGB_8888 : Bitmap.Config.RGB_565;
    if (optionsWithScaling.inPreferredConfig == Config.RGB_565) {
      optionsWithScaling.inDither = true;
    }
  }
  /**
   * A method for getting the dimensions of an image from the given InputStream.
   *
   * @param is The InputStream representing the image.
   * @param options The options to pass to {@link BitmapFactory#decodeStream(java.io.InputStream,
   *     android.graphics.Rect, android.graphics.BitmapFactory.Options)}.
   * @return an array containing the dimensions of the image in the form {width, height}.
   */
  private static int[] getDimensions(
      InputStream is,
      BitmapFactory.Options options,
      DecodeCallbacks decodeCallbacks,
      BitmapPool bitmapPool)
      throws IOException {
    options.inJustDecodeBounds = true;
    decodeStream(is, options, decodeCallbacks, bitmapPool);
    options.inJustDecodeBounds = false;
    return new int[] {options.outWidth, options.outHeight};
  }
  /**
   * Decodes a Bitmap from the stream with the given options, retrying once without
   * {@link BitmapFactory.Options#inBitmap} (returning the rejected Bitmap to the pool) if
   * decoding with a reused Bitmap fails.
   */
  private static Bitmap decodeStream(
      InputStream is,
      BitmapFactory.Options options,
      DecodeCallbacks callbacks,
      BitmapPool bitmapPool)
      throws IOException {
    if (options.inJustDecodeBounds) {
      is.mark(MARK_POSITION);
    } else {
      // Once we've read the image header, we no longer need to allow the buffer to expand in
      // size. To avoid unnecessary allocations reading image data, we fix the mark limit so that it
      // is no larger than our current buffer size here. We need to do so immediately before
      // decoding the full image to avoid having our mark limit overridden by other calls to
      // mark and reset. See issue #225.
      callbacks.onObtainBounds();
    }
    // BitmapFactory.Options out* variables are reset by most calls to decodeStream, successful or
    // otherwise, so capture here in case we log below.
    int sourceWidth = options.outWidth;
    int sourceHeight = options.outHeight;
    String outMimeType = options.outMimeType;
    final Bitmap result;
    TransformationUtils.getBitmapDrawableLock().lock();
    try {
      result = BitmapFactory.decodeStream(is, null, options);
    } catch (IllegalArgumentException e) {
      IOException bitmapAssertionException =
          newIoExceptionForInBitmapAssertion(e, sourceWidth, sourceHeight, outMimeType, options);
      if (Log.isLoggable(TAG, Log.DEBUG)) {
        Log.d(
            TAG,
            "Failed to decode with inBitmap, trying again without Bitmap re-use",
            bitmapAssertionException);
      }
      if (options.inBitmap != null) {
        try {
          // Rewind the stream and retry without Bitmap reuse; return the rejected Bitmap to the
          // pool first.
          is.reset();
          bitmapPool.put(options.inBitmap);
          options.inBitmap = null;
          return decodeStream(is, options, callbacks, bitmapPool);
        } catch (IOException resetException) {
          throw bitmapAssertionException;
        }
      }
      throw bitmapAssertionException;
    } finally {
      TransformationUtils.getBitmapDrawableLock().unlock();
    }
    if (options.inJustDecodeBounds) {
      is.reset();
    }
    return result;
  }
  // True when the density fields are set such that a density scale will actually be applied.
  private static boolean isScaling(BitmapFactory.Options options) {
    return options.inTargetDensity > 0
        && options.inDensity > 0
        && options.inTargetDensity != options.inDensity;
  }
  // Logs a VERBOSE summary of a completed decode, including timings and scaling parameters.
  private static void logDecode(
      int sourceWidth,
      int sourceHeight,
      String outMimeType,
      BitmapFactory.Options options,
      Bitmap result,
      int requestedWidth,
      int requestedHeight,
      long startTime) {
    Log.v(
        TAG,
        "Decoded "
            + getBitmapString(result)
            + " from ["
            + sourceWidth
            + "x"
            + sourceHeight
            + "] "
            + outMimeType
            + " with inBitmap "
            + getInBitmapString(options)
            + " for ["
            + requestedWidth
            + "x"
            + requestedHeight
            + "]"
            + ", sample size: "
            + options.inSampleSize
            + ", density: "
            + options.inDensity
            + ", target density: "
            + options.inTargetDensity
            + ", thread: "
            + Thread.currentThread().getName()
            + ", duration: "
            + LogTime.getElapsedMillis(startTime));
  }
  // Describes the Bitmap currently set as options.inBitmap, for logging.
  private static String getInBitmapString(BitmapFactory.Options options) {
    return getBitmapString(options.inBitmap);
  }
  // Describes a Bitmap's dimensions, config and (on KitKat+) allocation size, for logging.
  @Nullable
  @TargetApi(Build.VERSION_CODES.KITKAT)
  private static String getBitmapString(Bitmap bitmap) {
    if (bitmap == null) {
      return null;
    }
    String sizeString =
        Build.VERSION.SDK_INT >= Build.VERSION_CODES.KITKAT
            ? " (" + bitmap.getAllocationByteCount() + ")"
            : "";
    return "["
        + bitmap.getWidth()
        + "x"
        + bitmap.getHeight()
        + "] "
        + bitmap.getConfig()
        + sizeString;
  }
  // BitmapFactory throws an IllegalArgumentException if any error occurs attempting to decode a
  // file when inBitmap is non-null, including those caused by partial or corrupt data. We still log
  // the error because the IllegalArgumentException is supposed to catch errors reusing Bitmaps, so
  // want some useful log output. In most cases this can be safely treated as a normal IOException.
  private static IOException newIoExceptionForInBitmapAssertion(
      IllegalArgumentException e,
      int outWidth,
      int outHeight,
      String outMimeType,
      BitmapFactory.Options options) {
    return new IOException(
        "Exception decoding bitmap"
            + ", outWidth: "
            + outWidth
            + ", outHeight: "
            + outHeight
            + ", outMimeType: "
            + outMimeType
            + ", inBitmap: "
            + getInBitmapString(options),
        e);
  }
  /**
   * Sets {@link BitmapFactory.Options#inBitmap} to a pooled Bitmap of the expected output size so
   * the decode can reuse memory. No-op when a hardware config is requested, since hardware
   * Bitmaps cannot be reused.
   */
  @SuppressWarnings("PMD.CollapsibleIfStatements")
  @TargetApi(Build.VERSION_CODES.O)
  private static void setInBitmap(
      BitmapFactory.Options options, BitmapPool bitmapPool, int width, int height) {
    @Nullable Bitmap.Config expectedConfig = null;
    // Avoid short circuiting, it appears to break on some devices.
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
      if (options.inPreferredConfig == Config.HARDWARE) {
        return;
      }
      // On API 26 outConfig may be null for some images even if the image is valid, can be decoded
      // and outWidth/outHeight/outColorSpace are populated (see b/71513049).
      expectedConfig = options.outConfig;
    }
    if (expectedConfig == null) {
      // We're going to guess that BitmapFactory will return us the config we're requesting. This
      // isn't always the case, even though our guesses tend to be conservative and prefer configs
      // of larger sizes so that the Bitmap will fit our image anyway. If we're wrong here and the
      // config we choose is too small, our initial decode will fail, but we will retry with no
      // inBitmap which will succeed so if we're wrong here, we're less efficient but still correct.
      expectedConfig = options.inPreferredConfig;
    }
    // BitmapFactory will clear out the Bitmap before writing to it, so getDirty is safe.
    options.inBitmap = bitmapPool.getDirty(width, height, expectedConfig);
  }
  // Obtains a reset BitmapFactory.Options instance, reusing a pooled one when available.
  private static synchronized BitmapFactory.Options getDefaultOptions() {
    BitmapFactory.Options decodeBitmapOptions;
    synchronized (OPTIONS_QUEUE) {
      decodeBitmapOptions = OPTIONS_QUEUE.poll();
    }
    if (decodeBitmapOptions == null) {
      decodeBitmapOptions = new BitmapFactory.Options();
      resetOptions(decodeBitmapOptions);
    }
    return decodeBitmapOptions;
  }
  // Resets the options instance and returns it to the shared pool for reuse.
  private static void releaseOptions(BitmapFactory.Options decodeBitmapOptions) {
    resetOptions(decodeBitmapOptions);
    synchronized (OPTIONS_QUEUE) {
      OPTIONS_QUEUE.offer(decodeBitmapOptions);
    }
  }
  // Restores every field mutated during a decode to its default so the instance can be reused.
  @SuppressWarnings("deprecation")
  private static void resetOptions(BitmapFactory.Options decodeBitmapOptions) {
    decodeBitmapOptions.inTempStorage = null;
    decodeBitmapOptions.inDither = false;
    decodeBitmapOptions.inScaled = false;
    decodeBitmapOptions.inSampleSize = 1;
    decodeBitmapOptions.inPreferredConfig = null;
    decodeBitmapOptions.inJustDecodeBounds = false;
    decodeBitmapOptions.inDensity = 0;
    decodeBitmapOptions.inTargetDensity = 0;
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
      decodeBitmapOptions.outColorSpace = null;
      decodeBitmapOptions.outConfig = null;
    }
    decodeBitmapOptions.outWidth = 0;
    decodeBitmapOptions.outHeight = 0;
    decodeBitmapOptions.outMimeType = null;
    decodeBitmapOptions.inBitmap = null;
    decodeBitmapOptions.inMutable = true;
  }
  /** Callbacks for key points during decodes. */
  public interface DecodeCallbacks {
    void onObtainBounds();
    void onDecodeComplete(BitmapPool bitmapPool, Bitmap downsampled) throws IOException;
  }
}
| 36,300 | 38.935094 | 100 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/FilterDefuse1.java
|
package collect;
import gumtreediff.actions.ActionGenerator;
import gumtreediff.actions.model.Action;
import gumtreediff.io.TreeIoUtils;
import gumtreediff.matchers.MappingStore;
import gumtreediff.matchers.Matcher;
import gumtreediff.matchers.Matchers;
import gumtreediff.tree.ITree;
import gumtreediff.tree.TreeContext;
import split.Split;
import structure.API;
import structure.Definition;
import structure.Migration;
import structure.SubTree;
import utils.Defuse;
import utils.Output;
import utils.Utils;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.*;
/*
* statement-level single line extraction
*/
public class FilterDefuse {
private static LinkedHashSet<API> apis = new LinkedHashSet<API>();
private static HashMap<SubTree, SubTree> treePairs = new HashMap<SubTree, SubTree>();
private static int count = 0;
public static void main (String args[]) throws Exception{
String path = "I:\\20210714-Srqtrans_testcase\\Vulnerability_testcase\\";
String outMode = "txt";
String numDir = "data_num\\";
String checkDir = "data_check\\";
String varDir = "data_var\\";
multiCollect(path, outMode, numDir);
// String cpPath = path+"cp48";
// FilterDefuse defuse = new FilterDefuse();
// defuse.collectDiffwithDefUse(cpPath, outMode, true, true, "");
}
public static void multiCollect(String path, String outMode, String numDir) throws Exception {
// if(outMode.equals("txt"))
// FileOperation.delAllFile(dataDir);
// if(outMode.equals("lineNum")) {
// FileOperation.delAllFile(numDir);
// FileOperation.delAllFile(varDir);
// FileOperation.delAllFile(checkDir);
// }
if(outMode.equals("json")) {
String jpath = "jsons\\";
File jFile = new File(jpath);
if(!jFile.exists())
jFile.mkdirs();
if(jFile.listFiles().length!=0&&outMode.equals("json"))
throw new Exception("pls clean dir!");
}
ArrayList<String> existList = checkExist(numDir);
File rootFile = new File(path);
File[] fileList = rootFile.listFiles();
System.out.println(fileList.length);
for(int i=0;i<fileList.length;i++) {
File cpFile = fileList[i];
System.out.println(i+":"+cpFile.getName());
if(existList.contains(cpFile.getName()))
continue;
String cpPath = cpFile.getAbsolutePath();
FilterDefuse defuse = new FilterDefuse();
defuse.collectDiffwithDefUse(cpPath, outMode, true, true, "");
}
System.out.println("DuplicateNum:"+count);
}
	/**
	 * Core extraction pass for one migration code pair: aligns the src/dst ASTs,
	 * selects the statement subtrees touched by edit actions, resolves def-use
	 * information for the variables they mention, and emits the aligned pairs in
	 * the chosen output mode.
	 *
	 * @param path         directory holding one migration code pair
	 * @param outMode      "txt" (token text), "json" (serialized trees) or
	 *                     "lineNum" (line/column ranges plus variable maps)
	 * @param ifOnlyChange when true, keep only subtrees containing an edit action
	 * @param ifPrintDef   when true (txt mode), prepend the used definitions to each sample
	 * @param filter       passed through to FileFilter.readMigrationList
	 * @throws Exception propagated from parsing, matching or file I/O
	 */
	public void collectDiffwithDefUse(String path, String outMode,
			Boolean ifOnlyChange, Boolean ifPrintDef, String filter) throws Exception {// collect def-use information
		Split sp = new Split();
		ArrayList<Migration> migrats = FileFilter.readMigrationList(path, filter);
		String repoName = "";
		if(migrats.size()!=0)
			repoName = migrats.get(0).getRepoName();
		else
			return;
		String txtName = (new File(path)).getName();
		String jpath = "jsons\\";
		File jFile = new File(jpath);
		if(!jFile.exists())
			jFile.mkdirs();
		// Output locations for the various modes (txt / lineNum / var / check).
		String outPath = "data\\defuse_"+txtName+".txt";
		String outPath1 = "data\\src-val_"+txtName+".txt";
		String outPath2 = "data\\tgt-val_"+txtName+".txt";
		String outPath3 = "data_num\\"+repoName+"_"+txtName+".txt";
		String outPath4 = "data_var\\"+repoName+"_"+txtName+"_defs_src.txt";
		String outPath5 = "data_var\\"+repoName+"_"+txtName+"_defs_dst.txt";
		String outPath6 = "data_check\\"+repoName+"_"+txtName+".txt";
		int errCount = 0;
		for(Migration migrat : migrats) {
			Defuse defuse = new Defuse();
			String miName_src = migrat.getMiName_src();
			String miName_dst = migrat.getMiName_dst();
			TreeContext sTC = migrat.getSrcT();
			TreeContext dTC = migrat.getDstT();
			MappingStore mappings = migrat.getMappings();
			HashMap<ITree, ITree> leaf2parblock_map_src = defuse.searchBlockMap(sTC);
			HashMap<ITree, ITree> leaf2parblock_map_dst = defuse.searchBlockMap(dTC);
			System.out.println("Analyse:"+miName_src);
			ArrayList<SubTree> changedSTree = new ArrayList<>();
			HashMap<String, LinkedList<Action>> actions = Utils.collectAction(sTC, dTC, mappings);
			System.out.println("111");
			ArrayList<Integer> srcActIds = Utils.collectSrcActNodeIds(sTC, dTC, mappings, actions);
			System.out.println("2222");
//			if(srcActIds.contains(2333)) {
//				System.out.println("indeed");
//			}else {
//				System.out.println("not contains");
//			}
			ArrayList<Definition> defs1 = defuse.getDef(sTC, "src");// compute actions first, then collect defs
			ArrayList<Definition> defs2 = defuse.getDef(dTC, "tgt");
			HashMap<String, ArrayList<Definition>> defMap1 = defuse.transferDefs(defs1);
			HashMap<String, ArrayList<Definition>> defMap2 = defuse.transferDefs(defs2);
			HashMap<ITree, ArrayList<Definition>> blockMap1 = defuse.transferBlockMap(defs1, sTC, "src");
			HashMap<ITree, ArrayList<Definition>> blockMap2 = defuse.transferBlockMap(defs2, dTC, "tgt");
			ArrayList<SubTree> sub1 = sp.splitSubTree(sTC, miName_src);// note: blocks get split apart inside SubTree
			ArrayList<SubTree> sub2 = sp.splitSubTree(dTC, miName_src);// compute actions first, then split subtrees
			HashMap<Integer, HashMap<String, String>> usedDefs2Map = new HashMap<Integer, HashMap<String, String>>();
			System.out.println("def1size:"+defs1.size());
			System.out.println("def2size:"+defs2.size());
			System.out.println("def1Mapsize:"+defMap1.size());
			System.out.println("def2Mapsize:"+defMap2.size());
			System.out.println("block1size:"+blockMap1.size());
			System.out.println("block2size:"+blockMap2.size());
//			for(SubTree st : sub1) {
//				ITree root = st.getRoot();
//				System.err.println("StID:"+root.getId());
//			}
			if(ifOnlyChange==true) {
				for(SubTree st : sub1) {
					ITree t = st.getRoot();
//					System.out.println("StID:"+t.getId());
					List<ITree> nodeList = t.getDescendants();
					nodeList.add(t);
//					for(ITree node : nodeList) {
//						int id = node.getId();
//						System.out.println("nodeid:"+id);
//					}
					for(ITree node : nodeList) {
						int id = node.getId();
						if(srcActIds.contains(id)) {
							changedSTree.add(st);
//							System.out.println("find a action subtree!"+t.getId());
							break;
						}
					}
				}// first find the subtrees that contain edit actions
			}else {
				changedSTree = sub1;
			}
			System.out.println("subSize:"+sub1.size());
			System.out.println("changeSize:"+changedSTree.size());
			for(SubTree srcT : changedSTree) {
//				System.out.println("===================");
				HashMap<String, String> replaceMap_src = new HashMap<String, String>();
				HashMap<String, String> replaceMap_dst = new HashMap<String, String>();
				HashSet<Definition> usedDefs1 = new HashSet<Definition>();
				HashSet<Definition> usedDefs2 = new HashSet<Definition>();
				ITree sRoot = srcT.getRoot();
//				System.out.println("CheckMapping "+sRoot.getId()+":"+srcT.getMiName());
				String src = Output.subtree2src(srcT);
				if(outMode.equals("txt")&&(src.contains("error")&&src.contains("situation"))) {
					errCount++;
					continue;
				}
				Boolean same = false;
				ArrayList<ITree> leaves1 = new ArrayList<ITree>();
				Utils.traverse2Leaf(sRoot, leaves1);
				int labelCount = 0;
				// Pass 1 over src leaves: strip literals, record which definitions
				// are used by this subtree and which variable names to keep.
				for(ITree leaf : leaves1) {
					String label = leaf.getLabel();
//					System.out.println("label:"+label);
					if(!label.equals(""))
						labelCount++;
					String type = sTC.getTypeLabel(leaf);
					if(type.equals("literal")) {
						leaf.setLabel(Output.deleteLiteral(leaf, sTC));
//						if(label.contains("\""))
//							replaceMap_src.put("@@"+label+"@@", "None");
//						else
//							replaceMap_src.put("$$"+label+"$$", "num");//replace Literal
					}
					ArrayList<Definition> stringList = defMap1.get(label);
					if(stringList!=null) {
						ITree parBlock = leaf2parblock_map_src.get(leaf);
						ArrayList<Definition> blockList = blockMap1.get(parBlock);
						for(Definition def1 : stringList) {
							if(blockList!=null) {
								if(blockList.contains(def1)) {
									if(leaf.getId()>def1.getDefLabelID()) {
										usedDefs1.add(def1);
										System.out.println("DefTest: "+leaf.getLabel()+","+leaf.getId()+","+def1.getDefLabelID());
//										leaf.setLabel("var");
										replaceMap_src.put(label, label);
									}
								}
							}
							if(def1.getDefLabelID()==leaf.getId()) {
//								leaf.setLabel("var");
								replaceMap_src.put(label, label);
							}
//							System.out.println(leaf.getId()+","+def1.getDefLabelID());
//							System.out.println("Def:"+def1.getType()+","+def1.getVarName());
						}
					}
				}
				if(labelCount==0) {
					System.err.println("labelCount is 0");
					continue;
				}
				SubTree dstT = defuse.checkMapping(srcT, mappings, dTC, sub2);
				if(dstT==null) {
					System.err.println("no dstT searched");
					continue;// the subtree has no counterpart: it was deleted
				}
				ITree dRoot = dstT.getRoot();
//				System.out.println(sRoot.getId()+"->"+dRoot.getId());
				List<ITree> nodes1 = sRoot.getDescendants();
				nodes1.add(sRoot);// all nodes of srcT
				List<ITree> nodes2 = dRoot.getDescendants();
				nodes2.add(dRoot);// all nodes of dstT
				int sBeginLine = 0;
				int sLastLine = 0;
				int sBeginCol = 0;
				int sLastCol = 0;
				int dBeginLine = 0;
				int dLastLine = 0;
				int dBeginCol = 0;
				int dLastCol = 0;
				// lineNum mode: compute the min/max line and column ranges covered
				// by the src and dst subtrees.
				if(outMode.equals("lineNum")) {
					for(ITree node : nodes1) {
						int line = node.getLine();
						int col = node.getColumn();
						int lastLine = node.getLastLine();
						int lastCol = node.getLastColumn();
						String type = sTC.getTypeLabel(node);
						if(!type.equals("block")) {// skip block nodes: they would push lastLine to the closing brace
							if(sBeginLine==0&&line!=0) {
								sBeginLine = line;
							}else if(line < sBeginLine&&line!=0) {
								sBeginLine = line;
							}//begin line
							if(sBeginCol==0&&col!=0) {
								sBeginCol = col;
							}else if(col < sBeginCol&&col!=0) {
								sBeginCol = col;
							}//begin col
							if(lastLine > sLastLine) {
								sLastLine = lastLine;
							}//last line
							if(lastCol > sLastCol) {
								sLastCol = lastCol;
							}//last col
//							if(sRoot.getId()==16329) {
//								System.err.println(node.getId()+type+":"+line+","+lastLine+","+col+","+lastCol);
//							}
						}else if(type.equals("empty_stmt"))// special case
							continue;
					}
					for(ITree node : nodes2) {
						int line = node.getLine();
						int col = node.getColumn();
						int lastLine = node.getLastLine();
						int lastCol = node.getLastColumn();
						String type = sTC.getTypeLabel(node);
						if(!type.equals("block")) {// skip block nodes: they would push lastLine to the closing brace
							if(dBeginLine==0&&line!=0) {
								dBeginLine = line;
							}else if(line < dBeginLine&&line!=0) {
								dBeginLine = line;
							}//begin line
							if(dBeginCol==0&&col!=0) {
								dBeginCol = col;
							}else if(col < dBeginCol&&col!=0) {
								dBeginCol = col;
							}//begin col
							if(dLastLine < lastLine) {
								dLastLine = lastLine;
							}//last line
							if(dLastCol < lastCol) {
								dLastCol = lastCol;
							}//last col
						}else if(type.equals("empty_stmt"))// special case
							continue;
					}
				}
				// Pass over dst leaves (cached per dst root id, see workaround below).
				if(usedDefs2Map.get(dRoot.getId())==null) {
					ArrayList<ITree> leaves2 = new ArrayList<ITree>();
					Utils.traverse2Leaf(dRoot, leaves2);
					for(ITree leaf : leaves2) {
						String label = leaf.getLabel();
						String type = dTC.getTypeLabel(leaf);
						if(type.equals("literal")) {
							leaf.setLabel(Output.deleteLiteral(leaf, dTC));
//							if(label.contains("\""))
//								replaceMap_dst.put("@@"+label+"@@", "None");
//							else
//								replaceMap_dst.put("$$"+label+"$$", "num");//replace Literal
						}
						ArrayList<Definition> stringList = defMap2.get(label);
						if(stringList!=null) {
							ITree parBlock = leaf2parblock_map_dst.get(leaf);
							ArrayList<Definition> blockList = blockMap2.get(parBlock);
							for(Definition def2 : stringList) {
								if(blockList!=null) {
									if(blockList.contains(def2)) {
										if(leaf.getId()>def2.getDefLabelID()) {
											usedDefs2.add(def2);
//											leaf.setLabel("var");
											replaceMap_dst.put(label, label);
										}
//										System.out.println(leaf.getId()+","+def2.getDefLabelID());
//										System.out.println(def2.getType()+","+def2.getVarName());
									}
								}
								if(def2.getDefLabelID()==leaf.getId()) {
//									leaf.setLabel("var");
									replaceMap_dst.put(label, label);
								}
							}
						}
						if(same==false) {
							for(ITree leaf1 : leaves1) {
								String label1 = leaf1.getLabel();
								if(label.equals(label1)) {
									same = true;
								}
							}
						}
					}
					usedDefs2Map.put(dRoot.getId(), replaceMap_dst);
				}else {
					same = true;
					replaceMap_dst = usedDefs2Map.get(dRoot.getId());
				}// different src subtrees can map to the same dst subtree (a matcher limitation we cannot fix yet);
				// workaround: reuse the cached replaceMap_dst for that dst subtree and move on
				src = Output.subtree2src(srcT);
				String tar = Output.subtree2src(dstT);
				if(outMode.equals("txt")) {
					if(tar.contains("error")&&tar.contains("situation")) {
						errCount++;
						continue;
					}
					if(((float)src.length()/(float)tar.length())<0.25||((float)tar.length()/(float)src.length())<0.25) {
						continue;
					}// skip pairs whose lengths differ too much
					if(ifOnlyChange==true) {
						if(src.equals(tar))
							continue;
					}// drop identical sentence pairs
				}
				if(same==false) {
					System.err.println("No leaf is the same");
					continue;//no leaf is the same
				}
				// Emit the aligned pair in the selected output mode.
				if(outMode.equals("txt")) {
					if(ifPrintDef==true) {
						printDefs(usedDefs1);
						String buffer = getDefTxt(usedDefs1, usedDefs2, sTC, dTC, srcT, dstT);
						printTxt(outPath, outPath1, outPath2, buffer);
					}else {
						String buffer = getText(sTC, dTC, srcT, dstT);
						printTxt(outPath, outPath1, outPath2, buffer);
					}
				}else if(outMode.equals("json")) {
					srcT = absTree(srcT);
					dstT = absTree(dstT);
					TreeContext st = defuse.buildTC(srcT);
					TreeContext dt = defuse.buildTC(dstT);
					if(checkSim(st, dt)==false) {
						printJson(jpath, st, dt);
						treePairs.put(srcT, dstT);
					}
				}else if(outMode.equals("lineNum")) {
//					if(sRoot.getId()==806) {
//						for(Map.Entry<String, String> entry : replaceMap_src.entrySet()) {
//							String varName = entry.getKey();
//							String label = entry.getValue();
//							System.err.println(varName+"->"+label+";");
//						}
//					}
					String diffLine_check = "STID:"+srcT.getRoot().getId()+","
							+sBeginLine+","+sLastLine+","+sBeginCol+","+sLastCol+"->"
							+dBeginLine+","+dLastLine+","+dBeginCol+","+dLastCol;
					String diffLine = miName_src+";"+miName_dst+";"
							+sBeginLine+","+sLastLine+","+sBeginCol+","+sLastCol+"->"
							+dBeginLine+","+dLastLine+","+dBeginCol+","+dLastCol;
					printLineNum(outPath3, outPath4, outPath5, diffLine, replaceMap_src, replaceMap_dst);
					printLineCheck(outPath6, diffLine_check);
				}
			}
		}
		System.out.println("errCount:"+errCount);
	}
static private void printLineCheck(String outPath6, String diffLine_check) throws IOException {
File output6 = new File(outPath6);
BufferedWriter wr6 = new BufferedWriter(new FileWriter(output6, true));
wr6.append(diffLine_check);
wr6.newLine();
wr6.flush();
wr6.close();
}
static private void printLineNum(String outPath3, String outPath4, String outPath5, String diffLine,
HashMap<String , String> replaceMap_src, HashMap<String , String> replaceMap_dst) throws Exception {
File output3 = new File(outPath3);
BufferedWriter wr3 = new BufferedWriter(new FileWriter(output3, true));
File output4 = new File(outPath4);
BufferedWriter wr4 = new BufferedWriter(new FileWriter(output4, true));
File output5 = new File(outPath5);
BufferedWriter wr5 = new BufferedWriter(new FileWriter(output5, true));
wr3.append(diffLine);
wr3.newLine();
wr3.flush();
for(Map.Entry<String, String> entry : replaceMap_src.entrySet()) {
String varName = entry.getKey();
String label = entry.getValue();
wr4.append(varName+"->"+label+";");
}
wr4.newLine();
wr4.flush();
// System.out.println("STID:"+srcT.getRoot().getId()+","+dstT.getRoot().getId());
// System.out.println(replaceMap_dst.size());
for(Map.Entry<String, String> entry : replaceMap_dst.entrySet()) {
String varName = entry.getKey();
String label = entry.getValue();
wr5.append(varName+"->"+label+";");
}
wr5.newLine();
wr5.flush();
wr3.close();
wr4.close();
wr5.close();
}
static private void printDefs(HashSet<Definition> defs) {
System.out.println("usedDefs1:"+defs.size());
for(Definition def : defs) {
String label = def.getVarName();
System.out.println(label);
}
}
static private String getDefTxt(HashSet<Definition> usedDefs1, HashSet<Definition> usedDefs2,
TreeContext tc1, TreeContext tc2, SubTree srcT, SubTree dstT) throws Exception {
String buffer = "";
for(Definition def : usedDefs1) {
SubTree st = new SubTree(def.getRoot(), tc1, 0, "");
String stat = Output.subtree2src(st);
buffer = buffer +stat+" ; ";
}
String src = Output.subtree2src(srcT);
buffer = buffer + src+"\t";
for(Definition def : usedDefs2) {
SubTree st = new SubTree(def.getRoot(), tc2, 0, "");
String stat = Output.subtree2src(st);
buffer += stat+" ; ";
}
String tar = Output.subtree2src(dstT);
buffer += tar;
if(buffer.contains("error")&&buffer.contains("situation"))
return null;
return buffer;
}
static private SubTree absTree(SubTree st) {
ITree root = st.getRoot();
List<ITree> desList = root.getDescendants();
for(ITree node : desList) {
String label = node.getLabel();
try {
Integer.parseInt(label);
node.setLabel("num");
} catch (Exception e) {
// TODO: handle exception
}
}
return st;
}
static private ArrayList<String> checkExist(String outPath){
ArrayList<String> existList = new ArrayList<String>();
File outDir = new File(outPath);
File[] cpFiles = outDir.listFiles();
System.out.println(cpFiles.length);
for(File cpFile : cpFiles) {
String name = cpFile.getName();
String[] tmp = name.split("\\.")[0].split("_");
String cpNum = tmp[tmp.length-1];
existList.add(cpNum);
}
return existList;
}//断点重新开始任务用
	/**
	 * Returns true when the given tree pair is structurally identical to a pair
	 * already collected in {@code treePairs} (zero edit actions against both the
	 * stored src and dst trees), so json mode can skip duplicates. Increments
	 * the global duplicate counter {@code count} on a hit.
	 *
	 * @param tc1 candidate source tree
	 * @param tc2 candidate target tree
	 * @return true if some already-collected pair matches both trees exactly
	 */
	static private Boolean checkSim(TreeContext tc1, TreeContext tc2) {
		Boolean full_sim = false;
		Defuse defuse = new Defuse();
		for(Map.Entry<SubTree, SubTree> entry : treePairs.entrySet()) {
			SubTree st1 = entry.getKey();
			SubTree st2 = entry.getValue();
			try {
				TreeContext tc1_used = defuse.buildTC(st1);
				TreeContext tc2_used = defuse.buildTC(st2);
				// diff the candidate source tree against the stored source tree
				Matcher m1 = Matchers.getInstance().getMatcher(tc1.getRoot(), tc1_used.getRoot());
				m1.match();
				MappingStore mappings1 = m1.getMappings();
				ActionGenerator g1 = new ActionGenerator(tc1.getRoot(), tc1_used.getRoot(), mappings1);
				List<Action> actions1 = g1.generate();
				// diff the candidate target tree against the stored target tree
				Matcher m2 = Matchers.getInstance().getMatcher(tc2.getRoot(), tc2_used.getRoot());
				m2.match();
				MappingStore mappings2 = m2.getMappings();
				ActionGenerator g2 = new ActionGenerator(tc2.getRoot(), tc2_used.getRoot(), mappings2);
				List<Action> actions2 = g2.generate();
				if(actions1.size()==0&&actions2.size()==0) {
					// no edit actions on either side: exact structural duplicate
					full_sim = true;
					count++;
					return full_sim;
				}
			} catch (Exception e) {
				continue;// matching failed for this stored pair; try the next one
			}
		}
		return full_sim;
	}
static private String getText(TreeContext tc1, TreeContext tc2, SubTree srcT, SubTree dstT) throws Exception {
String buffer = "";
String src = Output.subtree2src(srcT);
String tar = Output.subtree2src(dstT);
buffer = src+"\t"+tar;
if(buffer.contains("error")&&buffer.contains("situation"))
return null;
return buffer;
}
static private void printTxt(String outPath, String outPath1, String outPath2, String buffer) throws Exception {
if(buffer==null)
return;
File output = new File(outPath);
BufferedWriter wr = new BufferedWriter(new FileWriter(output, true));
File output1 = new File(outPath1);
File output2 = new File(outPath2);
BufferedWriter wr1 = new BufferedWriter(new FileWriter(output1, true));
BufferedWriter wr2 = new BufferedWriter(new FileWriter(output2, true));
String src = buffer.split("\t")[0];
String dst = buffer.split("\t")[1];
wr.append(buffer);
wr.newLine();
wr.flush();
wr1.append(src);
wr1.newLine();
wr1.flush();
wr2.append(dst);
wr2.newLine();
wr2.flush();
wr.close();
wr1.close();
wr2.close();
}
static private void printJson(String jpath, TreeContext srcT, TreeContext dstT) throws Exception {
File dir = new File(jpath);
if(!dir.exists()) {
dir.mkdirs();
}
File[] files = dir.listFiles();
int fileSize = files.length;
if(srcT!=null) {
String out = jpath+"pair"+String.valueOf(fileSize/2)+"_src.json";
BufferedWriter wr = new BufferedWriter(new FileWriter(new File(out)));
wr.append(TreeIoUtils.toJson(srcT).toString());
wr.flush();
wr.close();
}
if(dstT!=null) {
String out1 = jpath+"pair"+String.valueOf(fileSize/2)+"_tgt.json";
BufferedWriter wr1 = new BufferedWriter(new FileWriter(new File(out1)));
wr1.append(TreeIoUtils.toJson(dstT).toString());
wr1.flush();
wr1.close();
}
}
}
| 21,783 | 34.363636 | 113 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/FilterDefuse2.java
|
package collect;
import gumtreediff.actions.ActionGenerator;
import gumtreediff.actions.model.Action;
import gumtreediff.io.TreeIoUtils;
import gumtreediff.matchers.MappingStore;
import gumtreediff.matchers.Matcher;
import gumtreediff.matchers.Matchers;
import gumtreediff.tree.ITree;
import gumtreediff.tree.TreeContext;
import split.Split;
import structure.API;
import structure.Definition;
import structure.Migration;
import structure.SubTree;
import utils.Defuse;
import utils.Output;
import utils.Utils;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.*;
/*
* statement-level single line extraction
*/
public class FilterDefuse {
private static LinkedHashSet<API> apis = new LinkedHashSet<API>();
private static HashMap<SubTree, SubTree> treePairs = new HashMap<SubTree, SubTree>();
private static int count = 0;
	/**
	 * Entry point: runs the collector over every code-pair directory under the
	 * hard-coded Windows root, in "txt" output mode.
	 */
	public static void main (String args[]) throws Exception{
		// Root directory that holds one sub-directory per vulnerability code pair.
		String path = "I:\\20210714-Srqtrans_testcase\\Vulnerability_testcase\\";
		// Output mode: "txt" (token text), "json" (tree dumps) or "lineNum" (line ranges).
		String outMode = "txt";
		String numDir = "data_num\\";
		// NOTE(review): checkDir/varDir are unused here; collectDiffwithDefUse
		// writes data_check\ and data_var\ via its own hard-coded paths.
		String checkDir = "data_check\\";
		String varDir = "data_var\\";
		multiCollect(path, outMode, numDir);
//		String cpPath = path+"cp48";
//		FilterDefuse defuse = new FilterDefuse();
//		defuse.collectDiffwithDefUse(cpPath, outMode, true, true, "");
	}
public static void multiCollect(String path, String outMode, String numDir) throws Exception {
// if(outMode.equals("txt"))
// FileOperation.delAllFile(dataDir);
// if(outMode.equals("lineNum")) {
// FileOperation.delAllFile(numDir);
// FileOperation.delAllFile(varDir);
// FileOperation.delAllFile(checkDir);
// }
if(outMode.equals("json")) {
String jpath = "jsons\\";
File jFile = new File(jpath);
if(!jFile.exists())
jFile.mkdirs();
if(jFile.listFiles().length!=0&&outMode.equals("json"))
throw new Exception("pls clean dir!");
}
ArrayList<String> existList = checkExist(numDir);
File rootFile = new File(path);
File[] fileList = rootFile.listFiles();
System.out.println(fileList.length);
for(int i=0;i<fileList.length;i++) {
File cpFile = fileList[i];
System.out.println(i+":"+cpFile.getName());
if(existList.contains(cpFile.getName()))
continue;
String cpPath = cpFile.getAbsolutePath();
FilterDefuse defuse = new FilterDefuse();
defuse.collectDiffwithDefUse(cpPath, outMode, true, true, "");
}
System.out.println("DuplicateNum:"+count);
}
	/**
	 * Core extraction pass for one migration code pair: aligns the src/dst ASTs,
	 * selects the statement subtrees touched by edit actions, resolves def-use
	 * information for the variables they mention, and emits the aligned pairs in
	 * the chosen output mode.
	 *
	 * @param path         directory holding one migration code pair
	 * @param outMode      "txt" (token text), "json" (serialized trees) or
	 *                     "lineNum" (line/column ranges plus variable maps)
	 * @param ifOnlyChange when true, keep only subtrees containing an edit action
	 * @param ifPrintDef   when true (txt mode), prepend the used definitions to each sample
	 * @param filter       passed through to FileFilter.readMigrationList
	 * @throws Exception propagated from parsing, matching or file I/O
	 */
	public void collectDiffwithDefUse(String path, String outMode,
			Boolean ifOnlyChange, Boolean ifPrintDef, String filter) throws Exception {// collect def-use information
		Split sp = new Split();
		ArrayList<Migration> migrats = FileFilter.readMigrationList(path, filter);
		String repoName = "";
		if(migrats.size()!=0)
			repoName = migrats.get(0).getRepoName();
		else
			return;
		String txtName = (new File(path)).getName();
		String jpath = "jsons\\";
		File jFile = new File(jpath);
		if(!jFile.exists())
			jFile.mkdirs();
		// Output locations for the various modes (txt / lineNum / var / check).
		String outPath = "data\\defuse_"+txtName+".txt";
		String outPath1 = "data\\src-val_"+txtName+".txt";
		String outPath2 = "data\\tgt-val_"+txtName+".txt";
		String outPath3 = "data_num\\"+repoName+"_"+txtName+".txt";
		String outPath4 = "data_var\\"+repoName+"_"+txtName+"_defs_src.txt";
		String outPath5 = "data_var\\"+repoName+"_"+txtName+"_defs_dst.txt";
		String outPath6 = "data_check\\"+repoName+"_"+txtName+".txt";
		int errCount = 0;
		for(Migration migrat : migrats) {
			Defuse defuse = new Defuse();
			String miName_src = migrat.getMiName_src();
			String miName_dst = migrat.getMiName_dst();
			TreeContext sTC = migrat.getSrcT();
			TreeContext dTC = migrat.getDstT();
			MappingStore mappings = migrat.getMappings();
			HashMap<ITree, ITree> leaf2parblock_map_src = defuse.searchBlockMap(sTC);
			HashMap<ITree, ITree> leaf2parblock_map_dst = defuse.searchBlockMap(dTC);
			System.out.println("Analyse:"+miName_src);
			ArrayList<SubTree> changedSTree = new ArrayList<>();
			HashMap<String, LinkedList<Action>> actions = Utils.collectAction(sTC, dTC, mappings);
			System.out.println("111");
			ArrayList<Integer> srcActIds = Utils.collectSrcActNodeIds(sTC, dTC, mappings, actions);
			System.out.println("2222");
//			if(srcActIds.contains(2333)) {
//				System.out.println("indeed");
//			}else {
//				System.out.println("not contains");
//			}
			ArrayList<Definition> defs1 = defuse.getDef(sTC, "src");// compute actions first, then collect defs
			ArrayList<Definition> defs2 = defuse.getDef(dTC, "tgt");
			HashMap<String, ArrayList<Definition>> defMap1 = defuse.transferDefs(defs1);
			HashMap<String, ArrayList<Definition>> defMap2 = defuse.transferDefs(defs2);
			HashMap<ITree, ArrayList<Definition>> blockMap1 = defuse.transferBlockMap(defs1, sTC, "src");
			HashMap<ITree, ArrayList<Definition>> blockMap2 = defuse.transferBlockMap(defs2, dTC, "tgt");
			ArrayList<SubTree> sub1 = sp.splitSubTree(sTC, miName_src);// note: blocks get split apart inside SubTree
			ArrayList<SubTree> sub2 = sp.splitSubTree(dTC, miName_src);// compute actions first, then split subtrees
			HashMap<Integer, HashMap<String, String>> usedDefs2Map = new HashMap<Integer, HashMap<String, String>>();
			System.out.println("def1size:"+defs1.size());
			System.out.println("def2size:"+defs2.size());
			System.out.println("def1Mapsize:"+defMap1.size());
			System.out.println("def2Mapsize:"+defMap2.size());
			System.out.println("block1size:"+blockMap1.size());
			System.out.println("block2size:"+blockMap2.size());
//			for(SubTree st : sub1) {
//				ITree root = st.getRoot();
//				System.err.println("StID:"+root.getId());
//			}
			if(ifOnlyChange==true) {
				for(SubTree st : sub1) {
					ITree t = st.getRoot();
//					System.out.println("StID:"+t.getId());
					List<ITree> nodeList = t.getDescendants();
					nodeList.add(t);
//					for(ITree node : nodeList) {
//						int id = node.getId();
//						System.out.println("nodeid:"+id);
//					}
					for(ITree node : nodeList) {
						int id = node.getId();
						if(srcActIds.contains(id)) {
							changedSTree.add(st);
//							System.out.println("find a action subtree!"+t.getId());
							break;
						}
					}
				}// first find the subtrees that contain edit actions
			}else {
				changedSTree = sub1;
			}
			System.out.println("subSize:"+sub1.size());
			System.out.println("changeSize:"+changedSTree.size());
			for(SubTree srcT : changedSTree) {
//				System.out.println("===================");
				HashMap<String, String> replaceMap_src = new HashMap<String, String>();
				HashMap<String, String> replaceMap_dst = new HashMap<String, String>();
				HashSet<Definition> usedDefs1 = new HashSet<Definition>();
				HashSet<Definition> usedDefs2 = new HashSet<Definition>();
				ITree sRoot = srcT.getRoot();
//				System.out.println("CheckMapping "+sRoot.getId()+":"+srcT.getMiName());
				String src = Output.subtree2src(srcT);
				if(outMode.equals("txt")&&(src.contains("error")&&src.contains("situation"))) {
					errCount++;
					continue;
				}
				Boolean same = false;
				ArrayList<ITree> leaves1 = new ArrayList<ITree>();
				Utils.traverse2Leaf(sRoot, leaves1);
				int labelCount = 0;
				// Pass 1 over src leaves: strip literals, record which definitions
				// are used by this subtree and which variable names to keep.
				for(ITree leaf : leaves1) {
					String label = leaf.getLabel();
//					System.out.println("label:"+label);
					if(!label.equals(""))
						labelCount++;
					String type = sTC.getTypeLabel(leaf);
					if(type.equals("literal")) {
						leaf.setLabel(Output.deleteLiteral(leaf, sTC));
//						if(label.contains("\""))
//							replaceMap_src.put("@@"+label+"@@", "None");
//						else
//							replaceMap_src.put("$$"+label+"$$", "num");//replace Literal
					}
					ArrayList<Definition> stringList = defMap1.get(label);
					if(stringList!=null) {
						ITree parBlock = leaf2parblock_map_src.get(leaf);
						ArrayList<Definition> blockList = blockMap1.get(parBlock);
						for(Definition def1 : stringList) {
							if(blockList!=null) {
								if(blockList.contains(def1)) {
									if(leaf.getId()>def1.getDefLabelID()) {
										usedDefs1.add(def1);
										System.out.println("DefTest: "+leaf.getLabel()+","+leaf.getId()+","+def1.getDefLabelID());
//										leaf.setLabel("var");
										replaceMap_src.put(label, label);
									}
								}
							}
							if(def1.getDefLabelID()==leaf.getId()) {
//								leaf.setLabel("var");
								replaceMap_src.put(label, label);
							}
//							System.out.println(leaf.getId()+","+def1.getDefLabelID());
//							System.out.println("Def:"+def1.getType()+","+def1.getVarName());
						}
					}
				}
				if(labelCount==0) {
					System.err.println("labelCount is 0");
					continue;
				}
				SubTree dstT = defuse.checkMapping(srcT, mappings, dTC, sub2);
				if(dstT==null) {
					System.err.println("no dstT searched");
					continue;// the subtree has no counterpart: it was deleted
				}
				ITree dRoot = dstT.getRoot();
//				System.out.println(sRoot.getId()+"->"+dRoot.getId());
				List<ITree> nodes1 = sRoot.getDescendants();
				nodes1.add(sRoot);// all nodes of srcT
				List<ITree> nodes2 = dRoot.getDescendants();
				nodes2.add(dRoot);// all nodes of dstT
				int sBeginLine = 0;
				int sLastLine = 0;
				int sBeginCol = 0;
				int sLastCol = 0;
				int dBeginLine = 0;
				int dLastLine = 0;
				int dBeginCol = 0;
				int dLastCol = 0;
				// lineNum mode: compute the min/max line and column ranges covered
				// by the src and dst subtrees.
				if(outMode.equals("lineNum")) {
					for(ITree node : nodes1) {
						int line = node.getLine();
						int col = node.getColumn();
						int lastLine = node.getLastLine();
						int lastCol = node.getLastColumn();
						String type = sTC.getTypeLabel(node);
						if(!type.equals("block")) {// skip block nodes: they would push lastLine to the closing brace
							if(sBeginLine==0&&line!=0) {
								sBeginLine = line;
							}else if(line < sBeginLine&&line!=0) {
								sBeginLine = line;
							}//begin line
							if(sBeginCol==0&&col!=0) {
								sBeginCol = col;
							}else if(col < sBeginCol&&col!=0) {
								sBeginCol = col;
							}//begin col
							if(lastLine > sLastLine) {
								sLastLine = lastLine;
							}//last line
							if(lastCol > sLastCol) {
								sLastCol = lastCol;
							}//last col
//							if(sRoot.getId()==16329) {
//								System.err.println(node.getId()+type+":"+line+","+lastLine+","+col+","+lastCol);
//							}
						}else if(type.equals("empty_stmt"))// special case
							continue;
					}
					for(ITree node : nodes2) {
						int line = node.getLine();
						int col = node.getColumn();
						int lastLine = node.getLastLine();
						int lastCol = node.getLastColumn();
						String type = sTC.getTypeLabel(node);
						if(!type.equals("block")) {// skip block nodes: they would push lastLine to the closing brace
							if(dBeginLine==0&&line!=0) {
								dBeginLine = line;
							}else if(line < dBeginLine&&line!=0) {
								dBeginLine = line;
							}//begin line
							if(dBeginCol==0&&col!=0) {
								dBeginCol = col;
							}else if(col < dBeginCol&&col!=0) {
								dBeginCol = col;
							}//begin col
							if(dLastLine < lastLine) {
								dLastLine = lastLine;
							}//last line
							if(dLastCol < lastCol) {
								dLastCol = lastCol;
							}//last col
						}else if(type.equals("empty_stmt"))// special case
							continue;
					}
				}
				// Pass over dst leaves (cached per dst root id, see workaround below).
				if(usedDefs2Map.get(dRoot.getId())==null) {
					ArrayList<ITree> leaves2 = new ArrayList<ITree>();
					Utils.traverse2Leaf(dRoot, leaves2);
					for(ITree leaf : leaves2) {
						String label = leaf.getLabel();
						String type = dTC.getTypeLabel(leaf);
						if(type.equals("literal")) {
							leaf.setLabel(Output.deleteLiteral(leaf, dTC));
//							if(label.contains("\""))
//								replaceMap_dst.put("@@"+label+"@@", "None");
//							else
//								replaceMap_dst.put("$$"+label+"$$", "num");//replace Literal
						}
						ArrayList<Definition> stringList = defMap2.get(label);
						if(stringList!=null) {
							ITree parBlock = leaf2parblock_map_dst.get(leaf);
							ArrayList<Definition> blockList = blockMap2.get(parBlock);
							for(Definition def2 : stringList) {
								if(blockList!=null) {
									if(blockList.contains(def2)) {
										if(leaf.getId()>def2.getDefLabelID()) {
											usedDefs2.add(def2);
//											leaf.setLabel("var");
											replaceMap_dst.put(label, label);
										}
//										System.out.println(leaf.getId()+","+def2.getDefLabelID());
//										System.out.println(def2.getType()+","+def2.getVarName());
									}
								}
								if(def2.getDefLabelID()==leaf.getId()) {
//									leaf.setLabel("var");
									replaceMap_dst.put(label, label);
								}
							}
						}
						if(same==false) {
							for(ITree leaf1 : leaves1) {
								String label1 = leaf1.getLabel();
								if(label.equals(label1)) {
									same = true;
								}
							}
						}
					}
					usedDefs2Map.put(dRoot.getId(), replaceMap_dst);
				}else {
					same = true;
					replaceMap_dst = usedDefs2Map.get(dRoot.getId());
				}// different src subtrees can map to the same dst subtree (a matcher limitation we cannot fix yet);
				// workaround: reuse the cached replaceMap_dst for that dst subtree and move on
				src = Output.subtree2src(srcT);
				String tar = Output.subtree2src(dstT);
				if(outMode.equals("txt")) {
					if(tar.contains("error")&&tar.contains("situation")) {
						errCount++;
						continue;
					}
					if(((float)src.length()/(float)tar.length())<0.25||((float)tar.length()/(float)src.length())<0.25) {
						continue;
					}// skip pairs whose lengths differ too much
					if(ifOnlyChange==true) {
						if(src.equals(tar))
							continue;
					}// drop identical sentence pairs
				}
				if(same==false) {
					System.err.println("No leaf is the same");
					continue;//no leaf is the same
				}
				// Emit the aligned pair in the selected output mode.
				if(outMode.equals("txt")) {
					if(ifPrintDef==true) {
						printDefs(usedDefs1);
						String buffer = getDefTxt(usedDefs1, usedDefs2, sTC, dTC, srcT, dstT);
						printTxt(outPath, outPath1, outPath2, buffer);
					}else {
						String buffer = getText(sTC, dTC, srcT, dstT);
						printTxt(outPath, outPath1, outPath2, buffer);
					}
				}else if(outMode.equals("json")) {
					srcT = absTree(srcT);
					dstT = absTree(dstT);
					TreeContext st = defuse.buildTC(srcT);
					TreeContext dt = defuse.buildTC(dstT);
					if(checkSim(st, dt)==false) {
						printJson(jpath, st, dt);
						treePairs.put(srcT, dstT);
					}
				}else if(outMode.equals("lineNum")) {
//					if(sRoot.getId()==806) {
//						for(Map.Entry<String, String> entry : replaceMap_src.entrySet()) {
//							String varName = entry.getKey();
//							String label = entry.getValue();
//							System.err.println(varName+"->"+label+";");
//						}
//					}
					String diffLine_check = "STID:"+srcT.getRoot().getId()+","
							+sBeginLine+","+sLastLine+","+sBeginCol+","+sLastCol+"->"
							+dBeginLine+","+dLastLine+","+dBeginCol+","+dLastCol;
					String diffLine = miName_src+";"+miName_dst+";"
							+sBeginLine+","+sLastLine+","+sBeginCol+","+sLastCol+"->"
							+dBeginLine+","+dLastLine+","+dBeginCol+","+dLastCol;
					printLineNum(outPath3, outPath4, outPath5, diffLine, replaceMap_src, replaceMap_dst);
					printLineCheck(outPath6, diffLine_check);
				}
			}
		}
		System.out.println("errCount:"+errCount);
	}
static private void printLineCheck(String outPath6, String diffLine_check) throws IOException {
File output6 = new File(outPath6);
BufferedWriter wr6 = new BufferedWriter(new FileWriter(output6, true));
wr6.append(diffLine_check);
wr6.newLine();
wr6.flush();
wr6.close();
}
static private void printLineNum(String outPath3, String outPath4, String outPath5, String diffLine,
HashMap<String , String> replaceMap_src, HashMap<String , String> replaceMap_dst) throws Exception {
File output3 = new File(outPath3);
BufferedWriter wr3 = new BufferedWriter(new FileWriter(output3, true));
File output4 = new File(outPath4);
BufferedWriter wr4 = new BufferedWriter(new FileWriter(output4, true));
File output5 = new File(outPath5);
BufferedWriter wr5 = new BufferedWriter(new FileWriter(output5, true));
wr3.append(diffLine);
wr3.newLine();
wr3.flush();
for(Map.Entry<String, String> entry : replaceMap_src.entrySet()) {
String varName = entry.getKey();
String label = entry.getValue();
wr4.append(varName+"->"+label+";");
}
wr4.newLine();
wr4.flush();
// System.out.println("STID:"+srcT.getRoot().getId()+","+dstT.getRoot().getId());
// System.out.println(replaceMap_dst.size());
for(Map.Entry<String, String> entry : replaceMap_dst.entrySet()) {
String varName = entry.getKey();
String label = entry.getValue();
wr5.append(varName+"->"+label+";");
}
wr5.newLine();
wr5.flush();
wr3.close();
wr4.close();
wr5.close();
}
static private void printDefs(HashSet<Definition> defs) {
System.out.println("usedDefs1:"+defs.size());
for(Definition def : defs) {
String label = def.getVarName();
System.out.println(label);
}
}
static private String getDefTxt(HashSet<Definition> usedDefs1, HashSet<Definition> usedDefs2,
TreeContext tc1, TreeContext tc2, SubTree srcT, SubTree dstT) throws Exception {
String buffer = "";
for(Definition def : usedDefs1) {
SubTree st = new SubTree(def.getRoot(), tc1, 0, "");
String stat = Output.subtree2src(st);
buffer = buffer +stat+" ; ";
}
String src = Output.subtree2src(srcT);
buffer = buffer + src+"\t";
for(Definition def : usedDefs2) {
SubTree st = new SubTree(def.getRoot(), tc2, 0, "");
String stat = Output.subtree2src(st);
buffer += stat+" ; ";
}
String tar = Output.subtree2src(dstT);
buffer += tar;
if(buffer.contains("error")&&buffer.contains("situation"))
return null;
return buffer;
}
static private SubTree absTree(SubTree st) {
ITree root = st.getRoot();
List<ITree> desList = root.getDescendants();
for(ITree node : desList) {
String label = node.getLabel();
try {
Integer.parseInt(label);
node.setLabel("num");
} catch (Exception e) {
// TODO: handle exception
}
}
return st;
}
static private ArrayList<String> checkExist(String outPath){
ArrayList<String> existList = new ArrayList<String>();
File outDir = new File(outPath);
File[] cpFiles = outDir.listFiles();
System.out.println(cpFiles.length);
for(File cpFile : cpFiles) {
String name = cpFile.getName();
String[] tmp = name.split("\\.")[0].split("_");
String cpNum = tmp[tmp.length-1];
existList.add(cpNum);
}
return existList;
}//断点重新开始任务用
	/**
	 * Returns true when the given tree pair is structurally identical to a pair
	 * already collected in {@code treePairs} (zero edit actions against both the
	 * stored src and dst trees), so json mode can skip duplicates. Increments
	 * the global duplicate counter {@code count} on a hit.
	 *
	 * @param tc1 candidate source tree
	 * @param tc2 candidate target tree
	 * @return true if some already-collected pair matches both trees exactly
	 */
	static private Boolean checkSim(TreeContext tc1, TreeContext tc2) {
		Boolean full_sim = false;
		Defuse defuse = new Defuse();
		for(Map.Entry<SubTree, SubTree> entry : treePairs.entrySet()) {
			SubTree st1 = entry.getKey();
			SubTree st2 = entry.getValue();
			try {
				TreeContext tc1_used = defuse.buildTC(st1);
				TreeContext tc2_used = defuse.buildTC(st2);
				// diff the candidate source tree against the stored source tree
				Matcher m1 = Matchers.getInstance().getMatcher(tc1.getRoot(), tc1_used.getRoot());
				m1.match();
				MappingStore mappings1 = m1.getMappings();
				ActionGenerator g1 = new ActionGenerator(tc1.getRoot(), tc1_used.getRoot(), mappings1);
				List<Action> actions1 = g1.generate();
				// diff the candidate target tree against the stored target tree
				Matcher m2 = Matchers.getInstance().getMatcher(tc2.getRoot(), tc2_used.getRoot());
				m2.match();
				MappingStore mappings2 = m2.getMappings();
				ActionGenerator g2 = new ActionGenerator(tc2.getRoot(), tc2_used.getRoot(), mappings2);
				List<Action> actions2 = g2.generate();
				if(actions1.size()==0&&actions2.size()==0) {
					// no edit actions on either side: exact structural duplicate
					full_sim = true;
					count++;
					return full_sim;
				}
			} catch (Exception e) {
				continue;// matching failed for this stored pair; try the next one
			}
		}
		return full_sim;
	}
/**
 * Renders a source/target subtree pair as one tab-separated line.
 * The tc1/tc2 parameters are accepted for signature compatibility but are
 * not consulted by this method.
 *
 * @return "src\ttarget", or null when the rendered text contains the
 *         printer's "error ... situation" failure marker.
 */
static private String getText(TreeContext tc1, TreeContext tc2, SubTree srcT, SubTree dstT) throws Exception {
	String rendered = Output.subtree2src(srcT) + "\t" + Output.subtree2src(dstT);
	boolean printerFailed = rendered.contains("error") && rendered.contains("situation");
	return printerFailed ? null : rendered;
}
/**
 * Appends one "src\tdst" training pair to three files: the combined file
 * (outPath) plus the source-only (outPath1) and target-only (outPath2)
 * files. A null buffer (a pair the renderer rejected) is silently skipped.
 *
 * @throws Exception on any I/O failure.
 */
static private void printTxt(String outPath, String outPath1, String outPath2, String buffer) throws Exception {
	if (buffer == null)
		return;
	String src = buffer.split("\t")[0];
	String dst = buffer.split("\t")[1];
	// try-with-resources guarantees the writers are closed (and flushed)
	// even when a write throws; the original leaked all three on exception.
	try (BufferedWriter wr = new BufferedWriter(new FileWriter(new File(outPath), true));
	     BufferedWriter wr1 = new BufferedWriter(new FileWriter(new File(outPath1), true));
	     BufferedWriter wr2 = new BufferedWriter(new FileWriter(new File(outPath2), true))) {
		wr.append(buffer);
		wr.newLine();
		wr1.append(src);
		wr1.newLine();
		wr2.append(dst);
		wr2.newLine();
	}
}
/**
 * Serialises one source/target TreeContext pair as JSON into jpath, naming
 * the files pair&lt;k&gt;_src.json / pair&lt;k&gt;_tgt.json with
 * k = (number of existing files)/2, so successive calls produce
 * consecutively numbered pairs.
 *
 * @param jpath output directory (created when missing).
 * @param srcT  source tree; skipped when null.
 * @param dstT  target tree; skipped when null.
 * @throws Exception on any I/O failure.
 */
static private void printJson(String jpath, TreeContext srcT, TreeContext dstT) throws Exception {
	File dir = new File(jpath);
	if (!dir.exists()) {
		dir.mkdirs();
	}
	File[] files = dir.listFiles();
	// listFiles() returns null when the directory is missing or unreadable
	// (e.g. mkdirs() failed); the original code would NPE on files.length.
	if (files == null) {
		throw new IOException("cannot list json output directory: " + jpath);
	}
	int fileSize = files.length;
	if (srcT != null) {
		String out = jpath + "pair" + String.valueOf(fileSize / 2) + "_src.json";
		// try-with-resources closes the writer even when serialisation
		// throws; the original leaked it on exception.
		try (BufferedWriter wr = new BufferedWriter(new FileWriter(new File(out)))) {
			wr.append(TreeIoUtils.toJson(srcT).toString());
		}
	}
	if (dstT != null) {
		String out1 = jpath + "pair" + String.valueOf(fileSize / 2) + "_tgt.json";
		try (BufferedWriter wr1 = new BufferedWriter(new FileWriter(new File(out1)))) {
			wr1.append(TreeIoUtils.toJson(dstT).toString());
		}
	}
}
}
| 21,783 | 34.363636 | 113 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/example1.java
|
// Diff fixture ("before" version) used by the GumTree examples; the unqualified
// HashMap has no import here - presumably irrelevant to the tree differ. Do not
// reformat: the exact token sequence is the test input.
public class PublishedAddressPolicy{
private HashMap<Integer, Integer> portMapping = new HashMap<Integer, Integer>();
}
| 120 | 39.333333 | 81 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/example2.java
|
// Diff fixture ("after" version) used by the GumTree examples. NOTE(review):
// hostMapping is declared Map<Integer, Integer> but initialised with
// HashMap<String, String>() - not compilable as plain Java; presumably
// intentional fixture content, confirm before "fixing". Do not reformat.
public class PublishedAddressPolicy{
private Map<Integer, Integer> portMapping = new HashMap<Integer, Integer>();
private Map<Integer, Integer> hostMapping = new HashMap<String, String>();
}
| 192 | 47.25 | 77 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/test.java
|
// Diff fixture ("before"): guard written as !fields.isEmpty(); the undefined
// `fields` symbol is irrelevant to the tree differ. Do not reformat.
public class Test {
public void foo() {
if (!fields.isEmpty()) {
//impl
}
}
}
| 104 | 12.125 | 32 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/test2.java
|
// Diff fixture ("after"): guard rewritten to iterator.hasNext(); the undefined
// `iterator` symbol is irrelevant to the tree differ. Do not reformat.
public class Test {
public void foo() {
if (iterator.hasNext()) {
//impl
}
}
}
| 105 | 12.25 | 33 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/apted/costmodel/CostModel.java
|
/* MIT License
*
* Copyright (c) 2017 Mateusz Pawlik
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package apted.costmodel;
import apted.node.Node;
/**
 * Contract for a pluggable cost model used by the tree edit distance:
 * implementations price the three edit operations (delete, insert, rename)
 * over nodes carrying data of type {@code D}.
 *
 * <p>When the three cost functions form a metric, the resulting tree edit
 * distance is a metric as well; arbitrary non-metric costs are also allowed.
 *
 * <p>All costs are {@code float} values.
 *
 * @param <D> type of the node data the costs are defined over.
 */
public interface CostModel<D> {

    /**
     * Prices the deletion of a node.
     *
     * @param n node considered for deletion.
     * @return deletion cost of {@code n}.
     */
    float del(Node<D> n);

    /**
     * Prices the insertion of a node.
     *
     * @param n node considered for insertion.
     * @return insertion cost of {@code n}.
     */
    float ins(Node<D> n);

    /**
     * Prices mapping (renaming) one node onto another.
     *
     * @param n1 source node of the rename.
     * @param n2 destination node of the rename.
     * @return cost of renaming {@code n1} to {@code n2}.
     */
    float ren(Node<D> n1, Node<D> n2);
}
| 2,375 | 33.941176 | 80 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/apted/costmodel/PerEditOperationStringNodeDataCostModel.java
|
/* MIT License
*
* Copyright (c) 2017 Mateusz Pawlik
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package apted.costmodel;
import apted.node.Node;
import apted.node.StringNodeData;
/**
 * Cost model over {@link node.StringNodeData} that charges a fixed,
 * user-supplied price per edit operation. Instances are immutable.
 */
public class PerEditOperationStringNodeDataCostModel implements CostModel<StringNodeData> {

    // Fields are never reassigned after construction, so they are final
    // (the original left them mutable).

    /** Fixed cost of deleting a node. */
    private final float delCost;

    /** Fixed cost of inserting a node. */
    private final float insCost;

    /** Fixed cost of renaming two nodes whose labels differ. */
    private final float renCost;

    /**
     * Initialises the cost model with the given edit-operation costs.
     *
     * @param delCost deletion cost.
     * @param insCost insertion cost.
     * @param renCost rename cost.
     */
    public PerEditOperationStringNodeDataCostModel(float delCost, float insCost, float renCost) {
        this.delCost = delCost;
        this.insCost = insCost;
        this.renCost = renCost;
    }

    /**
     * Calculates the cost of deleting a node.
     *
     * @param n the node considered to be deleted.
     * @return the fixed deletion cost.
     */
    @Override
    public float del(Node<StringNodeData> n) {
        return delCost;
    }

    /**
     * Calculates the cost of inserting a node.
     *
     * @param n the node considered to be inserted.
     * @return the fixed insertion cost.
     */
    @Override
    public float ins(Node<StringNodeData> n) {
        return insCost;
    }

    /**
     * Calculates the cost of renaming the string labels of two nodes.
     *
     * @param n1 the source node of rename.
     * @param n2 the destination node of rename.
     * @return 0 when both labels are equal, otherwise the fixed rename cost.
     */
    @Override
    public float ren(Node<StringNodeData> n1, Node<StringNodeData> n2) {
        return (n1.getNodeData().getLabel().equals(n2.getNodeData().getLabel())) ? 0.0f : renCost;
    }
}
| 2,940 | 29.319588 | 95 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/apted/costmodel/StringUnitCostModel.java
|
/* MIT License
*
* Copyright (c) 2017 Mateusz Pawlik
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package apted.costmodel;
import apted.node.Node;
import apted.node.StringNodeData;
/**
 * Unit-cost model over string labels: every deletion and insertion costs 1,
 * and a rename costs 1 unless both labels are equal (then 0).
 *
 * @see CostModel
 * @see StringNodeData
 */
// TODO: Use a label dictionary to encode string labels with integers for
//       faster rename cost computation.
public class StringUnitCostModel implements CostModel<StringNodeData> {

    /**
     * Prices the deletion of a node.
     *
     * @param n node considered for deletion.
     * @return the fixed unit cost {@code 1}.
     */
    @Override
    public float del(Node<StringNodeData> n) {
        return 1.0f;
    }

    /**
     * Prices the insertion of a node.
     *
     * @param n node considered for insertion.
     * @return the fixed unit cost {@code 1}.
     */
    @Override
    public float ins(Node<StringNodeData> n) {
        return 1.0f;
    }

    /**
     * Prices renaming the label of the source node to the label of the
     * destination node.
     *
     * @param n1 source node of the rename.
     * @param n2 destination node of the rename.
     * @return {@code 0} when both labels are equal, otherwise {@code 1}.
     */
    @Override
    public float ren(Node<StringNodeData> n1, Node<StringNodeData> n2) {
        String sourceLabel = n1.getNodeData().getLabel();
        String targetLabel = n2.getNodeData().getLabel();
        if (sourceLabel.equals(targetLabel)) {
            return 0.0f;
        }
        return 1.0f;
    }
}
| 2,486 | 32.608108 | 91 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/apted/distance/APTED.java
|
/* MIT License
*
* Copyright (c) 2017 Mateusz Pawlik
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package apted.distance;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import java.util.Stack;
import apted.costmodel.CostModel;
import apted.node.Node;
import apted.node.NodeIndexer;
/**
* Implements APTED algorithm [1,2].
*
* <ul>
* <li>Optimal strategy with all paths.
* <li>Single-node single path function supports currently only unit cost.
* <li>Two-node single path function not included.
* <li>\Delta^L and \Delta^R based on Zhang and Shasha's algorithm for executing
* left and right paths (as in [3]). If only left and right paths are used
* in the strategy, the memory usage is reduced by one quadratic array.
* <li>For any other path \Delta^A from [1] is used.
* </ul>
*
* References:
* <ul>
* <li>[1] M. Pawlik and N. Augsten. Efficient Computation of the Tree Edit
* Distance. ACM Transactions on Database Systems (TODS) 40(1). 2015.
* <li>[2] M. Pawlik and N. Augsten. Tree edit distance: Robust and memory-
* efficient. Information Systems 56. 2016.
* <li>[3] M. Pawlik and N. Augsten. RTED: A Robust Algorithm for the Tree Edit
* Distance. PVLDB 5(4). 2011.
* </ul>
*
* @param <C> type of cost model.
* @param <D> type of node data.
*/
public class APTED<C extends CostModel, D> {
/**
* Identifier of left path type = {@value LEFT};
*/
private static final byte LEFT = 0;
/**
* Identifier of right path type = {@value RIGHT};
*/
private static final byte RIGHT = 1;
/**
* Identifier of inner path type = {@value INNER};
*/
private static final byte INNER = 2;
/**
* Indexer of the source tree.
*
* @see node.NodeIndexer
*/
private NodeIndexer it1;
/**
* Indexer of the destination tree.
*
* @see node.NodeIndexer
*/
private NodeIndexer it2;
/**
* The size of the source input tree.
*/
private int size1;
/**
* The size of the destination tree.
*/
private int size2;
/**
* The distance matrix [1, Sections 3.4,8.2,8.3]. Used to store intermediate
* distances between pairs of subtrees.
*/
private float delta[][];
/**
* One of distance arrays to store intermediate distances in spfA.
*/
// TODO: Verify if other spf-local arrays are initialised within spf. If yes,
// move q to spf to - then, an offset has to be used to access it.
private float q[];
/**
* Array used in the algorithm before [1]. Using it does not change the
* complexity.
*
* <p>TODO: Do not use it [1, Section 8.4].
*/
private int fn[];
/**
* Array used in the algorithm before [1]. Using it does not change the
* complexity.
*
* <p>TODO: Do not use it [1, Section 8.4].
*/
private int ft[];
/**
* Stores the number of subproblems encountered while computing the distance
* [1, Section 10].
*/
private long counter;
/**
* Cost model to be used for calculating costs of edit operations.
*/
private C costModel;
/**
 * Constructs the APTED algorithm object with the specified cost model.
 * The cost model is consulted for every delete/insert/rename during the
 * distance computation.
 *
 * @param costModel cost model for edit operations.
 */
public APTED(C costModel) {
this.costModel = costModel;
}
/**
 * Computes the tree edit distance between the source and destination trees
 * using APTED [1,2]: index both trees, derive the optimal path strategy in
 * whichever traversal direction the heuristic from [2, Section 5.3] deems
 * cheaper, then run the distance computation itself.
 *
 * @param t1 source tree.
 * @param t2 destination tree.
 * @return tree edit distance.
 */
public float computeEditDistance(Node<D> t1, Node<D> t2) {
    // Index the nodes of both input trees.
    init(t1, t2);
    // Heuristic: traverse in the direction with the smaller left/right
    // child-count sum.
    delta = (it1.lchl < it1.rchl)
            ? computeOptStrategy_postL(it1, it2)
            : computeOptStrategy_postR(it1, it2);
    // Initialise structures, then compute the distance.
    tedInit();
    return gted(it1, it2);
}
/**
 * Testing-only entry point: computes TED with every strategy cell forced to
 * a fixed path type, so a specific single-path function is exercised
 * regardless of what the optimal strategy would be.
 *
 * @param t1 source tree.
 * @param t2 destination tree.
 * @param spfType single-path function to trigger (LEFT or RIGHT).
 * @return tree edit distance.
 */
public float computeEditDistance_spfTest(Node<D> t1, Node<D> t2, int spfType) {
    // Index the nodes of both input trees.
    init(t1, t2);
    // Fresh strategy matrix; cells stay 0 for any other spfType, exactly as
    // in the per-cell formulation of this loop.
    delta = new float[size1][size2];
    // Branch hoisted out of the loops - same fills, fewer comparisons.
    if (spfType == LEFT) {
        for (int row = 0; row < delta.length; row++) {
            for (int col = 0; col < delta[row].length; col++) {
                delta[row][col] = it1.preL_to_lld(row) + 1;
            }
        }
    } else if (spfType == RIGHT) {
        for (int row = 0; row < delta.length; row++) {
            for (int col = 0; col < delta[row].length; col++) {
                delta[row][col] = it1.preL_to_rld(row) + 1;
            }
        }
    }
    // Initialise structures, then compute the distance.
    tedInit();
    return gted(it1, it2);
}
/**
 * Indexes both input trees and caches their sizes. Must run before any
 * strategy or distance computation.
 *
 * @param t1 source input tree.
 * @param t2 destination input tree.
 */
public void init(Node<D> t1, Node<D> t2) {
    this.it1 = new NodeIndexer(t1, costModel);
    this.it2 = new NodeIndexer(t2, costModel);
    this.size1 = this.it1.getSize();
    this.size2 = this.it2.getSize();
}
/**
 * After the optimal strategy is computed, initialises distances of deleting
 * and inserting subtrees without their root nodes. Also resets the
 * subproblem counter and allocates the working arrays.
 */
private void tedInit() {
    // Reset the subproblems counter.
    counter = 0L;
    // Initialize arrays.
    int maxSize = Math.max(size1, size2) + 1;
    // TODO: Move q initialisation to spfA.
    q = new float[maxSize];
    // TODO: Do not use fn and ft arrays [1, Section 8.4].
    fn = new int[maxSize + 1];
    ft = new int[maxSize + 1];
    // Compute subtree distances without the root nodes when one of subtrees
    // is a single node. (The original also read parents[] into parentX and
    // parentY on every iteration; those locals were never used - removed.)
    int sizeX = -1;
    int sizeY = -1;
    // Loop over the nodes in reversed left-to-right preorder.
    for (int x = 0; x < size1; x++) {
        sizeX = it1.sizes[x];
        for (int y = 0; y < size2; y++) {
            sizeY = it2.sizes[y];
            // Set values in delta based on the sums of deletion and insertion
            // costs. Subtract the costs for root nodes.
            // In this method we don't have to verify the order of the input
            // trees because it is equal to the original.
            if (sizeX == 1 && sizeY == 1) {
                delta[x][y] = 0.0f;
            } else if (sizeX == 1) {
                delta[x][y] = it2.preL_to_sumInsCost[y] - costModel.ins(it2.preL_to_node[y]); // USE COST MODEL.
            } else if (sizeY == 1) {
                delta[x][y] = it1.preL_to_sumDelCost[x] - costModel.del(it1.preL_to_node[x]); // USE COST MODEL.
            }
        }
    }
}
/**
 * Compute the optimal strategy using left-to-right postorder traversal of
 * the nodes [2, Algorithm 1]. Returns, per node pair (v, w), an encoded
 * path id choosing the cheapest single-path decomposition.
 *
 * @param it1 node indexer of the source input tree.
 * @param it2 node indexer of the destination input tree.
 * @return array with the optimal strategy.
 */
// TODO: Document the internals. Point to lines of the lagorithm.
public float[][] computeOptStrategy_postL(NodeIndexer it1, NodeIndexer it2) {
int size1 = it1.getSize();
int size2 = it2.getSize();
// Per-pair strategy plus left/right/inner cost accumulators for tree 1
// (row per node) and tree 2 (single recycled row).
float strategy[][] = new float[size1][size2];
float cost1_L[][] = new float[size1][];
float cost1_R[][] = new float[size1][];
float cost1_I[][] = new float[size1][];
float cost2_L[] = new float[size2];
float cost2_R[] = new float[size2];
float cost2_I[] = new float[size2];
int cost2_path[] = new int[size2];
// Shared all-zero row aliased by every leaf of tree 1.
float leafRow[] = new float[size2];
// Path ids >= pathIDOffset refer to paths in tree 2.
int pathIDOffset = size1;
float minCost = 0x7fffffffffffffffL;
int strategyPath = -1;
// Local aliases of the indexer arrays (avoids repeated field loads).
int[] pre2size1 = it1.sizes;
int[] pre2size2 = it2.sizes;
int[] pre2descSum1 = it1.preL_to_desc_sum;
int[] pre2descSum2 = it2.preL_to_desc_sum;
int[] pre2krSum1 = it1.preL_to_kr_sum;
int[] pre2krSum2 = it2.preL_to_kr_sum;
int[] pre2revkrSum1 = it1.preL_to_rev_kr_sum;
int[] pre2revkrSum2 = it2.preL_to_rev_kr_sum;
int[] preL_to_preR_1 = it1.preL_to_preR;
int[] preL_to_preR_2 = it2.preL_to_preR;
int[] preR_to_preL_1 = it1.preR_to_preL;
int[] preR_to_preL_2 = it2.preR_to_preL;
int[] pre2parent1 = it1.parents;
int[] pre2parent2 = it2.parents;
boolean[] nodeType_L_1 = it1.nodeType_L;
boolean[] nodeType_L_2 = it2.nodeType_L;
boolean[] nodeType_R_1 = it1.nodeType_R;
boolean[] nodeType_R_2 = it2.nodeType_R;
int[] preL_to_postL_1 = it1.preL_to_postL;
int[] preL_to_postL_2 = it2.preL_to_postL;
int[] postL_to_preL_1 = it1.postL_to_preL;
int[] postL_to_preL_2 = it2.postL_to_preL;
int size_v, parent_v_preL, parent_w_preL, parent_w_postL = -1, size_w, parent_v_postL = -1;
int leftPath_v, rightPath_v;
float[] cost_Lpointer_v, cost_Rpointer_v, cost_Ipointer_v;
float[] strategypointer_v;
float[] cost_Lpointer_parent_v = null, cost_Rpointer_parent_v = null, cost_Ipointer_parent_v = null;
float[] strategypointer_parent_v = null;
int krSum_v, revkrSum_v, descSum_v;
boolean is_v_leaf;
int v_in_preL;
int w_in_preL;
// Finished cost rows are recycled through these stacks to bound memory.
Stack<float[]> rowsToReuse_L = new Stack<>();
Stack<float[]> rowsToReuse_R = new Stack<>();
Stack<float[]> rowsToReuse_I = new Stack<>();
// Outer loop: nodes of tree 1 in left-to-right postorder.
for(int v = 0; v < size1; v++) {
v_in_preL = postL_to_preL_1[v];
is_v_leaf = it1.isLeaf(v_in_preL);
parent_v_preL = pre2parent1[v_in_preL];
if (parent_v_preL != -1) {
parent_v_postL = preL_to_postL_1[parent_v_preL];
}
strategypointer_v = strategy[v_in_preL];
size_v = pre2size1[v_in_preL];
leftPath_v = -(preR_to_preL_1[preL_to_preR_1[v_in_preL] + size_v - 1] + 1);// this is the left path's ID which is the leftmost leaf node: l-r_preorder(r-l_preorder(v) + |Fv| - 1)
rightPath_v = v_in_preL + size_v - 1 + 1; // this is the right path's ID which is the rightmost leaf node: l-r_preorder(v) + |Fv| - 1
krSum_v = pre2krSum1[v_in_preL];
revkrSum_v = pre2revkrSum1[v_in_preL];
descSum_v = pre2descSum1[v_in_preL];
// Leaves alias the shared zero row and point their strategy at themselves.
if(is_v_leaf) {
cost1_L[v] = leafRow;
cost1_R[v] = leafRow;
cost1_I[v] = leafRow;
for(int i = 0; i < size2; i++) {
strategypointer_v[postL_to_preL_2[i]] = v_in_preL;
}
}
cost_Lpointer_v = cost1_L[v];
cost_Rpointer_v = cost1_R[v];
cost_Ipointer_v = cost1_I[v];
// Allocate (or recycle) the parent's cost rows on first touch.
if(parent_v_preL != -1 && cost1_L[parent_v_postL] == null) {
if (rowsToReuse_L.isEmpty()) {
cost1_L[parent_v_postL] = new float[size2];
cost1_R[parent_v_postL] = new float[size2];
cost1_I[parent_v_postL] = new float[size2];
} else {
cost1_L[parent_v_postL] = rowsToReuse_L.pop();
cost1_R[parent_v_postL] = rowsToReuse_R.pop();
cost1_I[parent_v_postL] = rowsToReuse_I.pop();
}
}
if (parent_v_preL != -1) {
cost_Lpointer_parent_v = cost1_L[parent_v_postL];
cost_Rpointer_parent_v = cost1_R[parent_v_postL];
cost_Ipointer_parent_v = cost1_I[parent_v_postL];
strategypointer_parent_v = strategy[parent_v_preL];
}
Arrays.fill(cost2_L, 0L);
Arrays.fill(cost2_R, 0L);
Arrays.fill(cost2_I, 0L);
Arrays.fill(cost2_path, 0);
// Inner loop: nodes of tree 2 in left-to-right postorder.
for(int w = 0; w < size2; w++) {
w_in_preL = postL_to_preL_2[w];
parent_w_preL = pre2parent2[w_in_preL];
if (parent_w_preL != -1) {
parent_w_postL = preL_to_postL_2[parent_w_preL];
}
size_w = pre2size2[w_in_preL];
if (it2.isLeaf(w_in_preL)) {
cost2_L[w] = 0L;
cost2_R[w] = 0L;
cost2_I[w] = 0L;
cost2_path[w] = w_in_preL;
}
minCost = 0x7fffffffffffffffL;
strategyPath = -1;
float tmpCost = 0x7fffffffffffffffL;
if (size_v <= 1 || size_w <= 1) { // USE NEW SINGLE_PATH FUNCTIONS FOR SMALL SUBTREES
minCost = Math.max(size_v, size_w);
} else {
// Evaluate the six candidate decompositions (left/right/inner path in
// either tree) and keep the cheapest; ties keep the earlier candidate.
tmpCost = (float) size_v * (float) pre2krSum2[w_in_preL] + cost_Lpointer_v[w];
if (tmpCost < minCost) {
minCost = tmpCost;
strategyPath = leftPath_v;
}
tmpCost = (float) size_v * (float) pre2revkrSum2[w_in_preL] + cost_Rpointer_v[w];
if (tmpCost < minCost) {
minCost = tmpCost;
strategyPath = rightPath_v;
}
tmpCost = (float) size_v * (float) pre2descSum2[w_in_preL] + cost_Ipointer_v[w];
if (tmpCost < minCost) {
minCost = tmpCost;
strategyPath = (int)strategypointer_v[w_in_preL] + 1;
}
tmpCost = (float) size_w * (float) krSum_v + cost2_L[w];
if (tmpCost < minCost) {
minCost = tmpCost;
strategyPath = -(preR_to_preL_2[preL_to_preR_2[w_in_preL] + size_w - 1] + pathIDOffset + 1);
}
tmpCost = (float) size_w * (float) revkrSum_v + cost2_R[w];
if (tmpCost < minCost) {
minCost = tmpCost;
strategyPath = w_in_preL + size_w - 1 + pathIDOffset + 1;
}
tmpCost = (float) size_w * (float) descSum_v + cost2_I[w];
if (tmpCost < minCost) {
minCost = tmpCost;
strategyPath = cost2_path[w] + pathIDOffset + 1;
}
}
// Propagate v's result into its parent's accumulators.
if (parent_v_preL != -1) {
cost_Rpointer_parent_v[w] += minCost;
tmpCost = -minCost + cost1_I[v][w];
if (tmpCost < cost1_I[parent_v_postL][w]) {
cost_Ipointer_parent_v[w] = tmpCost;
strategypointer_parent_v[w_in_preL] = strategypointer_v[w_in_preL];
}
if (nodeType_R_1[v_in_preL]) {
cost_Ipointer_parent_v[w] += cost_Rpointer_parent_v[w];
cost_Rpointer_parent_v[w] += cost_Rpointer_v[w] - minCost;
}
if (nodeType_L_1[v_in_preL]) {
cost_Lpointer_parent_v[w] += cost_Lpointer_v[w];
} else {
cost_Lpointer_parent_v[w] += minCost;
}
}
// Propagate w's result into its parent's accumulators.
if (parent_w_preL != -1) {
cost2_R[parent_w_postL] += minCost;
tmpCost = -minCost + cost2_I[w];
if (tmpCost < cost2_I[parent_w_postL]) {
cost2_I[parent_w_postL] = tmpCost;
cost2_path[parent_w_postL] = cost2_path[w];
}
if (nodeType_R_2[w_in_preL]) {
cost2_I[parent_w_postL] += cost2_R[parent_w_postL];
cost2_R[parent_w_postL] += cost2_R[w] - minCost;
}
if (nodeType_L_2[w_in_preL]) {
cost2_L[parent_w_postL] += cost2_L[w];
} else {
cost2_L[parent_w_postL] += minCost;
}
}
strategypointer_v[w_in_preL] = strategyPath;
}
// v's rows are no longer needed: zero them and recycle.
if (!it1.isLeaf(v_in_preL)) {
Arrays.fill(cost1_L[v], 0);
Arrays.fill(cost1_R[v], 0);
Arrays.fill(cost1_I[v], 0);
rowsToReuse_L.push(cost1_L[v]);
rowsToReuse_R.push(cost1_R[v]);
rowsToReuse_I.push(cost1_I[v]);
}
}
return strategy;
}
/**
 * Compute the optimal strategy using right-to-left postorder traversal of
 * the nodes [2, Algorithm 1]. Mirror of computeOptStrategy_postL that
 * iterates preorder ids backwards instead of mapping through postorder.
 *
 * @param it1 node indexer of the source input tree.
 * @param it2 node indexer of the destination input tree.
 * @return array with the optimal strategy.
 */
// QUESTION: Is it possible to merge it with the other strategy computation?
// TODO: Document the internals. Point to lines of the lagorithm.
public float[][] computeOptStrategy_postR(NodeIndexer it1, NodeIndexer it2) {
int size1 = it1.getSize();
int size2 = it2.getSize();
// Per-pair strategy plus left/right/inner cost accumulators (see postL).
float strategy[][] = new float[size1][size2];
float cost1_L[][] = new float[size1][];
float cost1_R[][] = new float[size1][];
float cost1_I[][] = new float[size1][];
float cost2_L[] = new float[size2];
float cost2_R[] = new float[size2];
float cost2_I[] = new float[size2];
int cost2_path[] = new int[size2];
// Shared all-zero row aliased by every leaf of tree 1.
float leafRow[] = new float[size2];
// Path ids >= pathIDOffset refer to paths in tree 2.
int pathIDOffset = size1;
float minCost = 0x7fffffffffffffffL;
int strategyPath = -1;
// Local aliases of the indexer arrays (avoids repeated field loads).
int[] pre2size1 = it1.sizes;
int[] pre2size2 = it2.sizes;
int[] pre2descSum1 = it1.preL_to_desc_sum;
int[] pre2descSum2 = it2.preL_to_desc_sum;
int[] pre2krSum1 = it1.preL_to_kr_sum;
int[] pre2krSum2 = it2.preL_to_kr_sum;
int[] pre2revkrSum1 = it1.preL_to_rev_kr_sum;
int[] pre2revkrSum2 = it2.preL_to_rev_kr_sum;
int[] preL_to_preR_1 = it1.preL_to_preR;
int[] preL_to_preR_2 = it2.preL_to_preR;
int[] preR_to_preL_1 = it1.preR_to_preL;
int[] preR_to_preL_2 = it2.preR_to_preL;
int[] pre2parent1 = it1.parents;
int[] pre2parent2 = it2.parents;
boolean[] nodeType_L_1 = it1.nodeType_L;
boolean[] nodeType_L_2 = it2.nodeType_L;
boolean[] nodeType_R_1 = it1.nodeType_R;
boolean[] nodeType_R_2 = it2.nodeType_R;
int size_v, parent_v, parent_w, size_w;
int leftPath_v, rightPath_v;
float[] cost_Lpointer_v, cost_Rpointer_v, cost_Ipointer_v;
float[] strategypointer_v;
float[] cost_Lpointer_parent_v = null, cost_Rpointer_parent_v = null, cost_Ipointer_parent_v = null;
float[] strategypointer_parent_v = null;
int krSum_v, revkrSum_v, descSum_v;
boolean is_v_leaf;
// Finished cost rows are recycled through these stacks to bound memory.
Stack<float[]> rowsToReuse_L = new Stack<>();
Stack<float[]> rowsToReuse_R = new Stack<>();
Stack<float[]> rowsToReuse_I = new Stack<>();
// Outer loop: nodes of tree 1, preorder ids descending (= right-to-left postorder).
for(int v = size1 - 1; v >= 0; v--) {
is_v_leaf = it1.isLeaf(v);
parent_v = pre2parent1[v];
strategypointer_v = strategy[v];
size_v = pre2size1[v];
leftPath_v = -(preR_to_preL_1[preL_to_preR_1[v] + pre2size1[v] - 1] + 1);// this is the left path's ID which is the leftmost leaf node: l-r_preorder(r-l_preorder(v) + |Fv| - 1)
rightPath_v = v + pre2size1[v] - 1 + 1; // this is the right path's ID which is the rightmost leaf node: l-r_preorder(v) + |Fv| - 1
krSum_v = pre2krSum1[v];
revkrSum_v = pre2revkrSum1[v];
descSum_v = pre2descSum1[v];
// Leaves alias the shared zero row and point their strategy at themselves.
if (is_v_leaf) {
cost1_L[v] = leafRow;
cost1_R[v] = leafRow;
cost1_I[v] = leafRow;
for (int i = 0; i < size2; i++) {
strategypointer_v[i] = v;
}
}
cost_Lpointer_v = cost1_L[v];
cost_Rpointer_v = cost1_R[v];
cost_Ipointer_v = cost1_I[v];
// Allocate (or recycle) the parent's cost rows on first touch.
if (parent_v != -1 && cost1_L[parent_v] == null) {
if (rowsToReuse_L.isEmpty()) {
cost1_L[parent_v] = new float[size2];
cost1_R[parent_v] = new float[size2];
cost1_I[parent_v] = new float[size2];
} else {
cost1_L[parent_v] = rowsToReuse_L.pop();
cost1_R[parent_v] = rowsToReuse_R.pop();
cost1_I[parent_v] = rowsToReuse_I.pop();
}
}
if (parent_v != -1) {
cost_Lpointer_parent_v = cost1_L[parent_v];
cost_Rpointer_parent_v = cost1_R[parent_v];
cost_Ipointer_parent_v = cost1_I[parent_v];
strategypointer_parent_v = strategy[parent_v];
}
Arrays.fill(cost2_L, 0L);
Arrays.fill(cost2_R, 0L);
Arrays.fill(cost2_I, 0L);
Arrays.fill(cost2_path, 0);
// Inner loop: nodes of tree 2, preorder ids descending.
for (int w = size2 - 1; w >= 0; w--) {
size_w = pre2size2[w];
if (it2.isLeaf(w)) {
cost2_L[w] = 0L;
cost2_R[w] = 0L;
cost2_I[w] = 0L;
cost2_path[w] = w;
}
minCost = 0x7fffffffffffffffL;
strategyPath = -1;
float tmpCost = 0x7fffffffffffffffL;
if (size_v <= 1 || size_w <= 1) { // USE NEW SINGLE_PATH FUNCTIONS FOR SMALL SUBTREES
minCost = Math.max(size_v, size_w);
} else {
// Evaluate the six candidate decompositions and keep the cheapest.
tmpCost = (float) size_v * (float) pre2krSum2[w] + cost_Lpointer_v[w];
if (tmpCost < minCost) {
minCost = tmpCost;
strategyPath = leftPath_v;
}
tmpCost = (float) size_v * (float) pre2revkrSum2[w] + cost_Rpointer_v[w];
if (tmpCost < minCost){
minCost = tmpCost;
strategyPath = rightPath_v;
}
tmpCost = (float) size_v * (float) pre2descSum2[w] + cost_Ipointer_v[w];
if (tmpCost < minCost) {
minCost = tmpCost;
strategyPath = (int)strategypointer_v[w] + 1;
}
tmpCost = (float) size_w * (float) krSum_v + cost2_L[w];
if (tmpCost < minCost) {
minCost = tmpCost;
strategyPath = -(preR_to_preL_2[preL_to_preR_2[w] + size_w - 1] + pathIDOffset + 1);
}
tmpCost = (float) size_w * (float) revkrSum_v + cost2_R[w];
if (tmpCost < minCost) {
minCost = tmpCost;
strategyPath = w + size_w - 1 + pathIDOffset + 1;
}
tmpCost = (float) size_w * (float) descSum_v + cost2_I[w];
if (tmpCost < minCost) {
minCost = tmpCost;
strategyPath = cost2_path[w] + pathIDOffset + 1;
}
}
// Propagate v's result into its parent's accumulators.
if (parent_v != -1) {
cost_Lpointer_parent_v[w] += minCost;
tmpCost = -minCost + cost1_I[v][w];
if (tmpCost < cost1_I[parent_v][w]) {
cost_Ipointer_parent_v[w] = tmpCost;
strategypointer_parent_v[w] = strategypointer_v[w];
}
if (nodeType_L_1[v]) {
cost_Ipointer_parent_v[w] += cost_Lpointer_parent_v[w];
cost_Lpointer_parent_v[w] += cost_Lpointer_v[w] - minCost;
}
if (nodeType_R_1[v]) {
cost_Rpointer_parent_v[w] += cost_Rpointer_v[w];
} else {
cost_Rpointer_parent_v[w] += minCost;
}
}
// Propagate w's result into its parent's accumulators.
parent_w = pre2parent2[w];
if (parent_w != -1) {
cost2_L[parent_w] += minCost;
tmpCost = -minCost + cost2_I[w];
if (tmpCost < cost2_I[parent_w]) {
cost2_I[parent_w] = tmpCost;
cost2_path[parent_w] = cost2_path[w];
}
if (nodeType_L_2[w]) {
cost2_I[parent_w] += cost2_L[parent_w];
cost2_L[parent_w] += cost2_L[w] - minCost;
}
if (nodeType_R_2[w]) {
cost2_R[parent_w] += cost2_R[w];
} else {
cost2_R[parent_w] += minCost;
}
}
strategypointer_v[w] = strategyPath;
}
// v's rows are no longer needed: zero them and recycle.
if (!it1.isLeaf(v)) {
Arrays.fill(cost1_L[v], 0);
Arrays.fill(cost1_R[v], 0);
Arrays.fill(cost1_I[v], 0);
rowsToReuse_L.push(cost1_L[v]);
rowsToReuse_R.push(cost1_R[v]);
rowsToReuse_I.push(cost1_I[v]);
}
}
return strategy;
}
/**
 * Implements spf1 single path function for the case when one of the subtrees
 * is a single node [2, Section 6.1, Algorithm 2].
 *
 * <p>We allow an arbitrary cost model which in principle may allow renames to
 * have a lower cost than the respective deletion plus insertion. Thus,
 * Formula 4 in [2] has to be modified to account for that case.
 *
 * <p>In this method we don't have to verify if input subtrees have been
 * swapped because they're always passed in the original input order.
 *
 * @param ni1 node indexer for the source input subtree.
 * @param ni2 node indexer for the destination input subtree.
 * @param subtreeRootNode1 root node of a subtree in the source input tree.
 * @param subtreeRootNode2 root node of a subtree in the destination input tree.
 * @return the tree edit distance between two subtrees of the source and destination input subtrees,
 *         or -1 when neither subtree is a single node (caller must not invoke it then).
 */
// TODO: Merge the initialisation loop in tedInit with this method.
// Currently, spf1 doesn't have to store distances in delta, because
// all of them have been stored in tedInit.
private float spf1 (NodeIndexer ni1, int subtreeRootNode1, NodeIndexer ni2, int subtreeRootNode2) {
int subtreeSize1 = ni1.sizes[subtreeRootNode1];
int subtreeSize2 = ni2.sizes[subtreeRootNode2];
// Case 1: both subtrees are single nodes - cheaper of rename vs delete+insert.
if (subtreeSize1 == 1 && subtreeSize2 == 1) {
Node<D> n1 = ni1.preL_to_node[subtreeRootNode1];
Node<D> n2 = ni2.preL_to_node[subtreeRootNode2];
float maxCost = costModel.del(n1) + costModel.ins(n2);
float renCost = costModel.ren(n1, n2);
return renCost < maxCost ? renCost : maxCost;
}
// Case 2: source is a single node - insert all of subtree 2, optionally
// swapping the cheapest insert for a rename of n1.
if (subtreeSize1 == 1) {
Node<D> n1 = ni1.preL_to_node[subtreeRootNode1];
Node<D> n2 = null;
float cost = ni2.preL_to_sumInsCost[subtreeRootNode2];
float maxCost = cost + costModel.del(n1);
float minRenMinusIns = cost;
float nodeRenMinusIns = 0;
for (int i = subtreeRootNode2; i < subtreeRootNode2 + subtreeSize2; i++) {
n2 = ni2.preL_to_node[i];
nodeRenMinusIns = costModel.ren(n1, n2) - costModel.ins(n2);
if (nodeRenMinusIns < minRenMinusIns) {
minRenMinusIns = nodeRenMinusIns;
}
}
cost += minRenMinusIns;
return cost < maxCost ? cost : maxCost;
}
// Case 3: destination is a single node - symmetric to case 2 with deletes.
if (subtreeSize2 == 1) {
Node<D> n1 = null;
Node<D> n2 = ni2.preL_to_node[subtreeRootNode2];
float cost = ni1.preL_to_sumDelCost[subtreeRootNode1];
float maxCost = cost + costModel.ins(n2);
float minRenMinusDel = cost;
float nodeRenMinusDel = 0;
for (int i = subtreeRootNode1; i < subtreeRootNode1 + subtreeSize1; i++) {
n1 = ni1.preL_to_node[i];
nodeRenMinusDel = costModel.ren(n1, n2) - costModel.del(n1);
if (nodeRenMinusDel < minRenMinusDel) {
minRenMinusDel = nodeRenMinusDel;
}
}
cost += minRenMinusDel;
return cost < maxCost ? cost : maxCost;
}
// Precondition violated: neither subtree is a single node.
return -1;
}
/**
 * Implements the GTED algorithm [1, Section 3.4]: recursively decomposes
 * both input trees along the precomputed strategy paths and delegates each
 * relevant subtree pair to a single-path function (spf1, spfL, spfR, spfA).
 *
 * @param it1 node indexer for the source input tree.
 * @param it2 node indexer for the destination input tree.
 * @return the tree edit distance between the source and destination trees.
 */
private float gted(NodeIndexer it1, NodeIndexer it2) {
  int root1 = it1.getCurrentNode();
  int root2 = it2.getCurrentNode();
  // If either subtree is a single node, the cheap spf1 handles the pair.
  if (it1.sizes[root1] == 1 || it2.sizes[root2] == 1) {
    return spf1(it1, root1, it2, root2);
  }
  // The strategy path id was stored in delta by the strategy computation.
  int pathId = (int) delta[root1][root2];
  int pathIdOffset = it1.getSize();
  int pathNode = Math.abs(pathId) - 1;
  byte pathType;
  if (pathNode < pathIdOffset) {
    // The strategy path lies in the source tree. Recurse into every subtree
    // hanging off the path, walking the path bottom-up.
    pathType = getStrategyPathType(pathId, pathIdOffset, it1, root1, it1.sizes[root1]);
    for (int parent = it1.parents[pathNode]; parent >= root1; parent = it1.parents[pathNode]) {
      for (int child : it1.children[parent]) {
        if (child != pathNode) {
          it1.setCurrentNode(child);
          gted(it1, it2);
        }
      }
      pathNode = parent;
    }
    // TODO: Move this property away from node indexer and pass directly to spfs.
    it1.setCurrentNode(root1);
    // The boolean argument tells the spfs whether the subtree order has been
    // swapped relative to the initial input trees; it controls delta array
    // access and the choice of edit operation [1, Section 3.4].
    if (pathType == 0) {
      return spfL(it1, it2, false);
    }
    if (pathType == 1) {
      return spfR(it1, it2, false);
    }
    return spfA(it1, it2, Math.abs(pathId) - 1, pathType, false);
  }
  // The strategy path lies in the destination tree; mirror the logic above
  // with the roles of the two trees exchanged.
  pathNode -= pathIdOffset;
  pathType = getStrategyPathType(pathId, pathIdOffset, it2, root2, it2.sizes[root2]);
  for (int parent = it2.parents[pathNode]; parent >= root2; parent = it2.parents[pathNode]) {
    for (int child : it2.children[parent]) {
      if (child != pathNode) {
        it2.setCurrentNode(child);
        gted(it1, it2);
      }
    }
    pathNode = parent;
  }
  // TODO: Move this property away from node indexer and pass directly to spfs.
  it2.setCurrentNode(root2);
  // Swapped order: the spfs must mirror delta accesses and edit operations.
  if (pathType == 0) {
    return spfL(it2, it1, true);
  }
  if (pathType == 1) {
    return spfR(it2, it1, true);
  }
  return spfA(it2, it1, Math.abs(pathId) - pathIdOffset - 1, pathType, true);
}
/**
 * Implements the single-path function spfA. Here, we use it strictly for
 * inner paths (spfL and spfR have better performance for left and right
 * paths, respectively) [1, Sections 7 and 8]. However, in this stage it
 * also executes correctly for left and right paths.
 *
 * @param it1 node indexer of the left-hand input subtree.
 * @param it2 node indexer of the right-hand input subtree.
 * @param pathID the left-to-right preorder id of the strategy path's leaf node.
 * @param pathType type of the strategy path (LEFT, RIGHT, INNER).
 * @param treesSwapped says if the order of input subtrees has been swapped
 *                     compared to the order of the initial input trees. Used
 *                     for accessing delta array and deciding on the edit
 *                     operation.
 * @return tree edit distance between left-hand and right-hand input subtrees.
 */
// TODO: Document the internals. Point to lines of the algorithm.
// The implementation has been micro-tuned: variables initialised once,
// pointers to arrays precomputed and fixed for entire lower-level loops,
// parts of lower-level loops that don't change moved to upper-level loops.
private float spfA(NodeIndexer it1, NodeIndexer it2, int pathID, byte pathType, boolean treesSwapped) {
  // Local aliases to indexer arrays, hoisted once for the inner loops.
  Node<D>[] it2nodes = it2.preL_to_node;
  Node<D> lFNode;
  int[] it1sizes = it1.sizes;
  int[] it2sizes = it2.sizes;
  int[] it1parents = it1.parents;
  int[] it2parents = it2.parents;
  int[] it1preL_to_preR = it1.preL_to_preR;
  int[] it2preL_to_preR = it2.preL_to_preR;
  int[] it1preR_to_preL = it1.preR_to_preL;
  int[] it2preR_to_preL = it2.preR_to_preL;
  int currentSubtreePreL1 = it1.getCurrentNode();
  int currentSubtreePreL2 = it2.getCurrentNode();
  // Variables to incrementally sum up the forest sizes.
  int currentForestSize1 = 0;
  int currentForestSize2 = 0;
  int tmpForestSize1 = 0;
  // Variables to incrementally sum up the forest cost.
  float currentForestCost1 = 0;
  float currentForestCost2 = 0;
  float tmpForestCost1 = 0;
  int subtreeSize2 = it2.sizes[currentSubtreePreL2];
  int subtreeSize1 = it1.sizes[currentSubtreePreL1];
  // t and s are the intermediate-distance arrays of the dynamic program.
  float[][] t = new float[subtreeSize2+1][subtreeSize2+1];
  float[][] s = new float[subtreeSize1+1][subtreeSize2+1];
  // Minimum of the last computed subproblem; returned at the end.
  float minCost = -1;
  // sp1, sp2 and sp3 correspond to three elements of the minimum in the
  // recursive formula [1, Figure 12].
  float sp1 = 0;
  float sp2 = 0;
  float sp3 = 0;
  int startPathNode = -1;
  int endPathNode = pathID;
  int it1PreLoff = endPathNode;
  int it2PreLoff = currentSubtreePreL2;
  int it1PreRoff = it1preL_to_preR[endPathNode];
  int it2PreRoff = it2preL_to_preR[it2PreLoff];
  // variable declarations which were inside the loops
  int rFlast,lFlast,endPathNode_in_preR,startPathNode_in_preR,parent_of_endPathNode,parent_of_endPathNode_in_preR,
  lFfirst,rFfirst,rGlast,rGfirst,lGfirst,rG_in_preL,rGminus1_in_preL,parent_of_rG_in_preL,lGlast,lF_in_preR,lFSubtreeSize,
  lGminus1_in_preR,parent_of_lG,parent_of_lG_in_preR,rF_in_preL,rFSubtreeSize,
  rGfirst_in_preL;
  boolean leftPart,rightPart,fForestIsTree,lFIsConsecutiveNodeOfCurrentPathNode,lFIsLeftSiblingOfCurrentPathNode,
  rFIsConsecutiveNodeOfCurrentPathNode,rFIsRightSiblingOfCurrentPathNode;
  float[] sp1spointer,sp2spointer,sp3spointer,sp3deltapointer,swritepointer,sp1tpointer,sp3tpointer;
  // These variables store the id of the source (which array) of looking up
  // elements of the minimum in the recursive formula [1, Figures 12,13].
  byte sp1source,sp3source;
  // Loop A [1, Algorithm 3] - walk up the path.
  while (endPathNode >= currentSubtreePreL1) {
    it1PreLoff = endPathNode;
    it1PreRoff = it1preL_to_preR[endPathNode];
    rFlast = -1;
    lFlast = -1;
    endPathNode_in_preR = it1preL_to_preR[endPathNode];
    startPathNode_in_preR = startPathNode == -1 ? 0x7fffffff : it1preL_to_preR[startPathNode];
    parent_of_endPathNode = it1parents[endPathNode];
    parent_of_endPathNode_in_preR = parent_of_endPathNode == -1 ? 0x7fffffff : it1preL_to_preR[parent_of_endPathNode];
    if (startPathNode - endPathNode > 1) {
      leftPart = true;
    } else {
      leftPart = false;
    }
    if (startPathNode >= 0 && startPathNode_in_preR - endPathNode_in_preR > 1) {
      rightPart = true;
    } else {
      rightPart = false;
    }
    // Deal with nodes to the left of the path.
    if (pathType == 1 || pathType == 2 && leftPart) {
      if (startPathNode == -1) {
        rFfirst = endPathNode_in_preR;
        lFfirst = endPathNode;
      } else {
        rFfirst = startPathNode_in_preR;
        lFfirst = startPathNode - 1;
      }
      if (!rightPart) {
        rFlast = endPathNode_in_preR;
      }
      rGlast = it2preL_to_preR[currentSubtreePreL2];
      rGfirst = (rGlast + subtreeSize2) - 1;
      lFlast = rightPart ? endPathNode + 1 : endPathNode;
      fn[fn.length - 1] = -1;
      for (int i = currentSubtreePreL2; i < currentSubtreePreL2 + subtreeSize2; i++) {
        fn[i] = -1;
        ft[i] = -1;
      }
      // Store the current size and cost of forest in F.
      tmpForestSize1 = currentForestSize1;
      tmpForestCost1 = currentForestCost1;
      // Loop B [1, Algorithm 3] - for all nodes in G (right-hand input tree).
      for (int rG = rGfirst; rG >= rGlast; rG--) {
        lGfirst = it2preR_to_preL[rG];
        rG_in_preL = it2preR_to_preL[rG];
        rGminus1_in_preL = rG <= it2preL_to_preR[currentSubtreePreL2] ? 0x7fffffff : it2preR_to_preL[rG - 1];
        parent_of_rG_in_preL = it2parents[rG_in_preL];
        // This if statement decides on the last lG node for Loop D [1, Algorithm 3].
        if (pathType == 1){
          if (lGfirst == currentSubtreePreL2 || rGminus1_in_preL != parent_of_rG_in_preL) {
            lGlast = lGfirst;
          } else {
            lGlast = it2parents[lGfirst]+1;
          }
        } else {
          lGlast = lGfirst == currentSubtreePreL2 ? lGfirst : currentSubtreePreL2+1;
        }
        updateFnArray(it2.preL_to_ln[lGfirst], lGfirst, currentSubtreePreL2);
        updateFtArray(it2.preL_to_ln[lGfirst], lGfirst);
        int rF = rFfirst;
        // Reset size and cost of the forest in F.
        currentForestSize1 = tmpForestSize1;
        currentForestCost1 = tmpForestCost1;
        // Loop C [1, Algorithm 3] - for all nodes to the left of the path node.
        for (int lF = lFfirst; lF >= lFlast; lF--) {
          // This if statement fixes rF node.
          if (lF == lFlast && !rightPart) {
            rF = rFlast;
          }
          lFNode = it1.preL_to_node[lF];
          // Increment size and cost of F forest by node lF.
          currentForestSize1++;
          currentForestCost1 += (treesSwapped ? costModel.ins(lFNode) : costModel.del(lFNode)); // USE COST MODEL - sum up deletion cost of a forest.
          // Reset size and cost of forest in G to subtree G_lGfirst.
          currentForestSize2 = it2sizes[lGfirst];
          currentForestCost2 = (treesSwapped ? it2.preL_to_sumDelCost[lGfirst] : it2.preL_to_sumInsCost[lGfirst]); // USE COST MODEL - reset to subtree insertion cost.
          lF_in_preR = it1preL_to_preR[lF];
          fForestIsTree = lF_in_preR == rF;
          lFSubtreeSize = it1sizes[lF];
          lFIsConsecutiveNodeOfCurrentPathNode = startPathNode - lF == 1;
          lFIsLeftSiblingOfCurrentPathNode = lF + lFSubtreeSize == startPathNode;
          sp1spointer = s[(lF + 1) - it1PreLoff];
          sp2spointer = s[lF - it1PreLoff];
          sp3spointer = s[0];
          sp3deltapointer = treesSwapped ? null : delta[lF];
          swritepointer = s[lF - it1PreLoff];
          sp1source = 1; // Search sp1 value in s array by default.
          sp3source = 1; // Search second part of sp3 value in s array by default.
          if (fForestIsTree) { // F_{lF,rF} is a tree.
            if (lFSubtreeSize == 1) { // F_{lF,rF} is a single node.
              sp1source = 3;
            } else if (lFIsConsecutiveNodeOfCurrentPathNode) { // F_{lF,rF}-lF is the path node subtree.
              sp1source = 2;
            }
            sp3 = 0;
            sp3source = 2;
          } else {
            if (lFIsConsecutiveNodeOfCurrentPathNode) {
              sp1source = 2;
            }
            sp3 = currentForestCost1 - (treesSwapped ? it1.preL_to_sumInsCost[lF] : it1.preL_to_sumDelCost[lF]); // USE COST MODEL - Delete F_{lF,rF}-F_lF.
            if (lFIsLeftSiblingOfCurrentPathNode) {
              sp3source = 3;
            }
          }
          if (sp3source == 1) {
            sp3spointer = s[(lF + lFSubtreeSize) - it1PreLoff];
          }
          // Go to first lG.
          int lG = lGfirst;
          // currentForestSize2++;
          // sp1, sp2, sp3 -- Done here for the first node in Loop D. It differs for consecutive nodes.
          // sp1 -- START
          switch(sp1source) {
            case 1: sp1 = sp1spointer[lG - it2PreLoff]; break;
            case 2: sp1 = t[lG - it2PreLoff][rG - it2PreRoff]; break;
            case 3: sp1 = currentForestCost2; break; // USE COST MODEL - Insert G_{lG,rG}.
          }
          sp1 += (treesSwapped ? costModel.ins(lFNode) : costModel.del(lFNode));// USE COST MODEL - Delete lF, leftmost root node in F_{lF,rF}.
          // sp1 -- END
          minCost = sp1; // Start with sp1 as minimal value.
          // sp2 -- START
          if (currentForestSize2 == 1) { // G_{lG,rG} is a single node.
            sp2 = currentForestCost1; // USE COST MODEL - Delete F_{lF,rF}.
          } else { // G_{lG,rG} is a tree.
            sp2 = q[lF];
          }
          sp2 += (treesSwapped ? costModel.del(it2nodes[lG]) : costModel.ins(it2nodes[lG]));// USE COST MODEL - Insert lG, leftmost root node in G_{lG,rG}.
          if (sp2 < minCost) { // Check if sp2 is minimal value.
            minCost = sp2;
          }
          // sp2 -- END
          // sp3 -- START
          if (sp3 < minCost) {
            sp3 += treesSwapped ? delta[lG][lF] : sp3deltapointer[lG];
            if (sp3 < minCost) {
              sp3 += (treesSwapped ? costModel.ren(it2nodes[lG], lFNode) : costModel.ren(lFNode, it2nodes[lG])); // USE COST MODEL - Rename the leftmost root nodes in F_{lF,rF} and G_{lG,rG}.
              if(sp3 < minCost) {
                minCost = sp3;
              }
            }
          }
          // sp3 -- END
          swritepointer[lG - it2PreLoff] = minCost;
          // Go to next lG.
          lG = ft[lG];
          // Count one solved subproblem (statistics).
          counter++;
          // Loop D [1, Algorithm 3] - for all nodes to the left of rG.
          while (lG >= lGlast) {
            // Increment size and cost of G forest by node lG.
            currentForestSize2++;
            currentForestCost2 += (treesSwapped ? costModel.del(it2nodes[lG]) : costModel.ins(it2nodes[lG]));
            switch(sp1source) {
              case 1: sp1 = sp1spointer[lG - it2PreLoff] + (treesSwapped ? costModel.ins(lFNode) : costModel.del(lFNode)); break; // USE COST MODEL - Delete lF, leftmost root node in F_{lF,rF}.
              case 2: sp1 = t[lG - it2PreLoff][rG - it2PreRoff] + (treesSwapped ? costModel.ins(lFNode) : costModel.del(lFNode)); break; // USE COST MODEL - Delete lF, leftmost root node in F_{lF,rF}.
              case 3: sp1 = currentForestCost2 + (treesSwapped ? costModel.ins(lFNode) : costModel.del(lFNode)); break; // USE COST MODEL - Insert G_{lG,rG} and elete lF, leftmost root node in F_{lF,rF}.
            }
            sp2 = sp2spointer[fn[lG] - it2PreLoff] + (treesSwapped ? costModel.del(it2nodes[lG]) : costModel.ins(it2nodes[lG])); // USE COST MODEL - Insert lG, leftmost root node in G_{lG,rG}.
            minCost = sp1;
            if(sp2 < minCost) {
              minCost = sp2;
            }
            sp3 = treesSwapped ? delta[lG][lF] : sp3deltapointer[lG];
            if (sp3 < minCost) {
              switch(sp3source) {
                case 1: sp3 += sp3spointer[fn[(lG + it2sizes[lG]) - 1] - it2PreLoff]; break;
                case 2: sp3 += currentForestCost2 - (treesSwapped ? it2.preL_to_sumDelCost[lG] : it2.preL_to_sumInsCost[lG]); break; // USE COST MODEL - Insert G_{lG,rG}-G_lG.
                case 3: sp3 += t[fn[(lG + it2sizes[lG]) - 1] - it2PreLoff][rG - it2PreRoff]; break;
              }
              if (sp3 < minCost) {
                sp3 += (treesSwapped ? costModel.ren(it2nodes[lG], lFNode) : costModel.ren(lFNode, it2nodes[lG])); // USE COST MODEL - Rename the leftmost root nodes in F_{lF,rF} and G_{lG,rG}.
                if (sp3 < minCost) {
                  minCost = sp3;
                }
              }
            }
            swritepointer[lG - it2PreLoff] = minCost;
            lG = ft[lG];
            counter++;
          }
        }
        // Propagate results for parent forests and store relevant distances
        // into delta for later lookups.
        if (rGminus1_in_preL == parent_of_rG_in_preL) {
          if (!rightPart) {
            if (leftPart) {
              if (treesSwapped) {
                delta[parent_of_rG_in_preL][endPathNode] = s[(lFlast + 1) - it1PreLoff][(rGminus1_in_preL + 1) - it2PreLoff];
              } else {
                delta[endPathNode][parent_of_rG_in_preL] = s[(lFlast + 1) - it1PreLoff][(rGminus1_in_preL + 1) - it2PreLoff];
              }
            }
            if (endPathNode > 0 && endPathNode == parent_of_endPathNode + 1 && endPathNode_in_preR == parent_of_endPathNode_in_preR + 1) {
              if (treesSwapped) {
                delta[parent_of_rG_in_preL][parent_of_endPathNode] = s[lFlast - it1PreLoff][(rGminus1_in_preL + 1) - it2PreLoff];
              } else {
                delta[parent_of_endPathNode][parent_of_rG_in_preL] = s[lFlast - it1PreLoff][(rGminus1_in_preL + 1) - it2PreLoff];
              }
            }
          }
          for (int lF = lFfirst; lF >= lFlast; lF--) {
            q[lF] = s[lF - it1PreLoff][(parent_of_rG_in_preL + 1) - it2PreLoff];
          }
        }
        // TODO: first pointers can be precomputed
        for (int lG = lGfirst; lG >= lGlast; lG = ft[lG]) {
          t[lG - it2PreLoff][rG - it2PreRoff] = s[lFlast - it1PreLoff][lG - it2PreLoff];
        }
      }
    }
    // Deal with nodes to the right of the path.
    if (pathType == 0 || pathType == 2 && rightPart || pathType == 2 && !leftPart && !rightPart) {
      if (startPathNode == -1) {
        lFfirst = endPathNode;
        rFfirst = it1preL_to_preR[endPathNode];
      } else {
        rFfirst = it1preL_to_preR[startPathNode] - 1;
        lFfirst = endPathNode + 1;
      }
      lFlast = endPathNode;
      lGlast = currentSubtreePreL2;
      lGfirst = (lGlast + subtreeSize2) - 1;
      rFlast = it1preL_to_preR[endPathNode];
      fn[fn.length - 1] = -1;
      for (int i = currentSubtreePreL2; i < currentSubtreePreL2 + subtreeSize2; i++){
        fn[i] = -1;
        ft[i] = -1;
      }
      // Store size and cost of the current forest in F.
      tmpForestSize1 = currentForestSize1;
      tmpForestCost1 = currentForestCost1;
      // Loop B' [1, Algorithm 3] - for all nodes in G.
      for (int lG = lGfirst; lG >= lGlast; lG--) {
        rGfirst = it2preL_to_preR[lG];
        updateFnArray(it2.preR_to_ln[rGfirst], rGfirst, it2preL_to_preR[currentSubtreePreL2]);
        updateFtArray(it2.preR_to_ln[rGfirst], rGfirst);
        int lF = lFfirst;
        lGminus1_in_preR = lG <= currentSubtreePreL2 ? 0x7fffffff : it2preL_to_preR[lG - 1];
        parent_of_lG = it2parents[lG];
        parent_of_lG_in_preR = parent_of_lG == -1 ? -1 : it2preL_to_preR[parent_of_lG];
        // Reset size and cost of forest if F.
        currentForestSize1 = tmpForestSize1;
        currentForestCost1 = tmpForestCost1;
        // This if statement decides on the last rG node for Loop D'.
        if (pathType == 0) {
          if (lG == currentSubtreePreL2) {
            rGlast = rGfirst;
          } else if (it2.children[parent_of_lG][0] != lG) {
            rGlast = rGfirst;
          } else {
            rGlast = it2preL_to_preR[parent_of_lG]+1;
          }
        } else {
          rGlast = rGfirst == it2preL_to_preR[currentSubtreePreL2] ? rGfirst : it2preL_to_preR[currentSubtreePreL2];
        }
        // Loop C' [1, Algorithm 3] - for all nodes to the right of the path node.
        for (int rF = rFfirst; rF >= rFlast; rF--) {
          if (rF == rFlast) {
            lF = lFlast;
          }
          rF_in_preL = it1preR_to_preL[rF];
          // Increment size and cost of F forest by node rF.
          currentForestSize1++;
          currentForestCost1 += (treesSwapped ? costModel.ins(it1.preL_to_node[rF_in_preL]) : costModel.del(it1.preL_to_node[rF_in_preL])); // USE COST MODEL - sum up deletion cost of a forest.
          // Reset size and cost of G forest to G_lG.
          currentForestSize2 = it2sizes[lG];
          currentForestCost2 = (treesSwapped ? it2.preL_to_sumDelCost[lG] : it2.preL_to_sumInsCost[lG]); // USE COST MODEL - reset to subtree insertion cost.
          rFSubtreeSize = it1sizes[rF_in_preL];
          if (startPathNode > 0) {
            rFIsConsecutiveNodeOfCurrentPathNode = startPathNode_in_preR - rF == 1;
            rFIsRightSiblingOfCurrentPathNode = rF + rFSubtreeSize == startPathNode_in_preR;
          } else {
            rFIsConsecutiveNodeOfCurrentPathNode = false;
            rFIsRightSiblingOfCurrentPathNode = false;
          }
          fForestIsTree = rF_in_preL == lF;
          Node<D> rFNode = it1.preL_to_node[rF_in_preL];
          sp1spointer = s[(rF + 1) - it1PreRoff];
          sp2spointer = s[rF - it1PreRoff];
          sp3spointer = s[0];
          sp3deltapointer = treesSwapped ? null : delta[rF_in_preL];
          swritepointer = s[rF - it1PreRoff];
          sp1tpointer = t[lG - it2PreLoff];
          sp3tpointer = t[lG - it2PreLoff];
          sp1source = 1;
          sp3source = 1;
          if (fForestIsTree) {
            if (rFSubtreeSize == 1) {
              sp1source = 3;
            } else if (rFIsConsecutiveNodeOfCurrentPathNode) {
              sp1source = 2;
            }
            sp3 = 0;
            sp3source = 2;
          } else {
            if (rFIsConsecutiveNodeOfCurrentPathNode) {
              sp1source = 2;
            }
            sp3 = currentForestCost1 - (treesSwapped ? it1.preL_to_sumInsCost[rF_in_preL] : it1.preL_to_sumDelCost[rF_in_preL]); // USE COST MODEL - Delete F_{lF,rF}-F_rF.
            if (rFIsRightSiblingOfCurrentPathNode) {
              sp3source = 3;
            }
          }
          if (sp3source == 1) {
            sp3spointer = s[(rF + rFSubtreeSize) - it1PreRoff];
          }
          if (currentForestSize2 == 1) {
            sp2 = currentForestCost1;// USE COST MODEL - Delete F_{lF,rF}.
          } else {
            sp2 = q[rF];
          }
          int rG = rGfirst;
          rGfirst_in_preL = it2preR_to_preL[rGfirst];
          currentForestSize2++;
          switch (sp1source) {
            case 1: sp1 = sp1spointer[rG - it2PreRoff]; break;
            case 2: sp1 = sp1tpointer[rG - it2PreRoff]; break;
            case 3: sp1 = currentForestCost2; break; // USE COST MODEL - Insert G_{lG,rG}.
          }
          sp1 += (treesSwapped ? costModel.ins(rFNode) : costModel.del(rFNode)); // USE COST MODEL - Delete rF.
          minCost = sp1;
          sp2 += (treesSwapped ? costModel.del(it2nodes[rGfirst_in_preL]) : costModel.ins(it2nodes[rGfirst_in_preL])); // USE COST MODEL - Insert rG.
          if (sp2 < minCost) {
            minCost = sp2;
          }
          if (sp3 < minCost) {
            sp3 += treesSwapped ? delta[rGfirst_in_preL][rF_in_preL] : sp3deltapointer[rGfirst_in_preL];
            if (sp3 < minCost) {
              sp3 += (treesSwapped ? costModel.ren(it2nodes[rGfirst_in_preL], rFNode) : costModel.ren(rFNode, it2nodes[rGfirst_in_preL]));
              if (sp3 < minCost) {
                minCost = sp3;
              }
            }
          }
          swritepointer[rG - it2PreRoff] = minCost;
          rG = ft[rG];
          counter++;
          // Loop D' [1, Algorithm 3] - for all nodes to the right of lG;
          while (rG >= rGlast) {
            rG_in_preL = it2preR_to_preL[rG];
            // Increment size and cost of G forest by node rG.
            currentForestSize2++;
            currentForestCost2 += (treesSwapped ? costModel.del(it2nodes[rG_in_preL]) : costModel.ins(it2nodes[rG_in_preL]));
            switch (sp1source) {
              case 1: sp1 = sp1spointer[rG - it2PreRoff] + (treesSwapped ? costModel.ins(rFNode) : costModel.del(rFNode)); break; // USE COST MODEL - Delete rF.
              case 2: sp1 = sp1tpointer[rG - it2PreRoff] + (treesSwapped ? costModel.ins(rFNode) : costModel.del(rFNode)); break; // USE COST MODEL - Delete rF.
              case 3: sp1 = currentForestCost2 + (treesSwapped ? costModel.ins(rFNode) : costModel.del(rFNode)); break; // USE COST MODEL - Insert G_{lG,rG} and delete rF.
            }
            sp2 = sp2spointer[fn[rG] - it2PreRoff] + (treesSwapped ? costModel.del(it2nodes[rG_in_preL]) : costModel.ins(it2nodes[rG_in_preL])); // USE COST MODEL - Insert rG.
            minCost = sp1;
            if (sp2 < minCost) {
              minCost = sp2;
            }
            sp3 = treesSwapped ? delta[rG_in_preL][rF_in_preL] : sp3deltapointer[rG_in_preL];
            if (sp3 < minCost) {
              switch (sp3source) {
                case 1: sp3 += sp3spointer[fn[(rG + it2sizes[rG_in_preL]) - 1] - it2PreRoff]; break;
                case 2: sp3 += currentForestCost2 - (treesSwapped ? it2.preL_to_sumDelCost[rG_in_preL] : it2.preL_to_sumInsCost[rG_in_preL]); break; // USE COST MODEL - Insert G_{lG,rG}-G_rG.
                case 3: sp3 += sp3tpointer[fn[(rG + it2sizes[rG_in_preL]) - 1] - it2PreRoff]; break;
              }
              if (sp3 < minCost) {
                sp3 += (treesSwapped ? costModel.ren(it2nodes[rG_in_preL], rFNode) : costModel.ren(rFNode, it2nodes[rG_in_preL])); // USE COST MODEL - Rename rF to rG.
                if (sp3 < minCost) {
                  minCost = sp3;
                }
              }
            }
            swritepointer[rG - it2PreRoff] = minCost;
            rG = ft[rG];
            counter++;
          }
        }
        // Propagate results for parent forests and store relevant distances
        // into delta for later lookups.
        if (lG > currentSubtreePreL2 && lG - 1 == parent_of_lG) {
          if (rightPart) {
            if (treesSwapped) {
              delta[parent_of_lG][endPathNode] = s[(rFlast + 1) - it1PreRoff][(lGminus1_in_preR + 1) - it2PreRoff];
            } else {
              delta[endPathNode][parent_of_lG] = s[(rFlast + 1) - it1PreRoff][(lGminus1_in_preR + 1) - it2PreRoff];
            }
          }
          if (endPathNode > 0 && endPathNode == parent_of_endPathNode + 1 && endPathNode_in_preR == parent_of_endPathNode_in_preR + 1)
            if (treesSwapped) {
              delta[parent_of_lG][parent_of_endPathNode] = s[rFlast - it1PreRoff][(lGminus1_in_preR + 1) - it2PreRoff];
            } else {
              delta[parent_of_endPathNode][parent_of_lG] = s[rFlast - it1PreRoff][(lGminus1_in_preR + 1) - it2PreRoff];
            }
          for (int rF = rFfirst; rF >= rFlast; rF--) {
            q[rF] = s[rF - it1PreRoff][(parent_of_lG_in_preR + 1) - it2PreRoff];
          }
        }
        // TODO: first pointers can be precomputed
        for (int rG = rGfirst; rG >= rGlast; rG = ft[rG]) {
          t[lG - it2PreLoff][rG - it2PreRoff] = s[rFlast - it1PreRoff][rG - it2PreRoff];
        }
      }
    }
    // Walk up the path by one node.
    startPathNode = endPathNode;
    endPathNode = it1parents[endPathNode];
  }
  return minCost;
}
// ===================== BEGIN spfL
/**
 * Implements the single-path function for left paths
 * [1, Sections 3.3,3.4,3.5]. The order of the parameters matters. This
 * function is used instead of spfA for left paths because it performs
 * better.
 *
 * @param it1 node indexer of the left-hand input subtree.
 * @param it2 node indexer of the right-hand input subtree.
 * @param treesSwapped says if the order of input subtrees has been swapped
 *                     compared to the order of the initial input trees. Used
 *                     for accessing delta array and deciding on the edit
 *                     operation.
 * @return tree edit distance between left-hand and right-hand input subtrees.
 */
private float spfL(NodeIndexer it1, NodeIndexer it2, boolean treesSwapped) {
  int root1 = it1.getCurrentNode();
  int root2 = it2.getCurrentNode();
  // Keyroot candidates of the right-hand input subtree. The array may be
  // larger than the actual number of keyroots, hence the -1 fill.
  int[] keyRoots = new int[it2.sizes[root2]];
  Arrays.fill(keyRoots, -1);
  // The left path ends at the leftmost leaf of the right-hand subtree.
  int leftmostLeaf = it2.preL_to_lld(root2);
  // Collect the keyroots; the returned value is the number of keyroots
  // actually stored (i.e. the index just past the last one).
  int keyRootCount = computeKeyRoots(it2, root2, leftmostLeaf, keyRoots, 0);
  // Intermediate distances for subforest pairs.
  float[][] forestdist = new float[it1.sizes[root1] + 1][it2.sizes[root2] + 1];
  // In the left-hand subtree only its root is a keyroot, so pair it with
  // every keyroot of the right-hand subtree.
  for (int k = keyRootCount - 1; k >= 0; k--) {
    treeEditDist(it1, it2, root1, keyRoots[k], forestdist, treesSwapped);
  }
  // The bottom-right cell holds the distance between the input subtrees.
  return forestdist[it1.sizes[root1]][it2.sizes[root2]];
}
/**
 * Calculates and stores keyroot nodes for left paths of the given subtree
 * recursively.
 *
 * @param it2 node indexer.
 * @param subtreeRootNode keyroot node - recursion point.
 * @param pathID left-to-right preorder id of the leftmost leaf node of subtreeRootNode.
 * @param keyRoots array that stores all key roots in the order of their left-to-right preorder ids.
 * @param index the index of keyRoots array where to store the next keyroot node.
 * @return the index of the first keyroot node to process.
 */
// TODO: Merge with computeRevKeyRoots - the only difference is between leftmost and rightmost leaf.
private int computeKeyRoots(NodeIndexer it2, int subtreeRootNode, int pathID, int[] keyRoots, int index) {
  // subtreeRootNode itself is a keyroot; record it and advance the index.
  keyRoots[index++] = subtreeRootNode;
  // Climb the left path from the leftmost leaf up to (excluding) the
  // subtree root; every right sibling of a path node is a keyroot and is
  // processed recursively with its own leftmost leaf.
  for (int pathNode = pathID; pathNode > subtreeRootNode; ) {
    int parent = it2.parents[pathNode];
    for (int child : it2.children[parent]) {
      if (child != pathNode) {
        index = computeKeyRoots(it2, child, it2.preL_to_lld(child), keyRoots, index);
      }
    }
    pathNode = parent;
  }
  return index;
}
/**
 * Implements the core of spfL: fills the forestdist array with intermediate
 * distances of subforest pairs in a dynamic-programming fashion.
 *
 * @param it1 node indexer of the left-hand input subtree.
 * @param it2 node indexer of the right-hand input subtree.
 * @param it1subtree left-to-right preorder id of the root node of the
 *                   left-hand input subtree.
 * @param it2subtree left-to-right preorder id of the root node of the
 *                   right-hand input subtree.
 * @param forestdist the array to be filled in with intermediate distances of subforest pairs.
 * @param treesSwapped says if the order of input subtrees has been swapped
 *                     compared to the order of the initial input trees. Used
 *                     for accessing delta array and deciding on the edit
 *                     operation.
 */
private void treeEditDist(NodeIndexer it1, NodeIndexer it2, int it1subtree, int it2subtree, float[][] forestdist, boolean treesSwapped) {
  // Work in left-to-right postorder ids.
  int iPost = it1.preL_to_postL[it1subtree];
  int jPost = it2.preL_to_postL[it2subtree];
  // Offsets so that forestdist indices start at 0 even though the subtrees'
  // postorder ids need not start at 0; the original id is row+iOff / col+jOff.
  int iOff = it1.postL_to_lld[iPost] - 1;
  int jOff = it2.postL_to_lld[jPost] - 1;
  // Base cases: distance from/to the empty forest accumulates pure
  // deletion (resp. insertion) costs.
  forestdist[0][0] = 0;
  for (int row = 1; row <= iPost - iOff; row++) {
    float delStep = treesSwapped ? costModel.ins(it1.postL_to_node(row + iOff)) : costModel.del(it1.postL_to_node(row + iOff)); // USE COST MODEL - delete row.
    forestdist[row][0] = forestdist[row - 1][0] + delStep;
  }
  for (int col = 1; col <= jPost - jOff; col++) {
    float insStep = treesSwapped ? costModel.del(it2.postL_to_node(col + jOff)) : costModel.ins(it2.postL_to_node(col + jOff)); // USE COST MODEL - insert col.
    forestdist[0][col] = forestdist[0][col - 1] + insStep;
  }
  // Dynamic-programming fill of the remaining subforest pairs.
  for (int row = 1; row <= iPost - iOff; row++) {
    for (int col = 1; col <= jPost - jOff; col++) {
      // One more subproblem solved.
      counter++;
      // Partial distances: rename, delete, insert. USE COST MODEL.
      float renStep = treesSwapped
          ? costModel.ren(it2.postL_to_node(col + jOff), it1.postL_to_node(row + iOff))
          : costModel.ren(it1.postL_to_node(row + iOff), it2.postL_to_node(col + jOff));
      float delCost = forestdist[row - 1][col]
          + (treesSwapped ? costModel.ins(it1.postL_to_node(row + iOff)) : costModel.del(it1.postL_to_node(row + iOff)));
      float insCost = forestdist[row][col - 1]
          + (treesSwapped ? costModel.del(it2.postL_to_node(col + jOff)) : costModel.ins(it2.postL_to_node(col + jOff)));
      float matchCost;
      if (it1.postL_to_lld[row + iOff] == it1.postL_to_lld[iPost] && it2.postL_to_lld[col + jOff] == it2.postL_to_lld[jPost]) {
        // Both current subforests are whole subtrees: also remember their
        // forest distance in delta for later lookups.
        matchCost = forestdist[row - 1][col - 1] + renStep;
        if (treesSwapped) {
          delta[it2.postL_to_preL[col + jOff]][it1.postL_to_preL[row + iOff]] = forestdist[row - 1][col - 1];
        } else {
          delta[it1.postL_to_preL[row + iOff]][it2.postL_to_preL[col + jOff]] = forestdist[row - 1][col - 1];
        }
      } else {
        matchCost = forestdist[it1.postL_to_lld[row + iOff] - 1 - iOff][it2.postL_to_lld[col + jOff] - 1 - jOff]
            + (treesSwapped ? delta[it2.postL_to_preL[col + jOff]][it1.postL_to_preL[row + iOff]] : delta[it1.postL_to_preL[row + iOff]][it2.postL_to_preL[col + jOff]])
            + renStep;
      }
      // Final minimum of the three edit options.
      forestdist[row][col] = Math.min(delCost, Math.min(insCost, matchCost));
    }
  }
}
// ===================== END spfL
// ===================== BEGIN spfR
/**
 * Implements the single-path function for right paths
 * [1, Sections 3.3,3.4,3.5]. The order of the parameters matters. This
 * function is used instead of spfA for right paths because it performs
 * better.
 *
 * @param it1 node indexer of the left-hand input subtree.
 * @param it2 node indexer of the right-hand input subtree.
 * @param treesSwapped says if the order of input subtrees has been swapped
 *                     compared to the order of the initial input trees. Used
 *                     for accessing delta array and deciding on the edit
 *                     operation.
 * @return tree edit distance between left-hand and right-hand input subtrees.
 */
private float spfR(NodeIndexer it1, NodeIndexer it2, boolean treesSwapped) {
  int root1 = it1.getCurrentNode();
  int root2 = it2.getCurrentNode();
  // Keyroot candidates of the right-hand input subtree. The array may be
  // larger than the actual number of keyroots, hence the -1 fill.
  int[] revKeyRoots = new int[it2.sizes[root2]];
  Arrays.fill(revKeyRoots, -1);
  // The right path ends at the rightmost leaf of the right-hand subtree.
  int rightmostLeaf = it2.preL_to_rld(root2);
  // Collect the keyroots; the returned value is the number of keyroots
  // actually stored (i.e. the index just past the last one).
  int keyRootCount = computeRevKeyRoots(it2, root2, rightmostLeaf, revKeyRoots, 0);
  // Intermediate distances for subforest pairs.
  float[][] forestdist = new float[it1.sizes[root1] + 1][it2.sizes[root2] + 1];
  // In the left-hand subtree only its root is a keyroot, so pair it with
  // every keyroot of the right-hand subtree.
  for (int k = keyRootCount - 1; k >= 0; k--) {
    revTreeEditDist(it1, it2, root1, revKeyRoots[k], forestdist, treesSwapped);
  }
  // The bottom-right cell holds the distance between the input subtrees.
  return forestdist[it1.sizes[root1]][it2.sizes[root2]];
}
/**
 * Calculates and stores keyroot nodes for right paths of the given subtree
 * recursively.
 *
 * @param it2 node indexer.
 * @param subtreeRootNode keyroot node - recursion point.
 * @param pathID left-to-right preorder id of the rightmost leaf node of subtreeRootNode.
 * @param revKeyRoots array that stores all key roots in the order of their left-to-right preorder ids.
 * @param index the index of keyRoots array where to store the next keyroot node.
 * @return the index of the first keyroot node to process.
 */
private int computeRevKeyRoots(NodeIndexer it2, int subtreeRootNode, int pathID, int[] revKeyRoots, int index) {
    // subtreeRootNode is itself a keyroot: record it and advance the cursor.
    revKeyRoots[index++] = subtreeRootNode;
    // Climb the right path from the rightmost leaf up to (but excluding)
    // subtreeRootNode. Every sibling hanging off that path roots its own
    // keyroot subtree and is processed recursively.
    for (int node = pathID; node > subtreeRootNode; ) {
        int parent = it2.parents[node];
        for (int sibling : it2.children[parent]) {
            // Recurse with the sibling's subtree and its rightmost leaf.
            if (sibling != node) {
                index = computeRevKeyRoots(it2, sibling, it2.preL_to_rld(sibling), revKeyRoots, index);
            }
        }
        // Walk up.
        node = parent;
    }
    return index;
}
/**
 * Implements the core of spfR. Fills in forestdist array with intermediate
 * distances of subforest pairs in dynamic-programming fashion.
 *
 * <p>This is the Zhang-Shasha style recurrence mirrored for right paths:
 * nodes are addressed in right-to-left postorder and leftmost-leaf indices
 * are replaced by rightmost-leaf indices.
 *
 * @param it1 node indexer of the left-hand input subtree.
 * @param it2 node indexer of the right-hand input subtree.
 * @param it1subtree left-to-right preorder id of the root node of the
 *                   left-hand input subtree.
 * @param it2subtree left-to-right preorder id of the root node of the
 *                   right-hand input subtree.
 * @param forestdist the array to be filled in with intermediate distances of
 *                   subforest pairs.
 * @param treesSwapped says if the order of input subtrees has been swapped
 *                     compared to the order of the initial input trees. Used
 *                     for accessing delta array and deciding on the edit
 *                     operation.
 */
private void revTreeEditDist(NodeIndexer it1, NodeIndexer it2, int it1subtree, int it2subtree, float[][] forestdist, boolean treesSwapped) {
    // Translate input subtree root nodes to right-to-left postorder.
    int i = it1.preL_to_postR[it1subtree];
    int j = it2.preL_to_postR[it2subtree];
    // We need to offset the node ids for accessing forestdist array which has
    // indices from 0 to subtree size. However, the subtree node indices do not
    // necessarily start with 0.
    // Whenever the original right-to-left postorder id has to be accessed, use
    // i+ioff and j+joff.
    int ioff = it1.postR_to_rld[i] - 1;
    int joff = it2.postR_to_rld[j] - 1;
    // Variables holding costs of each minimum element (delete, insert, rename).
    float da = 0;
    float db = 0;
    float dc = 0;
    // Initialize forestdist array with deletion and insertion costs of each
    // relevant subforest (first row and first column of the DP table).
    forestdist[0][0] = 0;
    for (int i1 = 1; i1 <= i - ioff; i1++) {
        forestdist[i1][0] = forestdist[i1 - 1][0] + (treesSwapped ? costModel.ins(it1.postR_to_node(i1 + ioff)) : costModel.del(it1.postR_to_node(i1 + ioff))); // USE COST MODEL - delete i1.
    }
    for (int j1 = 1; j1 <= j - joff; j1++) {
        forestdist[0][j1] = forestdist[0][j1 - 1] + (treesSwapped ? costModel.del(it2.postR_to_node(j1 + joff)) : costModel.ins(it2.postR_to_node(j1 + joff))); // USE COST MODEL - insert j1.
    }
    // Fill in the remaining costs.
    for (int i1 = 1; i1 <= i - ioff; i1++) {
        for (int j1 = 1; j1 <= j - joff; j1++) {
            // Increment the number of subproblems (statistics counter).
            counter++;
            // Calculate partial distance values for this subproblem.
            // Note that when treesSwapped holds, the cost-model arguments and
            // the delta indices are mirrored so costs stay direction-correct.
            float u = (treesSwapped ? costModel.ren(it2.postR_to_node(j1 + joff), it1.postR_to_node(i1 + ioff)) : costModel.ren(it1.postR_to_node(i1 + ioff), it2.postR_to_node(j1 + joff))); // USE COST MODEL - rename i1 to j1.
            da = forestdist[i1 - 1][j1] + (treesSwapped ? costModel.ins(it1.postR_to_node(i1 + ioff)) : costModel.del(it1.postR_to_node(i1 + ioff))); // USE COST MODEL - delete i1.
            db = forestdist[i1][j1 - 1] + (treesSwapped ? costModel.del(it2.postR_to_node(j1 + joff)) : costModel.ins(it2.postR_to_node(j1 + joff))); // USE COST MODEL - insert j1.
            // If current subforests are subtrees (both start at their
            // subtree's rightmost leaf).
            if (it1.postR_to_rld[i1 + ioff] == it1.postR_to_rld[i] && it2.postR_to_rld[j1 + joff] == it2.postR_to_rld[j]) {
                dc = forestdist[i1 - 1][j1 - 1] + u;
                // Store the relevant distance value in delta array
                // (distance between the two subtrees without their roots).
                if (treesSwapped) {
                    delta[it2.postR_to_preL[j1+joff]][it1.postR_to_preL[i1+ioff]] = forestdist[i1 - 1][j1 - 1];
                } else {
                    delta[it1.postR_to_preL[i1+ioff]][it2.postR_to_preL[j1+joff]] = forestdist[i1 - 1][j1 - 1];
                }
            } else {
                // General forest case: reuse the previously stored subtree
                // distance from delta plus the forest distance to its left.
                dc = forestdist[it1.postR_to_rld[i1 + ioff] - 1 - ioff][it2.postR_to_rld[j1 + joff] - 1 - joff] +
                    (treesSwapped ? delta[it2.postR_to_preL[j1 + joff]][it1.postR_to_preL[i1 + ioff]] : delta[it1.postR_to_preL[i1 + ioff]][it2.postR_to_preL[j1 + joff]]) + u;
            }
            // Calculate final minimum of the three partial costs.
            forestdist[i1][j1] = da >= db ? db >= dc ? dc : db : da >= dc ? dc : da;
        }
    }
}
// ===================== END spfR
/**
 * Decodes the path from the optimal strategy to its type.
 *
 * @param pathIDWithPathIDOffset raw path id from strategy array.
 * @param pathIDOffset offset used to distinguish between paths in the source and destination trees.
 * @param it node indexer.
 * @param currentRootNodePreL the left-to-right preorder id of the current subtree processed in tree decomposition phase.
 * @param currentSubtreeSize the size of the subtree currently processed in tree decomposition phase.
 * @return type of the strategy path (LEFT, RIGHT, INNER).
 */
private byte getStrategyPathType(int pathIDWithPathIDOffset, int pathIDOffset, NodeIndexer it, int currentRootNodePreL, int currentSubtreeSize) {
    // Negative raw values always encode a left path.
    if (pathIDWithPathIDOffset < 0) {
        return LEFT;
    }
    // Undo the +1 shift applied when the path id was encoded.
    int pathID = Math.abs(pathIDWithPathIDOffset) - 1;
    // Strip the offset that distinguishes source-tree from destination-tree paths.
    if (pathID >= pathIDOffset) {
        pathID -= pathIDOffset;
    }
    // A path ending at the last node of the current subtree is a right path;
    // anything else is an inner path.
    return pathID == currentRootNodePreL + currentSubtreeSize - 1 ? RIGHT : INNER;
}
/**
 * fn array used in the algorithm before [1]. Using it does not change the
 * complexity.
 *
 * <p>TODO: Do not use it [1, Section 8.4].
 *
 * @param lnForNode ---
 * @param node ---
 * @param currentSubtreePreL ---
 */
private void updateFnArray(int lnForNode, int node, int currentSubtreePreL) {
    // Nodes whose ln lies outside the current subtree are linked through the
    // sentinel slot at the end of fn instead of through lnForNode itself.
    int slot = lnForNode >= currentSubtreePreL ? lnForNode : fn.length - 1;
    fn[node] = fn[slot];
    fn[slot] = node;
}
/**
 * ft array used in the algorithm before [1]. Using it does not change the
 * complexity.
 *
 * <p>TODO: Do not use it [1, Section 8.4].
 *
 * @param lnForNode ---
 * @param node ---
 */
private void updateFtArray(int lnForNode, int node) {
    ft[node] = lnForNode;
    int follower = fn[node];
    // Link the follower back to node, unless there is no follower (-1).
    if (follower > -1) {
        ft[follower] = node;
    }
}
/**
 * Compute the edit mapping between two trees. The trees are input trees
 * to the distance computation and the distance must be computed before
 * computing the edit mapping (distances of subtree pairs are required).
 *
 * <p>The mapping is reconstructed by backtracking through forest distance
 * matrices recomputed with {@link #forestDist}, following the Zhang-Shasha
 * scheme over left-to-right postorder ids.
 *
 * @return Returns list of pairs of nodes that are mapped as pairs of their
 *         postorder IDs (starting with 1). Nodes that are deleted or
 *         inserted are mapped to 0.
 */
// TODO: Mapping computation requires more thorough documentation
//       (methods computeEditMapping, forestDist, mappingCost).
// TODO: Before computing the mapping, verify if TED has been computed.
//       Mapping computation should trigger distance computation if
//       necessary.
public List<int[]> computeEditMapping() {
    // Initialize tree and forest distance arrays.
    // Arrays for subtree distrances is not needed because the distances
    // between subtrees without the root nodes are already stored in delta.
    float[][] forestdist = new float[size1 + 1][size2 + 1];
    // True only for the very first popped pair, whose forestdist is
    // precomputed below and must not be recomputed inside the loop.
    boolean rootNodePair = true;
    // forestdist for input trees has to be computed
    forestDist(it1, it2, size1, size2, forestdist);
    // empty edit mapping
    LinkedList<int[]> editMapping = new LinkedList<>();
    // empty stack of tree Pairs
    LinkedList<int[]> treePairs = new LinkedList<>();
    // push the pair of trees (ted1,ted2) to stack
    treePairs.push(new int[] { size1, size2 });
    while (!treePairs.isEmpty()) {
        // get next tree pair to be processed
        int[] treePair = treePairs.pop();
        int lastRow = treePair[0];
        int lastCol = treePair[1];
        // compute forest distance matrix for this subtree pair
        if (!rootNodePair) {
            forestDist(it1, it2, lastRow, lastCol, forestdist);
        }
        rootNodePair = false;
        // compute mapping for current forest distance matrix;
        // firstRow/firstCol are the leftmost-leaf bounds of the subforests.
        int firstRow = it1.postL_to_lld[lastRow-1];
        int firstCol = it2.postL_to_lld[lastCol-1];
        int row = lastRow;
        int col = lastCol;
        while ((row > firstRow) || (col > firstCol)) {
            // Backtrack: test which of the three operations produced the
            // minimum at forestdist[row][col] (exact float equality is relied
            // upon because the value was computed from these very operands).
            if ((row > firstRow) && (forestdist[row - 1][col] + costModel.del(it1.postL_to_node(row-1)) == forestdist[row][col])) { // USE COST MODEL - Delete node row of source tree.
                // node with postorderID row is deleted from ted1
                editMapping.push(new int[] { row, 0 });
                row--;
            } else if ((col > firstCol) && (forestdist[row][col - 1] + costModel.ins(it2.postL_to_node(col-1)) == forestdist[row][col])) { // USE COST MODEL - Insert node col of destination tree.
                // node with postorderID col is inserted into ted2
                editMapping.push(new int[] { 0, col });
                col--;
            } else {
                // node with postorderID row in ted1 is renamed to node col
                // in ted2
                if ((it1.postL_to_lld[row-1] == it1.postL_to_lld[lastRow-1]) && (it2.postL_to_lld[col-1] == it2.postL_to_lld[lastCol-1])) {
                    // if both subforests are trees, map nodes
                    editMapping.push(new int[] { row, col });
                    row--;
                    col--;
                } else {
                    // push subtree pair for later processing
                    treePairs.push(new int[] { row, col });
                    // continue with forest to the left of the popped
                    // subtree pair
                    row = it1.postL_to_lld[row-1];
                    col = it2.postL_to_lld[col-1];
                }
            }
        }
    }
    return editMapping;
}
/**
 * Recalculates distances between subforests of two subtrees. These values
 * are used in mapping computation to track back the origin of minimum values.
 * It is based on Zhang and Shasha algorithm.
 *
 * <p>The rename cost must be added in the last line. Otherwise the formula is
 * incorrect. This is due to delta storing distances between subtrees
 * without the root nodes.
 *
 * <p>i and j are postorder ids of the nodes - starting with 1.
 *
 * @param ted1 node indexer of the source input tree.
 * @param ted2 node indexer of the destination input tree.
 * @param i subtree root of source tree that is to be mapped.
 * @param j subtree root of destination tree that is to be mapped.
 * @param forestdist array to store distances between subforest pairs.
 */
private void forestDist(NodeIndexer ted1, NodeIndexer ted2, int i, int j, float[][] forestdist) {
    // Empty-forest pair has distance 0; the table is anchored at the
    // leftmost-leaf indices of the two subtree roots.
    forestdist[ted1.postL_to_lld[i-1]][ted2.postL_to_lld[j-1]] = 0;
    for (int di = ted1.postL_to_lld[i-1]+1; di <= i; di++) {
        // First column: delete every node of the source subforest.
        forestdist[di][ted2.postL_to_lld[j-1]] = forestdist[di - 1][ted2.postL_to_lld[j-1]] + costModel.del(ted1.postL_to_node(di-1));
        for (int dj = ted2.postL_to_lld[j-1]+1; dj <= j; dj++) {
            // First row: insert every node of the destination subforest.
            forestdist[ted1.postL_to_lld[i-1]][dj] = forestdist[ted1.postL_to_lld[i-1]][dj - 1] + costModel.ins(ted2.postL_to_node(dj-1));
            float costRen = costModel.ren(ted1.postL_to_node(di-1), ted2.postL_to_node(dj-1));
            // TODO: The first two elements of the minimum can be computed here,
            //       similarly to spfL and spfR.
            if ((ted1.postL_to_lld[di-1] == ted1.postL_to_lld[i-1]) && (ted2.postL_to_lld[dj-1] == ted2.postL_to_lld[j-1])) {
                // Both subforests are complete subtrees: standard 3-way minimum.
                forestdist[di][dj] = Math.min(Math.min(
                        forestdist[di - 1][dj] + costModel.del(ted1.postL_to_node(di-1)),
                        forestdist[di][dj - 1] + costModel.ins(ted2.postL_to_node(dj-1))),
                        forestdist[di - 1][dj - 1] + costRen);
                // If substituted with delta, this will overwrite the value
                // in delta.
                // It looks that we don't have to write this value.
                // Conceptually it is correct because we already have all
                // the values in delta for subtrees without the root nodes,
                // and we need these.
                // treedist[di][dj] = forestdist[di][dj];
            } else {
                // di and dj are postorder ids of the nodes - starting with 1
                // Substituted 'treedist[di][dj]' with 'delta[it1.postL_to_preL[di-1]][it2.postL_to_preL[dj-1]]'
                forestdist[di][dj] = Math.min(Math.min(
                        forestdist[di - 1][dj] + costModel.del(ted1.postL_to_node(di-1)),
                        forestdist[di][dj - 1] + costModel.ins(ted2.postL_to_node(dj-1))),
                        forestdist[ted1.postL_to_lld[di-1]][ted2.postL_to_lld[dj-1]] + delta[it1.postL_to_preL[di-1]][it2.postL_to_preL[dj-1]] + costRen);
            }
        }
    }
}
/**
 * Calculates the cost of an edit mapping. It traverses the mapping and sums
 * up the cost of each operation. The costs are taken from the cost model.
 *
 * @param mapping an edit mapping.
 * @return cost of edit mapping.
 */
public float mappingCost(List<int[]> mapping) {
    float total = 0.0f;
    for (int[] pair : mapping) {
        int src = pair[0];
        int dst = pair[1];
        if (src == 0) {
            // No source node: dst was inserted.
            total += costModel.ins(it2.postL_to_node(dst - 1));
        } else if (dst == 0) {
            // No destination node: src was deleted.
            total += costModel.del(it1.postL_to_node(src - 1));
        } else {
            // Both present: src was renamed to dst.
            total += costModel.ren(it1.postL_to_node(src - 1), it2.postL_to_node(dst - 1));
        }
    }
    return total;
}
}
| 79,725 | 42.733406 | 222 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/apted/distance/AllPossibleMappingsTED.java
|
/* MIT License
*
* Copyright (c) 2017 Mateusz Pawlik
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package apted.distance;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import apted.costmodel.CostModel;
import apted.node.Node;
import apted.node.NodeIndexer;
/**
 * Implements an exponential algorithm for the tree edit distance. It computes
 * all possible TED mappings between two trees and calculates their minimal
 * cost.
 *
 * <p>NOTE: the number of candidate mappings grows exponentially with the
 * input sizes, so this class is only practical for very small trees
 * (e.g. as a reference implementation for testing).
 *
 * @param <C> type of cost model.
 * @param <D> type of node data.
 */
public class AllPossibleMappingsTED<C extends CostModel, D> {
    /**
     * Indexer of the source tree.
     *
     * @see node.NodeIndexer
     */
    private NodeIndexer it1;
    /**
     * Indexer of the destination tree.
     *
     * @see node.NodeIndexer
     */
    private NodeIndexer it2;
    /**
     * The size of the source input tree.
     */
    private int size1;
    /**
     * The size of the destination tree.
     */
    private int size2;
    /**
     * Cost model to be used for calculating costs of edit operations.
     */
    private C costModel;
    /**
     * Constructs the AllPossibleMappingsTED algorithm with a specific cost model.
     *
     * @param costModel a cost model used in the algorithm.
     */
    public AllPossibleMappingsTED(C costModel) {
        this.costModel = costModel;
    }
    /**
     * Computes the tree edit distance between two trees by trying all possible
     * TED mappings. It uses the specified cost model.
     *
     * @param t1 source tree.
     * @param t2 destination tree.
     * @return the tree edit distance between two trees.
     */
    public float computeEditDistance(Node<D> t1, Node<D> t2) {
        // Index the nodes of both input trees.
        init(t1, t2);
        // Enumerate every 1-1 mapping, keep only valid TED mappings, and
        // return the cheapest one.
        ArrayList<ArrayList<int[]>> mappings = generateAllOneToOneMappings();
        removeNonTEDMappings(mappings);
        return getMinCost(mappings);
    }
    /**
     * Indexes the input trees.
     *
     * @param t1 source tree.
     * @param t2 destination tree.
     */
    public void init(Node<D> t1, Node<D> t2) {
        it1 = new NodeIndexer(t1, costModel);
        it2 = new NodeIndexer(t2, costModel);
        size1 = it1.getSize();
        size2 = it2.getSize();
    }
    /**
     * Generate all possible 1-1 mappings.
     *
     * <p>These mappings do not conform to TED conditions (sibling-order and
     * ancestor-descendant).
     *
     * <p>A mapping is a list of pairs (arrays) of preorder IDs (identifying
     * nodes). A pair {@code {n1, -1}} means n1 is deleted, {@code {-1, n2}}
     * means n2 is inserted, and {@code {n1, n2}} means n1 maps to n2.
     *
     * @return set of all 1-1 mappings.
     */
    private ArrayList<ArrayList<int[]>> generateAllOneToOneMappings() {
        // Start with an empty mapping - all nodes are deleted or inserted.
        ArrayList<ArrayList<int[]>> mappings = new ArrayList<>(1);
        mappings.add(new ArrayList<int[]>(size1 + size2));
        // Add all deleted nodes.
        for (int n1 = 0; n1 < size1; n1++) {
            mappings.get(0).add(new int[]{n1, -1});
        }
        // Add all inserted nodes.
        for (int n2 = 0; n2 < size2; n2++) {
            mappings.get(0).add(new int[]{-1, n2});
        }
        // For each node in the source tree.
        for (int n1 = 0; n1 < size1; n1++) {
            // Duplicate all mappings and store in mappings_copy. The copy is
            // taken before the n2 loop so each new pair (n1, n2) extends only
            // mappings produced for smaller n1 values.
            ArrayList<ArrayList<int[]>> mappings_copy = deepMappingsCopy(mappings);
            // For each node in the destination tree.
            for (int n2 = 0; n2 < size2; n2++) {
                // For each mapping (produced for all n1 values smaller than
                // current n1).
                for (ArrayList<int[]> m : mappings_copy) {
                    // Produce new mappings with the pair (n1, n2) by adding this
                    // pair to all mappings where it is valid to add.
                    boolean element_add = true;
                    // Verify if (n1, n2) can be added to mapping m.
                    // All elements in m are checked with (n1, n2) for possible
                    // violation.
                    // One-to-one condition: n2 must not already be mapped in m.
                    for (int[] e : m) {
                        // Reject if n2 already appears in a mapped pair.
                        if (e[0] != -1 && e[1] != -1 && e[1] == n2) {
                            element_add = false;
                            // System.out.println("Add " + n2 + " false.");
                            break;
                        }
                    }
                    // New mappings must be produced by duplicating a previous
                    // mapping and extending it by (n1, n2).
                    if (element_add) {
                        ArrayList<int[]> m_copy = deepMappingCopy(m);
                        m_copy.add(new int[]{n1, n2});
                        // If a pair (n1,n2) is added, (n1,-1) and (-1,n2) must be removed.
                        removeMappingElement(m_copy, new int[]{n1, -1});
                        removeMappingElement(m_copy, new int[]{-1, n2});
                        mappings.add(m_copy);
                    }
                }
            }
        }
        return mappings;
    }
    /**
     * Given all 1-1 mappings, discard these that violate TED conditions
     * (ancestor-descendant and sibling order).
     *
     * @param mappings set of all 1-1 mappings; modified in place.
     */
    private void removeNonTEDMappings(ArrayList<ArrayList<int[]>> mappings) {
        // Validate each mapping separately.
        // Iterator safely removes mappings while iterating.
        for (Iterator<ArrayList<int[]>> mit = mappings.iterator(); mit.hasNext();) {
            ArrayList<int[]> m = mit.next();
            if (!isTEDMapping(m)) {
                mit.remove();
            }
        }
    }
    /**
     * Test if a 1-1 mapping is a TED mapping.
     *
     * <p>Checks every pair of mapped-node pairs for violations of the
     * ancestor-descendant and sibling-order conditions, using left-to-right
     * and right-to-left preorder ids.
     *
     * @param m a 1-1 mapping.
     * @return {@code true} if {@code m} is a TED mapping, and {@code false}
     *         otherwise.
     */
    boolean isTEDMapping(ArrayList<int[]> m) {
        // Validate each pair of pairs of mapped nodes in the mapping.
        for (int[] e1 : m) {
            // Use only pairs of mapped nodes for validation.
            if (e1[0] == -1 || e1[1] == -1) {
                continue;
            }
            for (int[] e2 : m) {
                // Use only pairs of mapped nodes for validation.
                if (e2[0] == -1 || e2[1] == -1) {
                    continue;
                }
                // If any of the conditions below doesn't hold, discard m.
                // Validate ancestor-descendant condition.
                boolean a = e1[0] < e2[0] && it1.preL_to_preR[e1[0]] < it1.preL_to_preR[e2[0]];
                boolean b = e1[1] < e2[1] && it2.preL_to_preR[e1[1]] < it2.preL_to_preR[e2[1]];
                if ((a && !b) || (!a && b)) {
                    // Discard the mapping.
                    // If this condition doesn't hold, the next condition
                    // doesn't have to be verified any more and any other
                    // pair (e1, e2) doesn't have to be verified any more.
                    return false;
                }
                // Validate sibling-order condition.
                a = e1[0] < e2[0] && it1.preL_to_preR[e1[0]] > it1.preL_to_preR[e2[0]];
                b = e1[1] < e2[1] && it2.preL_to_preR[e1[1]] > it2.preL_to_preR[e2[1]];
                if ((a && !b) || (!a && b)) {
                    // Discard the mapping.
                    return false;
                }
            }
        }
        return true;
    }
    /**
     * Given list of all TED mappings, calculate the cost of the minimal-cost
     * mapping.
     *
     * @param tedMappings set of all TED mappings.
     * @return the minimal cost among all TED mappings.
     */
    float getMinCost(ArrayList<ArrayList<int[]>> tedMappings) {
        // Initialize min_cost to the upper bound (delete all + insert all).
        float min_cost = size1 + size2;
        // System.out.println("min_cost = " + min_cost);
        // Verify cost of each mapping.
        for (ArrayList<int[]> m : tedMappings) {
            float m_cost = 0;
            // Sum up edit costs for all elements in the mapping m.
            for (int[] e : m) {
                // Add edit operation cost.
                if (e[0] > -1 && e[1] > -1) {
                    m_cost += costModel.ren(it1.preL_to_node[e[0]], it2.preL_to_node[e[1]]); // USE COST MODEL - rename e[0] to e[1].
                } else if (e[0] > -1) {
                    m_cost += costModel.del(it1.preL_to_node[e[0]]); // USE COST MODEL - delete e[0].
                } else {
                    m_cost += costModel.ins(it2.preL_to_node[e[1]]); // USE COST MODEL - insert e[1].
                }
                // Break as soon as the current min_cost is exceeded.
                // Only for early loop break.
                if (m_cost >= min_cost) {
                    break;
                }
            }
            // Store the minimal cost - compare m_cost and min_cost
            if (m_cost < min_cost) {
                min_cost = m_cost;
            }
            // System.out.printf("min_cost = %.8f\n", min_cost);
        }
        return min_cost;
    }
    /**
     * Makes a deep copy of a mapping.
     *
     * @param mapping mapping to copy.
     * @return a deep copy of {@code mapping} (pair arrays are duplicated).
     */
    private ArrayList<int[]> deepMappingCopy(ArrayList<int[]> mapping) {
        ArrayList<int[]> mapping_copy = new ArrayList<>(mapping.size());
        for (int[] me : mapping) { // for each mapping element in a mapping
            mapping_copy.add(Arrays.copyOf(me, me.length));
        }
        return mapping_copy;
    }
    /**
     * Makes a deep copy of a set of mappings.
     *
     * @param mappings set of mappings to copy.
     * @return a deep copy of {@code mappings}.
     */
    private ArrayList<ArrayList<int[]>> deepMappingsCopy(ArrayList<ArrayList<int[]>> mappings) {
        ArrayList<ArrayList<int[]>> mappings_copy = new ArrayList<>(mappings.size());
        for (ArrayList<int[]> m : mappings) { // for each mapping in mappings
            ArrayList<int[]> m_copy = new ArrayList<>(m.size());
            for (int[] me : m) { // for each mapping element in a mapping
                m_copy.add(Arrays.copyOf(me, me.length));
            }
            mappings_copy.add(m_copy);
        }
        return mappings_copy;
    }
    /**
     * Constructs a string representation of a set of mappings.
     * Debug helper; currently unused by the algorithm itself.
     *
     * @param mappings set of mappings to convert.
     * @return string representation of a set of mappings.
     */
    private String mappingsToString(ArrayList<ArrayList<int[]>> mappings) {
        String result = "Mappings:\n";
        for (ArrayList<int[]> m : mappings) {
            result += "{";
            for (int[] me : m) {
                result += "[" + me[0] + "," + me[1] + "]";
            }
            result += "}\n";
        }
        return result;
    }
    /**
     * Removes an element (edit operation) from a mapping by its value. In our
     * case the element to remove can be always found in the mapping.
     *
     * <p>Safe despite removal inside the for-each loop because the method
     * returns immediately after the removal.
     *
     * @param m an edit mapping.
     * @param e element to remove from {@code m}.
     * @return {@code true} if {@code e} has been removed, and {@code false}
     *         otherwise.
     */
    private boolean removeMappingElement(ArrayList<int[]> m, int[] e) {
        for (int[] me : m) {
            if (me[0] == e[0] && me[1] == e[1]) {
                m.remove(me);
                return true;
            }
        }
        return false;
    }
}
| 11,261 | 32.026393 | 123 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/apted/node/Node.java
|
/* MIT License
*
* Copyright (c) 2017 Mateusz Pawlik
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package apted.node;
import java.util.ArrayList;
import java.util.List;
/**
 * This is a recursive representation of an ordered tree. Each node stores a
 * list of pointers to its children. The order of children is significant and
 * must be observed while implementing a custom input parser.
 *
 * @param <D> the type of node data (node label).
 */
public class Node<D> {
    /**
     * Information associated to and stored at each node. This can be anything
     * and depends on the application, for example, string label, key-value pair,
     * list of values, etc.
     */
    private D nodeData;
    /**
     * List of pointers to this node's children. The order of children is
     * significant due to the definition of ordered trees.
     */
    private List<Node<D>> children;
    /**
     * Constructs a new node with the passed node data and an empty list of
     * children.
     *
     * @param nodeData instance of node data (node label).
     */
    public Node(D nodeData) {
        this.children = new ArrayList<>();
        setNodeData(nodeData);
    }
    /**
     * Counts the number of nodes in a tree rooted at this node.
     *
     * <p>This method runs in linear time in the tree size.
     *
     * @return number of nodes in the tree rooted at this node.
     */
    public int getNodeCount() {
        int sum = 1;
        for (Node<D> child : getChildren()) {
            sum += child.getNodeCount();
        }
        return sum;
    }
    /**
     * Adds a new child at the end of children list. The added child will be
     * the last child of this node.
     *
     * <p>The parameter is now the parameterized type {@code Node<D>} instead
     * of the raw type {@code Node}, so mismatched node-data types are caught
     * at compile time (erasure keeps this binary-compatible).
     *
     * @param c child node to add.
     */
    public void addChild(Node<D> c) {
        this.children.add(c);
    }
    /**
     * Returns a string representation of the tree in bracket notation.
     *
     * <p>IMPORTANT: Works only for nodes storing {@link node.StringNodeData}
     * due to using {@link node.StringNodeData#getLabel()}.
     *
     * @return tree in bracket notation.
     */
    @Override
    public String toString() {
        // Accumulate into a single StringBuilder instead of the original
        // decompiler-style per-step "new StringBuilder(...).toString()"
        // chains, which rebuilt the whole string on every append (O(n^2)).
        StringBuilder res = new StringBuilder("{");
        res.append(((StringNodeData) getNodeData()).getLabel());
        for (Node<D> child : getChildren()) {
            res.append(child.toString());
        }
        return res.append("}").toString();
    }
    /**
     * Returns node data. Used especially for calculating rename cost.
     *
     * @return node data (label of a node).
     */
    public D getNodeData() {
        return nodeData;
    }
    /**
     * Sets the node data of this node.
     *
     * @param nodeData instance of node data (node label).
     */
    public void setNodeData(D nodeData) {
        this.nodeData = nodeData;
    }
    /**
     * Returns the list with all node's children.
     *
     * <p>NOTE(review): this exposes the internal mutable list directly;
     * callers can modify the tree through it. Kept as-is because existing
     * callers may rely on that behavior.
     *
     * @return children of the node.
     */
    public List<Node<D>> getChildren() {
        return children;
    }
}
| 3,932 | 28.571429 | 104 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/apted/node/NodeIndexer.java
|
/* MIT License
*
* Copyright (c) 2017 Mateusz Pawlik
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package apted.node;
import java.util.ArrayList;
import java.util.Iterator;
import apted.costmodel.CostModel;
/**
* Indexes nodes of the input tree to the algorithm that is already parsed to
* tree structure using {@link node.Node} class. Stores various indices on
* nodes required for efficient computation of APTED [1,2]. Additionally, it
* stores
* single-value properties of the tree.
*
* <p>For indexing we use four tree traversals that assign ids to the nodes:
* <ul>
* <li>left-to-right preorder [1],
* <li>right-to-left preorder [1],
* <li>left-to-right postorder [2],
* <li>right-to-left postorder [2].
* </ul>
*
* <p>See the source code for more algorithm-related comments.
*
* <p>References:
* <ul>
* <li>[1] M. Pawlik and N. Augsten. Efficient Computation of the Tree Edit
* Distance. ACM Transactions on Database Systems (TODS) 40(1). 2015.
* <li>[2] M. Pawlik and N. Augsten. Tree edit distance: Robust and memory-
* efficient. Information Systems 56. 2016.
* </ul>
*
* @param <D> type of node data.
* @param <C> type of cost model.
* @see node.Node
* @see parser.InputParser
*/
public class NodeIndexer<D, C extends CostModel> {
// [TODO] Be consistent in naming index variables: <FROM>_to_<TO>.
// Structure indices.
/**
* Index from left-to-right preorder id of node n (starting with {@code 0})
* to Node object corresponding to n. Used for cost of edit operations.
*
* @see node.Node
*/
public Node<D> preL_to_node[];
// NOTE(review): C-style array declarations ("int sizes[]") are retained verbatim
// here; a behavior-preserving cleanup could switch them to "int[] sizes".
/**
 * Index from left-to-right preorder id of node n (starting with {@code 0})
 * to the size of n's subtree (node n and all its descendants).
 */
public int sizes[];
/**
 * Index from left-to-right preorder id of node n (starting with {@code 0})
 * to the left-to-right preorder id of n's parent.
 */
public int parents[];
/**
 * Index from left-to-right preorder id of node n (starting with {@code 0})
 * to the array of n's children. Size of children array at node n equals the number
 * of n's children.
 */
public int children[][];
/**
 * Index from left-to-right postorder id of node n (starting with {@code 0})
 * to the left-to-right postorder id of n's leftmost leaf descendant.
 */
public int postL_to_lld[];
/**
 * Index from right-to-left postorder id of node n (starting with {@code 0})
 * to the right-to-left postorder id of n's rightmost leaf descendant.
 */
public int postR_to_rld[];
/**
 * Index from left-to-right preorder id of node n (starting with {@code 0})
 * to the left-to-right preorder id of the first leaf node to the left of n.
 * If there is no leaf node to the left of n, it is represented with the
 * value {@code -1} [1, Section 8.4].
 */
public int preL_to_ln[];
/**
 * Index from right-to-left preorder id of node n (starting with {@code 0})
 * to the right-to-left preorder id of the first leaf node to the right of n.
 * If there is no leaf node to the right of n, it is represented with the
 * value {@code -1} [1, Section 8.4].
 */
public int preR_to_ln[];
/**
 * Index from left-to-right preorder id of node n (starting with {@code 0})
 * to a boolean value that states if node n lies on the leftmost path
 * starting at n's parent [2, Algorithm 1, Lines 26,36].
 */
public boolean nodeType_L[];
/**
 * Index from left-to-right preorder id of node n (starting with {@code 0})
 * to a boolean value that states if node n lies on the rightmost path
 * starting at n's parent in the input tree [2, Section 5.3, Algorithm 1, Lines 26,36].
 */
public boolean nodeType_R[];
// Traversal translation indices.
/**
 * Index from left-to-right preorder id of node n (starting with {@code 0})
 * to the right-to-left preorder id of n.
 */
public int preL_to_preR[];
/**
 * Index from right-to-left preorder id of node n (starting with {@code 0})
 * to the left-to-right preorder id of n.
 */
public int preR_to_preL[];
/**
 * Index from left-to-right preorder id of node n (starting with {@code 0})
 * to the left-to-right postorder id of n.
 */
public int preL_to_postL[];
/**
 * Index from left-to-right postorder id of node n (starting with {@code 0})
 * to the left-to-right preorder id of n.
 */
public int postL_to_preL[];
/**
 * Index from left-to-right preorder id of node n (starting with {@code 0})
 * to the right-to-left postorder id of n.
 */
public int preL_to_postR[];
/**
 * Index from right-to-left postorder id of node n (starting with {@code 0})
 * to the left-to-right preorder id of n.
 */
public int postR_to_preL[];
// Cost indices.
/**
 * Index from left-to-right preorder id of node n (starting with {@code 0})
 * to the cost of spf_L (single path function using the leftmost path) for
 * the subtree rooted at n [1, Section 5.2].
 */
public int preL_to_kr_sum[];
/**
 * Index from left-to-right preorder id of node n (starting with {@code 0})
 * to the cost of spf_R (single path function using the rightmost path) for
 * the subtree rooted at n [1, Section 5.2].
 */
public int preL_to_rev_kr_sum[];
/**
 * Index from left-to-right preorder id of node n (starting with {@code 0})
 * to the cost of spf_A (single path function using an inner path) for the
 * subtree rooted at n [1, Section 5.2].
 */
public int preL_to_desc_sum[];
/**
 * Index from left-to-right preorder id of node n (starting with {@code 0})
 * to the cost of deleting all nodes in the subtree rooted at n.
 */
public float preL_to_sumDelCost[];
/**
 * Index from left-to-right preorder id of node n (starting with {@code 0})
 * to the cost of inserting all nodes in the subtree rooted at n.
 */
public float preL_to_sumInsCost[];
// Variables holding values modified at runtime while the algorithm executes.
/**
 * Stores the left-to-right preorder id of the current subtree's root node.
 * Used in the tree decomposition phase of APTED [1, Algorithm 1].
 */
private int currentNode;
// Structure single-value variables.
/**
 * Stores the size of the input tree.
 */
private int treeSize;
/**
 * Stores the number of leftmost-child leaf nodes in the input tree
 * [2, Section 5.3].
 */
public int lchl;
/**
 * Stores the number of rightmost-child leaf nodes in the input tree
 * [2, Section 5.3].
 */
public int rchl;
// Variables used temporarily while indexing.
/**
 * Temporary variable used in indexing for storing subtree size.
 */
private int sizeTmp;
/**
 * Temporary variable used in indexing for storing sum of subtree sizes
 * rooted at descendant nodes.
 */
private int descSizesTmp;
/**
 * Temporary variable used in indexing for storing sum of keyroot node sizes.
 */
private int krSizesSumTmp;
/**
 * Temporary variable used in indexing for storing sum of right-to-left
 * keyroot node sizes.
 */
private int revkrSizesSumTmp;
/**
 * Temporary variable used in indexing for storing preorder index of a node.
 */
private int preorderTmp;
// Cost model used to fill preL_to_sumDelCost and preL_to_sumInsCost.
private C costModel;
/**
 * Indexes the nodes of input trees and stores the indices for quick access
 * from APTED algorithm.
 *
 * @param inputTree an input tree to APTED. Its nodes will be indexed.
 * @param costModel instance of a cost model to compute preL_to_sumDelCost
 * and preL_to_sumInsCost.
 */
public NodeIndexer(Node<D> inputTree, C costModel) {
// Initialise variables.
sizeTmp = 0;
descSizesTmp = 0;
krSizesSumTmp = 0;
revkrSizesSumTmp = 0;
preorderTmp = 0;
currentNode = 0;
treeSize = inputTree.getNodeCount();
// Initialise indices with the lengths equal to the tree size.
sizes = new int[treeSize];
preL_to_preR = new int[treeSize];
preR_to_preL = new int[treeSize];
preL_to_postL = new int[treeSize];
postL_to_preL = new int[treeSize];
preL_to_postR = new int[treeSize];
postR_to_preL = new int[treeSize];
postL_to_lld = new int[treeSize];
postR_to_rld = new int[treeSize];
// Generic array creation is not possible in Java; the raw Node[] is only
// ever filled with Node<D> instances by indexNodes.
preL_to_node = new Node[treeSize];
preL_to_ln = new int[treeSize];
preR_to_ln = new int[treeSize];
preL_to_kr_sum = new int[treeSize];
preL_to_rev_kr_sum = new int[treeSize];
preL_to_desc_sum = new int[treeSize];
preL_to_sumDelCost = new float[treeSize];
preL_to_sumInsCost = new float[treeSize];
children = new int[treeSize][];
nodeType_L = new boolean[treeSize];
nodeType_R = new boolean[treeSize];
parents = new int[treeSize];
parents[0] = -1; // The root has no parent.
this.costModel = costModel;
// Index the nodes: one recursive pass, then a linear post-pass for the
// indices that depend on completed traversal ids.
indexNodes(inputTree, -1);
postTraversalIndexing();
}
/**
 * Indexes the nodes of the input tree. Stores information about each tree
 * node in index arrays. It computes the following indices: {@link #parents},
 * {@link #children}, {@link #nodeType_L}, {@link #nodeType_R},
 * {@link #preL_to_desc_sum}, {@link #preL_to_kr_sum},
 * {@link #preL_to_rev_kr_sum}, {@link #preL_to_node}, {@link #sizes},
 * {@link #preL_to_preR}, {@link #preR_to_preL}, {@link #postL_to_preL},
 * {@link #preL_to_postL}, {@link #preL_to_postR}, {@link #postR_to_preL}.
 *
 * <p>It is a recursive method that traverses the tree once.
 *
 * @param node is the current node while traversing the input tree.
 * @param postorder is the postorder id of the current node.
 * @return postorder id of the current node.
 */
private int indexNodes(Node<D> node, int postorder) {
// Initialise variables.
int currentSize = 0;
int childrenCount = 0;
int descSizes = 0;
int krSizesSum = 0;
int revkrSizesSum = 0;
int preorder = preorderTmp;
int preorderR = 0;
int currentPreorder = -1;
// Initialise empty array to store children of this node.
ArrayList<Integer> childrenPreorders = new ArrayList<>();
// Store the preorder id of the current node to use it after the recursion.
preorderTmp++;
// Loop over children of a node.
Iterator<Node<D>> childrenIt = node.getChildren().iterator();
while (childrenIt.hasNext()) {
childrenCount++;
currentPreorder = preorderTmp;
parents[currentPreorder] = preorder;
// Execute method recursively for next child.
postorder = indexNodes(childrenIt.next(), postorder);
childrenPreorders.add(Integer.valueOf(currentPreorder));
// The *Tmp fields carry the just-indexed child's subtree aggregates back
// from the recursive call; fold them into this node's accumulators.
currentSize += 1 + sizeTmp;
descSizes += descSizesTmp;
// The first child lies on the leftmost path; later children start new
// keyroot paths and contribute their full subtree size.
if(childrenCount > 1) {
krSizesSum += krSizesSumTmp + sizeTmp + 1;
} else {
krSizesSum += krSizesSumTmp;
nodeType_L[currentPreorder] = true;
}
// Symmetrically, the last child lies on the rightmost path.
if(childrenIt.hasNext()) {
revkrSizesSum += revkrSizesSumTmp + sizeTmp + 1;
} else {
revkrSizesSum += revkrSizesSumTmp;
nodeType_R[currentPreorder] = true;
}
}
postorder++;
int currentDescSizes = descSizes + currentSize + 1;
// Cost of spf_A for this subtree, per [1, Section 5.2].
preL_to_desc_sum[preorder] = ((currentSize + 1) * (currentSize + 1 + 3)) / 2 - currentDescSizes;
preL_to_kr_sum[preorder] = krSizesSum + currentSize + 1;
preL_to_rev_kr_sum[preorder] = revkrSizesSum + currentSize + 1;
// Store pointer to a node object corresponding to preorder.
preL_to_node[preorder] = node;
sizes[preorder] = currentSize + 1;
preorderR = treeSize - 1 - postorder;
preL_to_preR[preorder] = preorderR;
preR_to_preL[preorderR] = preorder;
children[preorder] = toIntArray(childrenPreorders);
// Publish this subtree's aggregates for the caller (the parent frame).
descSizesTmp = currentDescSizes;
sizeTmp = currentSize;
krSizesSumTmp = krSizesSum;
revkrSizesSumTmp = revkrSizesSum;
postL_to_preL[postorder] = preorder;
preL_to_postL[preorder] = postorder;
preL_to_postR[preorder] = treeSize-1-preorder;
postR_to_preL[treeSize-1-preorder] = preorder;
return postorder;
}
/**
 * Indexes the nodes of the input tree. It computes the following indices,
 * which could not be computed immediately while traversing the tree in
 * {@link #indexNodes}: {@link #preL_to_ln}, {@link #postL_to_lld},
 * {@link #postR_to_rld}, {@link #preR_to_ln}.
 *
 * <p>It also computes {@link #lchl}, {@link #rchl}, and the subtree
 * deletion/insertion cost sums {@link #preL_to_sumDelCost} and
 * {@link #preL_to_sumInsCost}.
 *
 * <p>Runs in linear time in the input tree size. Currently requires two
 * loops over input tree nodes. Can be reduced to one loop (see the code).
 */
private void postTraversalIndexing() {
int currentLeaf = -1;
int nodeForSum = -1;
int parentForSum = -1;
for(int i = 0; i < treeSize; i++) {
// First-leaf-to-the-left index: i is interpreted as a preorder id here.
preL_to_ln[i] = currentLeaf;
if(isLeaf(i)) {
currentLeaf = i;
}
// This block stores leftmost leaf descendants for each node
// indexed in postorder. Used for mapping computation.
// Added by Victor.
int postl = i; // Assume that the for loop iterates postorder.
int preorder = postL_to_preL[i];
if (sizes[preorder] == 1) {
postL_to_lld[postl] = postl;
} else {
postL_to_lld[postl] = postL_to_lld[preL_to_postL[children[preorder][0]]];
}
// This block stores rightmost leaf descendants for each node
// indexed in right-to-left postorder.
// [TODO] Use postL_to_lld and postR_to_rld instead of APTED.getLLD
// and APTED.gerRLD methods, remove these method.
// Result: faster lookup of these values.
int postr = i; // Assume that the for loop iterates reversed postorder.
preorder = postR_to_preL[postr];
if (sizes[preorder] == 1) {
postR_to_rld[postr] = postr;
} else {
postR_to_rld[postr] = postR_to_rld[preL_to_postR[children[preorder][children[preorder].length-1]]];
}
// Count lchl and rchl.
// [TODO] There are no values for parent node.
if (sizes[i] == 1) {
int parent = parents[i];
if (parent > -1) {
// In preorder, a leftmost child immediately follows its parent;
// the symmetric property holds in right-to-left preorder.
if (parent+1 == i) {
lchl++;
} else if (preL_to_preR[parent]+1 == preL_to_preR[i]) {
rchl++;
}
}
}
// Sum up costs of deleting and inserting entire subtrees.
// Reverse the node index. Here, we need traverse nodes bottom-up.
nodeForSum = treeSize - i - 1;
parentForSum = parents[nodeForSum];
// Update myself.
preL_to_sumDelCost[nodeForSum] += costModel.del(preL_to_node[nodeForSum]);
preL_to_sumInsCost[nodeForSum] += costModel.ins(preL_to_node[nodeForSum]);
if (parentForSum > -1) {
// Update my parent.
preL_to_sumDelCost[parentForSum] += preL_to_sumDelCost[nodeForSum];
preL_to_sumInsCost[parentForSum] += preL_to_sumInsCost[nodeForSum];
}
}
currentLeaf = -1;
// [TODO] Merge with the other loop. Assume different traversal.
// sizes[0] is the whole tree size; i iterates right-to-left preorder ids.
for(int i = 0; i < sizes[0]; i++) {
preR_to_ln[i] = currentLeaf;
if(isLeaf(preR_to_preL[i])) {
currentLeaf = i;
}
}
}
/**
 * Resolves, via the traversal-translation indices, the left-to-right
 * preorder id of the leftmost leaf descendant of the given node.
 *
 * @param preL left-to-right preorder id of a node.
 * @return left-to-right preorder id of the leftmost leaf node of preL.
 */
public int preL_to_lld(int preL) {
  // preorder -> postorder -> leftmost leaf (postorder) -> back to preorder.
  int postL = preL_to_postL[preL];
  int lldPostL = postL_to_lld[postL];
  return postL_to_preL[lldPostL];
}
/**
 * Resolves, via the traversal-translation indices, the left-to-right
 * preorder id of the rightmost leaf descendant of the given node.
 *
 * @param preL left-to-right preorder id of a node.
 * @return left-to-right preorder id of the rightmost leaf node of preL.
 */
public int preL_to_rld(int preL) {
  // preorder -> reversed postorder -> rightmost leaf -> back to preorder.
  int postR = preL_to_postR[preL];
  int rldPostR = postR_to_rld[postR];
  return postR_to_preL[rldPostR];
}
/**
 * Looks up the {@link node.Node} object that corresponds to the given
 * left-to-right postorder id.
 *
 * @param postL left-to-right postorder id of a node.
 * @return {@link node.Node} corresponding to postL.
 */
public Node<D> postL_to_node(int postL) {
  // Translate to preorder first; the node pointers are indexed by preorder.
  int preL = postL_to_preL[postL];
  return preL_to_node[preL];
}
/**
 * Looks up the {@link node.Node} object that corresponds to the given
 * right-to-left postorder id.
 *
 * @param postR right-to-left postorder id of a node.
 * @return {@link node.Node} corresponding to postR.
 */
public Node<D> postR_to_node(int postR) {
  // Translate to preorder first; the node pointers are indexed by preorder.
  int preL = postR_to_preL[postR];
  return preL_to_node[preL];
}
/**
 * Reports how many nodes the indexed input tree contains.
 *
 * @return number of nodes in the tree.
 */
public int getSize() {
  return this.treeSize;
}
/**
 * Tells whether the given node has no children, i.e. its subtree consists
 * of the node alone.
 *
 * @param node preorder id of a node to verify.
 * @return {@code true} if {@code node} is a leaf, {@code false} otherwise.
 */
public boolean isLeaf(int node) {
  return 1 == sizes[node];
}
/**
 * Converts {@link ArrayList} of integer values to a primitive array. The
 * size of the output array equals the number of elements in the list.
 *
 * @param integers ArrayList with integer values.
 * @return array with values from input ArrayList, in list order.
 */
private int[] toIntArray(ArrayList<Integer> integers) {
  // Stream-based copy replaces the manual index bookkeeping of the original
  // loop; unboxing is made explicit via Integer::intValue.
  return integers.stream().mapToInt(Integer::intValue).toArray();
}
/**
 * Returns the root node of the currently processed subtree in the tree
 * decomposition part of APTED [1, Algorithm 1]. At each point, we have to
 * know which subtree is being processed.
 *
 * @return current subtree root node (left-to-right preorder id).
 */
public int getCurrentNode() {
  return this.currentNode;
}
/**
 * Stores the root node's preorder id of the currently processed subtree.
 *
 * @param preorder preorder id of the root node.
 */
public void setCurrentNode(int preorder) {
  this.currentNode = preorder;
}
}
| 18,924 | 31.295222 | 107 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/apted/node/StringNodeData.java
|
/* MIT License
*
* Copyright (c) 2017 Mateusz Pawlik
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package apted.node;
/**
 * Represents a node label that consists of a single string value. Such label
 * belongs to a node.
 *
 * <p>Instances are immutable: the label is assigned once at construction and
 * never changes afterwards.
 *
 * @see Node
 */
public class StringNodeData {

  /**
   * The label of a node. Declared {@code final} so the data object cannot be
   * mutated after construction.
   */
  private final String label;

  /**
   * Constructs node data with a specified label.
   *
   * @param label string label of a node.
   */
  public StringNodeData(String label) {
    this.label = label;
  }

  /**
   * Returns the label of a node.
   *
   * @return node label.
   */
  public String getLabel() {
    return label;
  }
}
| 1,681 | 28.508772 | 80 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/apted/parser/BracketStringInputParser.java
|
/* MIT License
*
* Copyright (c) 2017 Mateusz Pawlik, Nikolaus Augsten
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package apted.parser;
import java.util.List;
import apted.node.Node;
import apted.node.StringNodeData;
import apted.util.FormatUtilities;
// [TODO] Make this parser independent from FormatUtilities - move here relevant elements.
/**
 * Parser for the input trees in the bracket notation with a single
 * string-value label of type {@link StringNodeData}.
 *
 * <p>Bracket notation encodes the trees with nested parentheses, for example,
 * in tree {A{B{X}{Y}{F}}{C}} the root node has label A and two children with
 * labels B and C. Node with label B has three children with labels X, Y, F.
 *
 * @see Node
 * @see StringNodeData
 */
public class BracketStringInputParser implements InputParser<StringNodeData> {

  /**
   * Parses the input tree as a string and converts it to our tree
   * representation using the {@link Node} class.
   *
   * @param s input tree as string in bracket notation.
   * @return tree representation of the bracket notation input.
   * @see Node
   */
  @Override
  public Node<StringNodeData> fromString(String s) {
    // Keep only the text between the outermost pair of curly brackets.
    int start = s.indexOf("{");
    int stop = s.lastIndexOf("}") + 1;
    s = s.substring(start, stop);
    // The root label is the text preceding the first child subtree.
    Node<StringNodeData> root = new Node<>(new StringNodeData(FormatUtilities.getRoot(s)));
    // Recursively parse each child's bracket string and attach the result.
    for (String childString : FormatUtilities.getChildren(s)) {
      root.addChild(fromString(childString));
    }
    return root;
  }
}
| 2,500 | 37.476923 | 91 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/apted/parser/InputParser.java
|
/* MIT License
*
* Copyright (c) 2017 Mateusz Pawlik
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package apted.parser;
import apted.node.Node;
/**
 * This interface specifies methods (currently only one) that must be
 * implemented for a custom input parser.
 *
 * @param <D> the type of node data.
 */
public interface InputParser<D> {
/**
 * Converts the input tree passed as string (e.g., bracket notation, XML)
 * into the tree structure.
 *
 * @param s input tree as string.
 * @return tree structure.
 */
public Node<D> fromString(String s);
}
| 1,607 | 34.733333 | 80 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/apted/util/APTEDtest.java
|
package apted.util;
import java.util.List;
import apted.costmodel.StringUnitCostModel;
import apted.distance.APTED;
import apted.node.Node;
import apted.node.StringNodeData;
import apted.parser.BracketStringInputParser;
public class APTEDtest {

  /**
   * Small driver that computes the tree edit distance and a minimal edit
   * mapping between two hard-coded srcML trees and prints both to stdout.
   *
   * @param args command line arguments; not used.
   * @throws Exception propagated from parsing or distance computation.
   */
  public static void main(String[] args) throws Exception {
    String srcTree = "{decl_stmt{decl{type{name{name}{operator}{name}}}{name}}}";
    String dstTree = "{expr_stmt{expr{call{name{name}{operator}{name}}{argument_list{argument{expr{name}}}{argument{expr{name}}}}}}}";
    BracketStringInputParser parser = new BracketStringInputParser();
    Node<StringNodeData> t1 = parser.fromString(srcTree);
    Node<StringNodeData> t2 = parser.fromString(dstTree);
    // Initialise APTED with the unit cost model.
    APTED<StringUnitCostModel, StringNodeData> apted = new APTED<>(new StringUnitCostModel());
    // Execute APTED.
    float result = apted.computeEditDistance(t1, t2);
    System.out.println(result);
    // Each mapping entry is a pair of postorder ids (0 = insert/delete).
    List<int[]> editMapping = apted.computeEditMapping();
    for (int[] nodeAlignment : editMapping) {
      System.out.println(nodeAlignment[0] + "->" + nodeAlignment[1]);
    }
  }
}
| 1,112 | 34.903226 | 132 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/apted/util/CommandLine.java
|
/* MIT License
*
* Copyright (c) 2017 Mateusz Pawlik
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package apted.util;
import java.io.BufferedReader;
import java.io.FileReader;
import java.util.Date;
import java.util.List;
import apted.costmodel.CostModel;
import apted.costmodel.StringUnitCostModel;
import apted.distance.APTED;
import apted.node.Node;
import apted.node.StringNodeData;
import apted.parser.BracketStringInputParser;
import apted.parser.InputParser;
/**
 * This is the command line interface for executing APTED algorithm.
 *
 * @param <C> type of cost model.
 * @param <P> type of input parser.
 * @see CostModel
 * @see InputParser
 */
public class CommandLine<C extends CostModel, P extends InputParser> {
// Help text printed for -h/--help; kept as a single constant string.
private String helpMessage =
"\n" +
"Compute the edit distance between two trees.\n" +
"\n" +
"SYNTAX\n" +
"\n" +
" java -jar APTED.jar {-t TREE1 TREE2 | -f FILE1 FILE2} [-m] [-v]\n" +
"\n" +
" java -jar APTED.jar -h\n" +
"\n" +
"DESCRIPTION\n" +
"\n" +
" Compute the edit distance between two trees with APTED algorithm [1,2].\n" +
" APTED supersedes our RTED algorithm [3].\n" +
" By default unit cost model is supported where each edit operation\n" +
" has cost 1 (in case of equal labels the cost is 0).\n" +
"\n" +
" For implementing other cost models see the details on github website\n" +
" (https://github.com/DatabaseGroup/apted).\n" +
"\n" +
"LICENCE\n" +
"\n" +
" The source code of this program is published under the MIT licence and\n" +
" can be found on github (https://github.com/DatabaseGroup/apted).\n" +
"\n" +
"OPTIONS\n" +
"\n" +
" -h, --help \n" +
" print this help message.\n" +
"\n" +
" -t TREE1 TREE2,\n" +
" --trees TREE1 TREE2\n" +
" compute the tree edit distance between TREE1 and TREE2. The\n" +
" trees are encoded in the bracket notation, for example, in tree\n" +
" {A{B{X}{Y}{F}}{C}} the root node has label A and two children\n" +
" with labels B and C. B has three children with labels X, Y, F.\n" +
"\n" +
" -f FILE1 FILE2, \n" +
" --files FILE1 FILE2\n" +
" compute the tree edit distance between the two trees stored in\n" +
" the files FILE1 and FILE2. The trees are encoded in bracket\n" +
" notation.\n" +
// "\n" +
// " -c CD CI CR, \n" +
// " --costs CD CI CR\n" +
// " set custom cost for edit operations. Default is -c 1 1 1.\n" +
// " CD - cost of node deletion\n" +
// " CI - cost of node insertion\n" +
// " CR - cost of node renaming\n" +
"\n" +
" -v, --verbose\n" +
" print verbose output, including tree edit distance, runtime,\n" +
" number of relevant subproblems and strategy statistics.\n" +
"\n" +
" -m, --mapping\n" +
" compute the minimal edit mapping between two trees. There might\n" +
" be multiple minimal edit mappings. This option computes only one\n" +
" of them. The first line of the output is the cost of the mapping.\n" +
" The following lines represent the edit operations. n and m are\n" +
" postorder IDs (beginning with 1) of nodes in the left-hand and\n" +
" the right-hand trees respectively.\n" +
" n->m - rename node n to m\n" +
" n->0 - delete node n\n" +
" 0->m - insert node m\n" +
"EXAMPLES\n" +
"\n" +
" java -jar APTED.jar -t {a{b}{c}} {a{b{d}}}\n" +// -c 1 1 0.5\n" +
" java -jar APTED.jar -f 1.tree 2.tree\n" +
" java -jar APTED.jar -t {a{b}{c}} {a{b{d}}} -m -v\n" +
"\n" +
"REFERENCES\n" +
"\n" +
" [1] M. Pawlik and N. Augsten. Efficient Computation of the Tree Edit\n" +
" Distance. ACM Transactions on Database Systems (TODS) 40(1). 2015.\n" +
" [2] M. Pawlik and N. Augsten. Tree edit distance: Robust and memory-\n" +
" efficient. Information Systems 56. 2016.\n" +
" [3] M. Pawlik and N. Augsten. RTED: A Robust Algorithm for the Tree Edit\n" +
" Distance. PVLDB 5(4). 2011.\n" +
"\n" +
"AUTHORS\n" +
"\n" +
" Mateusz Pawlik, Nikolaus Augsten";
// TODO: Review if all fields are necessary.
// Message printed whenever the arguments cannot be parsed.
private String wrongArgumentsMessage = "Wrong arguments. Try \"java -jar RTED.jar --help\" for help.";
// NOTE(review): only run, verbose and mapping are read in this class; the
// remaining flags (custom, array, strategy, ifSwitch, sota, demaine) appear
// to be leftovers from RTED — confirm before removing.
private boolean run, custom, array, strategy, ifSwitch, sota, verbose, demaine, mapping;
// NOTE(review): sotaStrategy, customStrategy and customStrategyArrayFile are
// never read in this class either.
private int sotaStrategy;
private String customStrategy, customStrategyArrayFile;
// NOTE(review): raw APTED type — consider APTED<C, StringNodeData> in a
// follow-up cleanup; kept as-is here.
private APTED rted;
// Last computed tree edit distance.
private double ted;
private C costModel;
private P inputParser;
// Parsed input trees (source and destination).
private Node t1;
private Node t2;
/**
 * Constructs the command line. Initialises the cost model and input parser
 * of specific types.
 *
 * @param costModel instance of a specific cost model.
 * @param inputParser instance of a specific inputParser.
 * @see CostModel
 * @see InputParser
 */
public CommandLine(C costModel, P inputParser) {
this.costModel = costModel;
this.inputParser = inputParser;
}
/**
 * Main method, invoced when executing the jar file.
 *
 * @param args array of command line arguments passed when executing jar file.
 */
public static void main(String[] args) {
CommandLine<StringUnitCostModel, BracketStringInputParser> rtedCL = new CommandLine<>(new StringUnitCostModel(), new BracketStringInputParser());
rtedCL.runCommandLine(args);
}
/**
 * Run the command line with given arguments.
 *
 * @param args array of command line arguments passed when executing jar file.
 */
public void runCommandLine(String[] args) {
rted = new APTED<C, StringNodeData>(costModel);
try {
// Scan the arguments left to right; options that consume values advance i.
for (int i = 0; i < args.length; i++) {
if (args[i].equals("--help") || args[i].equals("-h")) {
System.out.println(helpMessage);
System.exit(0);
} else if (args[i].equals("-t") || args[i].equals("--trees")) {
parseTreesFromCommandLine(args[i+1], args[i+2]);
i = i+2;
run = true;
} else if (args[i].equals("-f") || args[i].equals("--files")) {
parseTreesFromFiles(args[i+1], args[i+2]);
i = i+2;
run = true;
// TODO: -f option temporarily disabled for refactoring.
// } else if (args[i].equals("-c") || args[i].equals("--costs")) {
// setCosts(args[i+1], args[i+2], args[i+3]);
// i = i+3;
} else if (args[i].equals("-v") || args[i].equals("--verbose")) {
verbose = true;
} else if (args[i].equals("-m") || args[i].equals("--mapping")) {
mapping = true;
} else {
System.out.println(wrongArgumentsMessage);
System.exit(0);
}
}
} catch (ArrayIndexOutOfBoundsException e) {
// An option expected more values than were supplied.
System.out.println("Too few arguments.");
System.exit(0);
}
if (!run) {
System.out.println(wrongArgumentsMessage);
System.exit(0);
}
long time1 = (new Date()).getTime();
ted = rted.computeEditDistance(t1, t2);
long time2 = (new Date()).getTime();
if (verbose) {
System.out.println("distance: " + ted);
System.out.println("runtime: " + ((time2 - time1) / 1000.0));
} else {
System.out.println(ted);
}
if (mapping) { // TED is computed anyways.
List<int[]> editMapping = rted.computeEditMapping();
for (int[] nodeAlignment : editMapping) {
System.out.println(nodeAlignment[0] + "->" + nodeAlignment[1]);
}
}
}
/**
 * Parse two input trees from the command line and convert them to tree
 * representation using {@link Node} class.
 *
 * @param ts1 source input tree as string.
 * @param ts2 destination input tree as string.
 * @see Node
 */
private void parseTreesFromCommandLine(String ts1, String ts2) {
try {
t1 = inputParser.fromString(ts1);
} catch (Exception e) {
System.out.println("TREE1 argument has wrong format");
System.exit(0);
}
try {
t2 = inputParser.fromString(ts2);
} catch (Exception e) {
System.out.println("TREE2 argument has wrong format");
System.exit(0);
}
}
/**
 * Parses two input trees from given files and convert them to tree
 * representation using {@link Node} class.
 *
 * @param fs1 path to file with source tree.
 * @param fs2 path to file with destination tree.
 * @see Node
 */
private void parseTreesFromFiles(String fs1, String fs2) {
try {
t1 = inputParser.fromString((new BufferedReader(new FileReader(fs1))).readLine());
} catch (Exception e) {
System.out.println("TREE1 argument has wrong format");
System.exit(0);
}
try {
t2 = inputParser.fromString((new BufferedReader(new FileReader(fs2))).readLine());
} catch (Exception e) {
System.out.println("TREE2 argument has wrong format");
System.exit(0);
}
}
// TODO: Bring the functionalitites below back to life.
// /**
// * Set custom costs for the edit operations.
// *
// * @deprecated
// * @param cds cost of deletion.
// * @param cis cost of insertion.
// * @param cms cost of rename (mapping).
// */
// private void setCosts(String cds, String cis, String cms) {
// try {
// rted.setCustomCosts(Float.parseFloat(cds), Float.parseFloat(cis), Float.parseFloat(cms));
// } catch (Exception e) {
// System.out.println("One of the costs has wrong format.");
// System.exit(0);
// }
// }
}
| 10,919 | 35.27907 | 149 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/apted/util/FormatUtilities.java
|
/* MIT License
*
* Copyright (c) 2017 Nikolaus Augsten
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package apted.util;
import java.util.ArrayList;
import java.util.Date;
import java.util.LinkedList;
import java.util.List;
import java.util.Random;
/**
* Various formatting utilities.
*
* @author Nikolaus Augsten
*
*/
public class FormatUtilities
{
// All methods in this class are static; this explicit no-op constructor is
// kept only to preserve the original public API.
public FormatUtilities()
{
}
/**
 * Returns the fieldNr-th (0-based) separator-delimited field of the given
 * line, trimmed of surrounding whitespace, or {@code null} if the line is
 * {@code null} or has fewer fields than requested.
 */
public static String getField(int fieldNr, String line, char seperator)
{
if(line != null)
{
int pos = 0;
// Skip over fieldNr separators to reach the start of the wanted field.
for(int i = 0; i < fieldNr; i++)
{
pos = line.indexOf(seperator, pos);
if(pos == -1)
return null;
pos++;
}
// The field ends at the next separator, or at the end of the line.
int pos2 = line.indexOf(seperator, pos);
String res;
if(pos2 == -1)
res = line.substring(pos);
else
res = line.substring(pos, pos2);
return res.trim();
} else
{
return null;
}
}
/**
 * Splits the given line at every occurrence of the separator character and
 * trims each resulting field. A {@code null} or empty line yields an empty
 * array; otherwise the result has one entry per field (a line with k
 * separators yields k+1 fields, possibly empty after trimming).
 */
public static String[] getFields(String line, char separator)
{
    if (line == null || line.equals("")) {
        return new String[0];
    }
    // Typed List + StringBuilder replace the raw LinkedList and StringBuffer
    // of the original, removing the unchecked toArray cast.
    List<String> fields = new ArrayList<>();
    StringBuilder field = new StringBuilder();
    for (int i = 0; i < line.length(); i++) {
        char ch = line.charAt(i);
        if (ch == separator) {
            // End of the current field; a trailing separator produces a
            // final empty field, matching the original behaviour.
            fields.add(field.toString().trim());
            field.setLength(0);
        } else {
            field.append(ch);
        }
    }
    fields.add(field.toString().trim());
    return fields.toArray(new String[0]);
}
/**
 * Splits the line at the separator and removes a surrounding pair of quote
 * characters from every resulting field.
 */
public static String[] getFields(String line, char separator, char quote)
{
    String[] fields = getFields(line, separator);
    for (int i = 0; i < fields.length; i++) {
        fields[i] = stripQuotes(fields[i], quote);
    }
    return fields;
}
/**
 * Removes a single surrounding pair of quote characters from s, if and only
 * if s starts and ends with that character; otherwise returns s unchanged.
 */
public static String stripQuotes(String s, char quote)
{
    int len = s.length();
    boolean quoted = len >= 2 && s.charAt(0) == quote && s.charAt(len - 1) == quote;
    return quoted ? s.substring(1, len - 1) : s;
}
public static String resizeEnd(String s, int size)
{
return resizeEnd(s, size, ' ');
}
public static String getRandomString(int length)
{
Date d = new Date();
Random r = new Random(d.getTime());
String str = "";
for(int i = 0; i < length; i++)
str = (new StringBuilder(String.valueOf(str))).append((char)(65 + r.nextInt(26))).toString();
return str;
}
public static String resizeEnd(String s, int size, char fillChar)
{
String res;
try
{
res = s.substring(0, size);
}
catch(IndexOutOfBoundsException e)
{
res = s;
for(int i = s.length(); i < size; i++)
res = (new StringBuilder(String.valueOf(res))).append(fillChar).toString();
}
return res;
}
public static String resizeFront(String s, int size)
{
return resizeFront(s, size, ' ');
}
public static String resizeFront(String s, int size, char fillChar)
{
String res;
try
{
res = s.substring(0, size);
}
catch(IndexOutOfBoundsException e)
{
res = s;
for(int i = s.length(); i < size; i++)
res = (new StringBuilder(String.valueOf(fillChar))).append(res).toString();
}
return res;
}
public static int matchingBracket(String s, int pos)
{
if(s == null || pos > s.length() - 1)
return -1;
char open = s.charAt(pos);
char close;
switch(open)
{
case 123: // '{'
close = '}';
break;
case 40: // '('
close = ')';
break;
case 91: // '['
close = ']';
break;
case 60: // '<'
close = '>';
break;
default:
return -1;
}
pos++;
int count;
for(count = 1; count != 0 && pos < s.length(); pos++)
if(s.charAt(pos) == open)
count++;
else
if(s.charAt(pos) == close)
count--;
if(count != 0)
return -1;
else
return pos - 1;
}
public static int getTreeID(String s)
{
if(s != null && s.length() > 0)
{
int end = s.indexOf(':', 1);
if(end == -1)
return -1;
else
return Integer.parseInt(s.substring(0, end));
} else
{
return -1;
}
}
public static String getRoot(String s)
{
if(s != null && s.length() > 0 && s.startsWith("{") && s.endsWith("}"))
{
int end = s.indexOf('{', 1);
if(end == -1)
end = s.indexOf('}', 1);
return s.substring(1, end);
} else
{
return null;
}
}
public static List<String> getChildren(String s)
{
if(s != null && s.length() > 0 && s.startsWith("{") && s.endsWith("}"))
{
List<String> children = new ArrayList<>();
int end = s.indexOf('{', 1);
if(end == -1)
return children;
String rest = s.substring(end, s.length() - 1);
for(int match = 0; rest.length() > 0 && (match = matchingBracket(rest, 0)) != -1;)
{
children.add(rest.substring(0, match + 1));
if(match + 1 < rest.length())
rest = rest.substring(match + 1);
else
rest = "";
}
return children;
} else
{
return null;
}
}
public static String parseTree(String s, List<String> children)
{
children.clear();
if(s != null && s.length() > 0 && s.startsWith("{") && s.endsWith("}"))
{
int end = s.indexOf('{', 1);
if(end == -1)
{
end = s.indexOf('}', 1);
return s.substring(1, end);
}
String root = s.substring(1, end);
String rest = s.substring(end, s.length() - 1);
for(int match = 0; rest.length() > 0 && (match = matchingBracket(rest, 0)) != -1;)
{
children.add(rest.substring(0, match + 1));
if(match + 1 < rest.length())
rest = rest.substring(match + 1);
else
rest = "";
}
return root;
} else
{
return null;
}
}
public static String commaSeparatedList(String list[])
{
StringBuffer s = new StringBuffer();
for(int i = 0; i < list.length; i++)
{
s.append(list[i]);
if(i != list.length - 1)
s.append(",");
}
return s.toString();
}
public static String commaSeparatedList(String list[], char quote)
{
StringBuffer s = new StringBuffer();
for(int i = 0; i < list.length; i++)
{
s.append((new StringBuilder(String.valueOf(quote))).append(list[i]).append(quote).toString());
if(i != list.length - 1)
s.append(",");
}
return s.toString();
}
public static String spellOutNumber(String num)
{
StringBuffer sb = new StringBuffer();
for(int i = 0; i < num.length(); i++)
{
char ch = num.charAt(i);
switch(ch)
{
case 48: // '0'
sb.append("zero");
break;
case 49: // '1'
sb.append("one");
break;
case 50: // '2'
sb.append("two");
break;
case 51: // '3'
sb.append("three");
break;
case 52: // '4'
sb.append("four");
break;
case 53: // '5'
sb.append("five");
break;
case 54: // '6'
sb.append("six");
break;
case 55: // '7'
sb.append("seven");
break;
case 56: // '8'
sb.append("eight");
break;
case 57: // '9'
sb.append("nine");
break;
default:
sb.append(ch);
break;
}
}
return sb.toString();
}
public static String substituteBlanks(String s, String subst)
{
StringBuffer sb = new StringBuffer();
for(int i = 0; i < s.length(); i++)
if(s.charAt(i) != ' ')
sb.append(s.charAt(i));
else
sb.append(subst);
return sb.toString();
}
public static String escapeLatex(String s)
{
StringBuffer sb = new StringBuffer();
for(int i = 0; i < s.length(); i++)
{
String c = (new StringBuilder(String.valueOf(s.charAt(i)))).toString();
if(c.equals("#"))
c = "\\#";
if(c.equals("&"))
c = "\\&";
if(c.equals("$"))
c = "\\$";
if(c.equals("_"))
c = "\\_";
sb.append(c);
}
return sb.toString();
}
}
| 10,916 | 25.626829 | 106 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/cost_matrix/Aggregate.java
|
package cost_matrix;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
public class Aggregate {
public static void CreatSet(Map<Integer, Integer> arr1,Map<Integer, Integer> arr2){
// 测试union
// int[] arr1 = { 123, 45, 1234 };
// int[] arr2 = { 123, 33, 45, 4, 123 };
// Integer[] result_union = union(arr1, arr2);
// System.out.println("求并集的结果如下:");
// for (int str : result_union)
// {
// System.out.println(str);
// }
// System.out.println("---------------------可爱的分割线------------------------");
// 测试insect
List<Integer> result_insect = intersect(arr1, arr2);
System.out.println("求交集的结果如下:");
for (int str : result_insect)
{
System.out.println(str);
}
System.out.println("---------------------可爱的分割线------------------------");
}
// 求两个数值数组的并集,利用set的元素唯一性
public static Integer[] union(int[] arr1, int[] arr2)
{
Set<Integer> set = new HashSet<>();
for (int str : arr1)
{
set.add(str);
}
for (int str : arr2)
{
set.add(str);
}
Integer[] result = new Integer[set.size()];
return set.toArray(result);
}
// 求两个数组的交集
public static ArrayList<Integer> intersect(Map<Integer, Integer> arr1, Map<Integer, Integer> arr2)
{
Map<Integer, Boolean> map = new HashMap<>();
ArrayList<Integer> list = new ArrayList<>();
Iterator iter = arr1.entrySet().iterator();
while (iter.hasNext()) {
Map.Entry entry = (Map.Entry) iter.next();
Integer nameInteger=(Integer)entry.getKey();
Integer num1Integer=(Integer)entry.getValue();
if(arr2.containsKey(nameInteger)){
Integer num2Integer=arr2.get(nameInteger);
int small = Math.min(num1Integer, num2Integer);
for(int i=0;i<small;i++){
list.add(nameInteger);
// System.out.println(nameInteger);//输出出边序列
}
}
}
return list;
}
public static int[] ConvertInteger(Integer[] arr1){
Integer[] integers = arr1;
int[] intArray = new int[arr1.length];
for(int i=0; i < integers.length; i ++)
{
intArray[i] = integers[i].intValue();
}
return intArray;
}
public static double Travel(Map<Integer, Double> map){
double weight = 0;
Iterator iter = map.entrySet().iterator();
while(iter.hasNext()) {
Map.Entry entry =(Map.Entry)iter.next();
Double value = (Double)entry.getValue();
weight = weight+value;
}
return weight;
}
}
| 2,738 | 27.831579 | 102 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/cost_matrix/CopyFile.java
|
package cost_matrix;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.InputStream;
import java.io.PrintWriter;
public class CopyFile {
    public CopyFile() {
    }

    /**
     * Creates a directory (single level) if it does not exist yet.
     *
     * @param folderPath directory path, e.g. c:/fqf
     */
    public void newFolder(String folderPath) {
        try {
            File myFilePath = new File(folderPath);
            if (!myFilePath.exists()) {
                myFilePath.mkdir();
            }
        } catch (Exception e) {
            System.out.println("新建目录操作出错");
            e.printStackTrace();
        }
    }

    /**
     * Creates a text file (if missing) and writes {@code fileContent} plus a
     * trailing line separator into it, replacing any previous content.
     *
     * @param filePathAndName file path and name, e.g. c:/fqf.txt
     * @param fileContent     content to write
     */
    public void newFile(String filePathAndName, String fileContent) {
        try {
            File myFilePath = new File(filePathAndName);
            // Create the file first when it does not exist yet.
            if (!myFilePath.exists()) {
                myFilePath.createNewFile();
            }
            // try-with-resources closes the writers (the original leaked the
            // PrintWriter when an exception occurred before close()).
            try (FileWriter resultFile = new FileWriter(myFilePath);
                    PrintWriter myFile = new PrintWriter(resultFile)) {
                myFile.println(fileContent);
            }
        } catch (Exception e) {
            System.out.println("新建文件操作出错");
            e.printStackTrace();
        }
    }

    /**
     * Deletes a single file.
     *
     * @param filePathAndName file path and name, e.g. c:/fqf.txt
     */
    public void delFile(String filePathAndName) {
        try {
            new File(filePathAndName).delete();
        } catch (Exception e) {
            System.out.println("删除文件操作出错");
            e.printStackTrace();
        }
    }

    /**
     * Deletes a folder including everything inside it.
     *
     * @param folderPath folder path, e.g. c:/fqf
     */
    public void delFolder(String folderPath) {
        try {
            delAllFile(folderPath); // first remove all contents
            new File(folderPath).delete(); // then delete the now-empty folder
        } catch (Exception e) {
            System.out.println("删除文件夹操作出错");
            e.printStackTrace();
        }
    }

    /**
     * Recursively deletes every file and sub-folder inside {@code path};
     * the folder itself is kept.
     *
     * @param path folder path, e.g. c:/fqf
     */
    public void delAllFile(String path) {
        File file = new File(path);
        if (!file.exists() || !file.isDirectory()) {
            return;
        }
        String[] tempList = file.list();
        if (tempList == null) {
            // list() can return null (e.g. I/O error, concurrent removal).
            return;
        }
        for (String element : tempList) {
            File temp = path.endsWith(File.separator)
                    ? new File(path + element)
                    : new File(path + File.separator + element);
            if (temp.isFile()) {
                temp.delete();
            }
            if (temp.isDirectory()) {
                delAllFile(path + "/" + element); // delete the files inside first
                delFolder(path + "/" + element); // then the empty folder
            }
        }
    }

    /**
     * Copies a single file; does nothing when the source does not exist.
     *
     * @param oldPath source file, e.g. c:/fqf.txt
     * @param newPath target file, e.g. f:/fqf.txt
     */
    public void copyFile(String oldPath, String newPath) {
        try {
            File oldfile = new File(oldPath);
            if (oldfile.exists()) {
                // try-with-resources closes both streams; the original never
                // closed the output stream and leaked the input stream on error.
                try (InputStream inStream = new FileInputStream(oldPath);
                        FileOutputStream fs = new FileOutputStream(newPath)) {
                    byte[] buffer = new byte[1444];
                    int byteread;
                    while ((byteread = inStream.read(buffer)) != -1) {
                        fs.write(buffer, 0, byteread);
                    }
                }
            }
        } catch (Exception e) {
            System.out.println("复制单个文件操作出错");
            e.printStackTrace();
        }
    }

    /**
     * Recursively copies a whole directory tree.
     *
     * @param oldPath source directory, e.g. c:/fqf
     * @param newPath target directory, e.g. f:/fqf/ff
     */
    public void copyFolder(String oldPath, String newPath) {
        try {
            new File(newPath).mkdirs(); // create the target folder when missing
            File a = new File(oldPath);
            String[] file = a.list();
            if (file == null) {
                return;
            }
            for (String element : file) {
                File temp = oldPath.endsWith(File.separator)
                        ? new File(oldPath + element)
                        : new File(oldPath + File.separator + element);
                if (temp.isFile()) {
                    // try-with-resources closes both streams even on failure
                    // (the original leaked them when an exception occurred).
                    try (FileInputStream input = new FileInputStream(temp);
                            FileOutputStream output = new FileOutputStream(
                                    newPath + "/" + temp.getName())) {
                        byte[] b = new byte[1024 * 5];
                        int len;
                        while ((len = input.read(b)) != -1) {
                            output.write(b, 0, len);
                        }
                        output.flush();
                    }
                }
                if (temp.isDirectory()) { // recurse into sub-folders
                    copyFolder(oldPath + "/" + element, newPath + "/" + element);
                }
            }
        } catch (Exception e) {
            System.out.println("复制整个文件夹内容操作出错");
            e.printStackTrace();
        }
    }

    /**
     * Moves a file: copy to the target path, then delete the source.
     *
     * @param oldPath e.g. c:/fqf.txt
     * @param newPath e.g. d:/fqf.txt
     */
    public void moveFile(String oldPath, String newPath) {
        copyFile(oldPath, newPath);
        delFile(oldPath);
    }

    /**
     * Moves a folder: copy the tree to the target path, then delete the source.
     *
     * @param oldPath e.g. c:/fqf
     * @param newPath e.g. d:/fqf
     */
    public void moveFolder(String oldPath, String newPath) {
        copyFolder(oldPath, newPath);
        delFolder(oldPath);
    }

    public static void main(String[] args) {
        CopyFile file = new CopyFile();
        // Demo call kept from the original author; it is a no-op when the
        // hard-coded path E:/1 does not exist.
        file.delAllFile("E:/1");
    }

    /** Alternative single-file copy kept from the original (unused elsewhere in this class). */
    private void copyFile2(String source, String dest) {
        try (FileInputStream inFile = new FileInputStream(new File(source));
                FileOutputStream outFile = new FileOutputStream(new File(dest))) {
            byte[] buffer = new byte[10240];
            int i;
            while ((i = inFile.read(buffer)) != -1) {
                outFile.write(buffer, 0, i);
            }
        } catch (Exception e) {
            // The original swallowed failures silently; at least record them.
            e.printStackTrace();
        }
    }
}
| 7,592 | 27.870722 | 118 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/cost_matrix/Edge.java
|
package cost_matrix;
/**
 * Mutable data holder for a directed, weighted edge between two graph nodes
 * identified by their integer indices.
 */
public class Edge {

    // Index of the node this edge leaves.
    private int from;
    // Index of the node this edge enters.
    private int to;
    // Weight attached to this edge.
    private int cost;

    /** @return the index of the source node */
    public int getSource() {
        return from;
    }

    /** @param Source the index of the source node */
    public void setSource(int Source) {
        from = Source;
    }

    /** @return the index of the target node */
    public int getTarget() {
        return to;
    }

    /** @param Target the index of the target node */
    public void setTarget(int Target) {
        to = Target;
    }

    /** @return the edge weight */
    public int getweight() {
        return cost;
    }

    /** @param weight the edge weight */
    public void setweight(int weight) {
        cost = weight;
    }
}
| 446 | 12.96875 | 35 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/cost_matrix/KuhnMunkres.java
|
package cost_matrix;
import java.util.Arrays;
/**
 * Kuhn-Munkres (Hungarian) algorithm for maximum-weight bipartite matching on
 * a rectangular weight matrix. All working state lives in static fields, so
 * the class is not thread-safe and supports only one matching at a time.
 */
public class KuhnMunkres {
	// Maximum number of nodes supported per side (set by the constructor).
	private static int maxN;
	// Dimension of the padded square matrix: max(lenX, lenY).
	private static int n;
	// Actual input dimensions: lenX rows (X side), lenY columns (Y side).
	private static int lenX;
	private static int lenY;
	// Padded n-by-n copy of the input weights (missing cells are 0).
	private static double[][] weights;
	// Per-side visited flags used during the augmenting-path search.
	private static boolean[] visitX;
	private static boolean[] visitY;
	// Feasible vertex labels for the X and Y sides.
	private static double[] lx;
	private static double[] ly;
	// slack[v]: minimal lx[u]+ly[v]-weights[u][v] over visited u; used to relabel.
	private static double[] slack;
	// match[v]: index of the X node matched to Y node v, or -1 if unmatched.
	private static int[] match;
	// Demo driver: matches a 4x5 weight matrix and prints the resulting pairs.
	public static void main(String[] args) throws Exception{
		KuhnMunkres graph = new KuhnMunkres(5);
		int weight[][]={{3,4,6,4,9},{6,4,5,3,8},{7,5,3,4,2},{6,3,2,2,5}};
		double[] result = new double[5];
		int[][] re = getMaxBipartie(weight,result);
		int len = Math.min(lenX, lenY);
		System.out.println("len:"+len);
		for(int i=0;i<len;i++){
			System.out.println(re[i][0]+","+re[i][1]);
		}
	}
	// Allocates the static working arrays for up to maxN nodes per side.
	public KuhnMunkres( int maxN )
	{
		KuhnMunkres.maxN = maxN;
		visitX = new boolean[maxN];
		visitY = new boolean[maxN];
		lx = new double[maxN];
		ly = new double[maxN];
		slack = new double[maxN];
		match = new int[maxN];
	}
	/**
	 * Computes a maximum-weight matching for the bipartite graph described by
	 * weight[x][y]. The total matched weight is written into result[0]; the
	 * returned array holds the matched (x, y) index pairs.
	 *
	 * @return the matched pairs, or null (with result[0] = 0) when the input
	 *         is null or larger than maxN on either side
	 */
	public static int[][] getMaxBipartie( int weight[][], double[] result )
	{
		if( !preProcess(weight) )
		{
			result[0] = 0.0;
			return null;
		}
		//initialize memo data for class
		//initialize label X and Y
		Arrays.fill(ly, 0);
		Arrays.fill(lx, 0);
		// Initialize lx[i] to the row maximum: a trivially feasible labeling.
		for( int i=0; i<n; i++ )
		{
			for( int j=0; j<n; j++ )
			{
				if( lx[i]<weights[i][j])
					lx[i] = weights[i][j];
			}
		}
		//find a match for each X point
		for( int u=0; u<n; u++ )
		{
			Arrays.fill(slack, 0x7fffffff);
			while(true)
			{
				Arrays.fill(visitX, false);
				Arrays.fill(visitY, false);
				if( findPath(u) ) //if find it, go on to the next point
					break;
				//otherwise update labels so that more edge will be added in
				double inc = 0x7fffffff;
				for( int v=0; v<n; v++ )
				{
					if( !visitY[v] && slack[v] < inc )
						inc = slack[v];
				}
				for( int i=0; i<n; i++ )
				{
					if( visitX[i] )
						lx[i] -= inc;
					if( visitY[i] )
						ly[i] += inc;
				}
			}
		}
		// Sum the weights of the produced matching into result[0].
		result[0] = 0.0;
		for( int i=0; i<n; i++ )
		{
			if( match[i] >= 0 )
				result[0] += weights[match[i]][i];
		}
		return matchResult();
	}
	/**
	 * Collects the matched pairs, skipping matches that only involve padding
	 * rows/columns. res[k][0] is the X index, res[k][1] the Y index.
	 */
	public static int[][] matchResult()
	{
		int len = Math.min(lenX, lenY);
		int[][] res = new int[len][2];
		int count=0;
		for( int i=0; i<lenY; i++ )
		{
			if( match[i] >=0 && match[i]<lenX )
			{
				res[count][0] = match[i];
				res[count++][1] = i;
			}
		}
		return res;
	}
	// Validates the input and copies it into the zero-padded square matrix.
	private static boolean preProcess( int[][] weight )
	{
		if( weight == null )
			return false;
		lenX = weight.length; lenY = weight[0].length;
		if( lenX>maxN || lenY>maxN )
			return false;
		Arrays.fill(match, -1);
		n = Math.max(lenX, lenY);
		weights = new double[n][n];
		for( int i=0; i<n; i++ )
			Arrays.fill(weights[i], 0.0);
		for( int i=0; i<lenX; i++ )
			for( int j=0; j<lenY; j++ )
				weights[i][j] = weight[i][j];
		return true;
	}
	/**
	 * Tries to find an augmenting path starting from X node u inside the
	 * equality graph (edges with lx[u]+ly[v] == weights[u][v]); updates
	 * slack[] for the relabeling step when no such path exists.
	 */
	private static boolean findPath( int u )
	{
		visitX[u] = true;
		for( int v=0; v<n; v++ )
		{
			if( !visitY[v] )
			{
				double temp = lx[u]+ly[v]-weights[u][v];
				if( temp == 0.0 )
				{
					visitY[v] = true;
					// v is free, or its current partner can be re-matched elsewhere.
					if( match[v] == -1 || findPath(match[v]) )
					{
						match[v] = u;
						return true;
					}
				}
				else
					slack[v] = Math.min(slack[v], temp);
			}
		}
		return false;
	}
	public int getlenX(){
		return KuhnMunkres.lenX;
	}
	public int getlenY(){
		return KuhnMunkres.lenY;
	}
}
| 4,416 | 26.09816 | 76 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/cost_matrix/Matrix.java
|
package cost_matrix;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import gumtreediff.gen.srcml.SrcmlCppTreeGenerator;
import gumtreediff.tree.ITree;
import gumtreediff.tree.TreeContext;
import gumtreediff.tree.TreeUtils;
/**
 * Graph-edit-distance-based comparison of two ASTs. The trees are turned into
 * node lists and adjacency matrices, a cost matrix over node relabeling and
 * edge differences is built, and a minimum-cost bipartite assignment
 * (KuhnMunkres on the negated costs) yields the node and edge edit costs,
 * which are normalized by the total node and edge counts.
 * NOTE(review): heavy static state makes this class single-use per call and
 * not thread-safe.
 */
public class Matrix {
	// Edge counts of both graphs (kept as double for the final normalization).
	private static double edgenum1;
	private static double edgenum2;
	// Tree contexts used to resolve node type labels.
	private static TreeContext tContext1;
	private static TreeContext tContext2;
	// Adjacency matrices of both trees (1 = parent->child edge present).
	private static int[][] graph1;
	private static int[][] graph2;
	// Assignment cost matrix of size (m+n) x (m+n), including dummy nodes.
	private static int[][] costmatrix;
	private static ArrayList<Node> nodes1= new ArrayList<>();
	private static ArrayList<Node> nodes2= new ArrayList<>();
	private static ArrayList<Edge> edges1 = new ArrayList<>();
	private static ArrayList<Edge> edges2 = new ArrayList<>();
	// Mapping from ITree node to its preorder index in each tree.
	private static Map<ITree, Integer> node2num1 = new HashMap<>();
	private static Map<ITree, Integer> node2num2 = new HashMap<>();
	// Raw assignment result and the filtered node-index matching.
	private static Map<Integer, Integer> map = new HashMap<>();
	private static Map<Integer, Integer> change = new HashMap<>();
	// Demo driver: parses two C++ files with srcML and prints their similarity.
	public static void main(String[] args) throws Exception{
		String path = "talker.cpp";
		File cppfile = new File(path);
		TreeContext tc1 = new SrcmlCppTreeGenerator().generateFromFile(cppfile);
		ITree root1 = tc1.getRoot();
		String path2 = "talker2.cpp";
		File cppfile2 = new File(path2);
		TreeContext tc2 = new SrcmlCppTreeGenerator().generateFromFile(cppfile2);
		ITree root2 = tc2.getRoot();
		double sim = GetSimilarity(root1, root2, tc1, tc2);
		System.out.println("Smilarity:"+sim);
	}
	/**
	 * Computes the similarity value for the two trees rooted at root1/root2.
	 * Pipeline: preorder node lists -> adjacency matrices -> cost matrix ->
	 * bipartite assignment (KuhnMunkres on negated costs) -> node+edge edit
	 * cost, normalized by (m + n + |E1| + |E2|).
	 * NOTE(review): the returned value grows with the edit distance, so it
	 * behaves like a normalized distance rather than a similarity — verify
	 * how callers interpret it.
	 */
	public static double GetSimilarity(ITree root1, ITree root2, TreeContext tc1, TreeContext tc2){
		tContext1 = tc1;
		tContext2 = tc2;
		List<ITree> list1 = TreeUtils.preOrder(root1);
		List<ITree> list2 = TreeUtils.preOrder(root2);
		nodes1 = new ArrayList<>();
		nodes2 = new ArrayList<>();
		ReadNode(list1, list2);
		int m = nodes1.size();
		System.out.println("m:"+m);// debug: total node count of tree 1
		int n = nodes2.size();
		System.out.println("n:"+n);
		compare(nodes1, nodes2);
		edges1 = new ArrayList<>();
		edges2 = new ArrayList<>();
		graph1 = new int[m][m];
		graph2 = new int[n][n];
		ReadEdge(list1, list2);
//		System.out.println(graph1[0][2]);// debug: check edge input
		Traversal(nodes1,graph1);
		Traversal(nodes2,graph2);
		CreatCostmatrix(graph1,graph2,nodes1,nodes2);
//		String matrixpath = "matrix.txt";
//		for(int i =0;i<m+n;i++){
//			for(int j =0;j<m+n;j++){
//				String content = String.valueOf(costmatrix[i][j]);
////				System.out.println(costmatrix[i][j]);// debug: check cost matrix input
//				Writetxt(matrixpath,content+",");
//			}
//			Writetxt(matrixpath,"\r\n");
//		}
		System.out.println("Hungry遍历中");
		KuhnMunkres km = new KuhnMunkres(m+n);
		double[] result = new double[m+n];
		int[][] recostmatrix = new int[m+n][m+n];
		// Negate the costs so the maximum-weight matcher yields the
		// minimum-cost assignment.
		for(int i=0;i<m+n;i++){
			for(int j=0;j<m+n;j++){
				recostmatrix[i][j] = -costmatrix[i][j];
			}
		}
		int[][] re = KuhnMunkres.getMaxBipartie(recostmatrix,result);// KuhnMunkres computes the minimum-cost bipartite matching
		int len = Math.min(km.getlenX(), km.getlenY());
		System.out.println("Hungry遍历完毕"+" len:"+len);
		map = new HashMap<>();
		for(int i=0;i<len;i++){
			map.put(re[i][0],re[i][1]);
		}
		change = new HashMap<>();
		Iterator iter = map.entrySet().iterator();
		while (iter.hasNext()) {
			Map.Entry entry = (Map.Entry) iter.next();
			Object key = entry.getKey();
			Object val = entry.getValue();
			// NOTE(review): i/j are looked up via map.get(key)/map.get(val)
			// rather than using key/val directly — confirm this is intended.
			int i = map.get(key).intValue();
			int j = map.get(val).intValue();
			if(i<m)
				change.put(i, j);// build the matching map
//			System.out.println(i + " " + j);// debug: bipartite matching pairs
		}
		Iterator iter1 = change.entrySet().iterator();
		while (iter1.hasNext()) {
			Map.Entry entry = (Map.Entry) iter1.next();
			Object key = entry.getKey();
			Object val = entry.getValue();
			System.out.println(key + " " + val);// debug: matching map
		}
//		for(Edge edge : edges1){
//			int source = edge.getSource();
//			int target = edge.getTarget();
//			int weight = edge.getweight();
//			System.out.println(source + " " + target+ " " +weight);// debug: edge list
//		}
//		System.out.println(nodes1.get(0).getName());
//		System.out.println(nodes2.get(0).getName());
//		Boolean shiBoolean = nodes1.get(0).getName().equals(nodes2.get(0).getName());
//		System.out.println(shiBoolean);
		long NodeCost = NodeCost(map,m,n);
		long EdgeCost = EdgeCost(map,m,n);
		System.out.println("NodeCost:"+NodeCost);
		System.out.println("EdgeCost:"+EdgeCost);
		long EditDistance = NodeCost + EdgeCost;// graph edit distance
		double Similarity = EditDistance/(m+n+edgenum1+edgenum2);
		System.out.println("EditDistance:"+EditDistance);// debug: graph edit distance
		System.out.println("Similarity:"+Similarity);// debug: graph similarity
//		Map<Integer, Integer> map1 = nodes1.get(0).getinMap();
//		Map<Integer, Integer> map2 = nodes2.get(0).getinMap();
//		ArrayList<Integer> integer = new ArrayList<Integer>();
//		System.out.println(Aggregate.Travel(map1));
//		System.out.println(Aggregate.Travel(map2));// debug: outgoing-edge array of node 0
//		integer = Aggregate.intersect(map1, map2);
//		System.out.println(integer);// debug: outgoing-edge intersection of node 0
//		Map<Integer, Integer> test1 = nodes1.get(0).getoutMap();
//		Iterator iter = test1.entrySet().iterator();
//		while (iter.hasNext()) {
//			Map.Entry entry = (Map.Entry) iter.next();
//			Object key = entry.getKey();
//			Object val = entry.getValue();
//			System.out.println(key + " " + val);// debug: out-degree sequence of node 0
//		}
		return Similarity;
	}
	/**
	 * Node edit cost of the assignment: +1 for every matched real pair with
	 * different labels (relabel), and +1 for every real node matched to a
	 * dummy node.
	 */
	public static long NodeCost(Map<Integer, Integer> map,int m,int n){
		long nodecost = 0;
		Iterator iter = map.entrySet().iterator();
		while (iter.hasNext()) {
			Map.Entry entry = (Map.Entry) iter.next();
			Integer key = (Integer)entry.getKey();
			Integer val = (Integer)entry.getValue();
			int i = key.intValue();
			int j = val.intValue();
//			System.out.println("Node:"+i + " " + j);
			if(i<m && j<n){
				if(!(nodes1.get(i).getName().equals(nodes2.get(j).getName()))){
//					System.out.println("Node:"+i + " " + j);
//					System.out.println("Node:"+nodes1.get(i).getName() + " " + nodes2.get(j).getName());
					nodecost = nodecost+1;// relabeling cost
				}
			}
			else if(i<m && j>=n && j<m+n){
				nodecost = nodecost+1;
			}
			else if(i>=m && i<m+n && j<n){
				nodecost = nodecost+1;// matching a dummy node to a real node costs 1
			}
		}
//		System.out.println("count:"+ count);
		return nodecost;
	}
	/**
	 * Edge edit cost: sums the (min) weights of edges preserved by the node
	 * matching, then charges every non-preserved edge of either graph via
	 * |E1| + |E2| - 2 * preserved.
	 */
	public static long EdgeCost(Map<Integer, Integer> map,int m,int n){
		int count = 0;
		double edgecost = 0;
		int source1 = 0;
		int source2 = 0;
		int target1 = 0;
		int target2 = 0;
		double weight = 0;
		int weight1 = 0;
		int weight2 = 0;
		int change1 = 0;
		int change2 = 0;
		for(Edge edge1 : edges1){
			source1 = edge1.getSource();
			target1 = edge1.getTarget();
			weight1 = edge1.getweight();
			// Map both endpoints of the edge through the node matching.
			change1 = change.get(Integer.valueOf(source1)).intValue();
			change2 = change.get(Integer.valueOf(target1)).intValue();
//			System.out.println("change1:"+ change1);
//			System.out.println("change2:"+ change2);
			for(Edge edge2 : edges2){
				source2 = edge2.getSource();
				target2 = edge2.getTarget();
				if(change1 == source2 && change2 == target2){
//					count++;// use count when ignoring weights
//					System.out.println(source1+">"+source2+" "+target1+">"+target2);// inspect the matched sequence
					weight2 = edge2.getweight();
					weight = Math.min(weight1, weight2);
					edgecost = edgecost+weight;// should weights be considered?
//					System.out.println("weight:"+ weight);
				}
			}
		}
//		edgecost = count;
		System.out.println("count:"+ count);
//		System.out.println("edgecost:"+ edgecost);
		edgecost = edgenum1+edgenum2-2*edgecost;
		return (long)edgecost;
	}
	/**
	 * Builds the (m+n) x (m+n) assignment cost matrix: the upper-left block
	 * holds relabel + out-degree-difference costs between real node pairs;
	 * the diagonal of the dummy blocks holds 1 + out-degree; every other
	 * dummy cell is "infinite" (0x7fffffff).
	 */
	public static int[][] CreatCostmatrix(int[][] graph1,int[][] graph2,ArrayList<Node> nodes1,ArrayList<Node> nodes2){
		System.out.println("创建消耗矩阵中");
		int m = nodes1.size();
		int n = nodes2.size();
		double weight_out1;
		double weight_out2;
		Map<Integer, Double> outmap1 = new HashMap<>();
		Map<Integer, Double> outmap2 = new HashMap<>();
		costmatrix = new int[m+n][m+n];
		int outedge_cost = 0;
		int relabel_cost = 0;
		for(int i = 0;i < m;i++){
			outmap1 = nodes1.get(i).getoutMap();
//			for(int in : inset1){
//				System.out.println("inedge1:"+in);
//			}
			weight_out1 = Aggregate.Travel(outmap1);
			for(int j = 0;j < n;j++){
				outmap2 = nodes2.get(j).getoutMap();
//				for(int in : inset2){
//					System.out.println("inedge2:"+in);
//				}
				weight_out2 = Aggregate.Travel(outmap2);
				if(!(nodes1.get(i).getName().equals(nodes2.get(j).getName())))
					relabel_cost = 1;
				else relabel_cost = 0;
				outedge_cost = (int) Math.abs(weight_out1-weight_out2);
//				System.out.println("["+i+"]["+j+"]:");
				System.out.println("outedge_cost:"+outedge_cost);
				costmatrix[i][j] = relabel_cost+outedge_cost;// upper-left block cost
			}
		}
//		System.out.println("costmatrix:"+costmatrix[1][1]);
		for(int i = 0;i<m;i++){
			outmap1 = nodes1.get(i).getoutMap();
			weight_out1 = Aggregate.Travel(outmap1);
			outedge_cost = (int) weight_out1;
			for(int j = n;j<m+n;j++){
				costmatrix[i][j] = 0x7fffffff;// off-diagonal dummy cells cost "infinity"
			}
			costmatrix[i][n+i] = 1+ outedge_cost;// lower-left block
		}
		for(int j = 0;j<n;j++){
			outmap2 = nodes2.get(j).getoutMap();
			weight_out2 = Aggregate.Travel(outmap2);
			outedge_cost = (int) weight_out2;
			for(int i = m;i<m+n;i++){
				costmatrix[i][j] = 0x7fffffff;
			}
			costmatrix[m+j][j] = 1+ outedge_cost;// upper-right block
		}// the lower-right block stays empty (all zeros)
		return costmatrix;
	}
	// Debug helper: prints the label of every node in nodes1 whose label
	// never occurs in nodes2.
	public static void compare(ArrayList<Node> nodes1,ArrayList<Node> nodes2){
		int m = nodes1.size();
		int n = nodes2.size();
		for(int i=0;i<m;i++){
			String name1 = nodes1.get(i).getName();
			for(int j=0;j<n;j++){
				String name2 = nodes2.get(j).getName();
				if(name1.equals(name2))
					break;
				else if(j == n-1)
					System.out.println("name:"+name1);
			}
		}
	}
	/**
	 * Walks the adjacency matrix and records, for every node, its child links
	 * and outgoing-edge weights (the incoming-edge code is kept commented
	 * out, as in the original).
	 */
	public static void Traversal(ArrayList<Node> nodes,int[][] graph){
		System.out.println("遍历中");
		int size = nodes.size();
		for(int i = 0;i < size;i++){
			for(int j = 0;j<size;j++){
//				if(graph[j][i]!=0){
//					Node in = nodes.get(j);
//					int innum = nodes.get(j).getNum();
////					System.out.println(innum);// check the incoming-edge array
//					nodes.get(i).setParent(in);
//					nodes.get(i).setinMap(innum, graph[j][i]);// set to 1 if weights are ignored
//				}
				if(graph[i][j]!=0){
					Node out = nodes.get(j);
					int outnum = nodes.get(j).getNum();
//					System.out.println(outnum);// check the outgoing-edge array
					nodes.get(i).setChild(out);
					nodes.get(i).setoutMap(outnum, graph[i][j]);// set to 1 if weights are ignored
				}
			}
		}
	}// graph traversal recording incoming/outgoing edges
	/**
	 * Converts the parent->child relations of both trees into Edge lists and
	 * adjacency-matrix entries; every edge currently gets weight 1.
	 */
	public static void ReadEdge(List<ITree> list1, List<ITree> list2){
		edgenum1 = 0;
		edgenum2 = 0;
		for(ITree t : list1) {
			List<ITree> children = t.getChildren();
			int src = node2num1.get(t);
			if(children.size()!=0) {
				for(ITree child : children) {
					Edge edge = new Edge();
					int dst = node2num1.get(child);
					edge.setSource(src);
					edge.setTarget(dst);
					edge.setweight(1);//weight is 1 now as a temp.
					edges1.add(edge);
					System.out.println(src+"->"+dst);
					graph1[src][dst] = 1;
					edgenum1 = edgenum1+1;// should weights be considered?
				}
			}
		}
		for(ITree t : list2) {
			List<ITree> children = t.getChildren();
			int src = node2num2.get(t);
			if(children.size()!=0) {
				for(ITree child : children) {
					Edge edge = new Edge();
					int dst = node2num2.get(child);
					edge.setSource(src);
					edge.setTarget(dst);
					edge.setweight(1);//weight is 1 now as a temp.
					edges2.add(edge);
					System.out.println(src+"->"+dst);
					graph2[src][dst] = 1;
					edgenum2 = edgenum2+1;// should weights be considered?
				}
			}
		}
	}
	/**
	 * Converts the preorder node lists of both trees into Node objects
	 * (index, type number, type label) and fills the ITree->index maps.
	 */
	public static void ReadNode(List<ITree> list1, List<ITree> list2){
		for(int i=0;i<list1.size();i++) {
			ITree t = list1.get(i);
			Node node = new Node();
			int num = i;
			int typeNum = t.getType();
			String attr = tContext1.getTypeLabel(t);
			node.setNum(num);
			node.setTypeNume(typeNum);
			node.setName(attr);
			nodes1.add(node);
			node2num1.put(t, num);
		}
		for(int i=0;i<list2.size();i++) {
			ITree t = list2.get(i);
			Node node = new Node();
			int num = i;
			int typeNum = t.getType();
			String attr = tContext2.getTypeLabel(t);
			node.setNum(num);
			node.setTypeNume(typeNum);
			node.setName(attr);
			nodes2.add(node);
			node2num2.put(t, num);
		}
	}
	// Lists all regular files under path whose name contains "dat".
	public static ArrayList<File> getFiles(String path){
		ArrayList<File> fileList=new ArrayList<>();
		File fpath=new File(path);
		if(fpath.exists()){
			File[] files=fpath.listFiles();
			for (File file : files) {
				if(file.isFile()&&file.getName().contains("dat")){
					fileList.add(file);
				}
			}
		}
		return fileList;
	}
	// Appends Content to the text file at path; exits the JVM on failure.
	public static void Writetxt(String path,String Content) {
		FileWriter fw = null;
		try {
			fw = new FileWriter(path,true);
			fw.write(Content);
			fw.flush();
			fw.close();
		} catch (IOException e1) {
			e1.printStackTrace();
			System.out.println("写入失败");
			System.exit(-1);
		}
	}
}
| 13,046 | 30.667476 | 117 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/cost_matrix/Node.java
|
package cost_matrix;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
/**
 * Mutable graph node used by the cost-matrix computation. Carries an index,
 * a numeric type code, a label, parent/child links, and weighted
 * incoming/outgoing edge maps keyed by the neighbor's index.
 */
public class Node {

    // Index of this node within its graph.
    private int index;
    // Numeric type code of the node.
    private int typeCode;
    // Label (name) of the node.
    private String label;
    // Accumulated parent and child links.
    private ArrayList<Node> parents = new ArrayList<>();
    private ArrayList<Node> children = new ArrayList<>();
    // Edge weights keyed by the neighbor's index.
    private Map<Integer, Double> incoming = new HashMap<>();
    private Map<Integer, Double> outgoing = new HashMap<>();

    public int getNum() {
        return index;
    }

    public void setNum(int Num) {
        index = Num;
    }

    public int getTypeNume() {
        return typeCode;
    }

    public void setTypeNume(int typeNume) {
        typeCode = typeNume;
    }

    public String getName() {
        return label;
    }

    public void setName(String Name) {
        label = Name;
    }

    public ArrayList<Node> getParent() {
        return parents;
    }

    /** Adds one parent link; the list accumulates every parent set so far. */
    public void setParent(Node Parent) {
        parents.add(Parent);
    }

    public ArrayList<Node> getChild() {
        return children;
    }

    /** Adds one child link; the list accumulates every child set so far. */
    public void setChild(Node Child) {
        children.add(Child);
    }

    public Map<Integer, Double> getinMap() {
        return incoming;
    }

    /** Records an incoming edge from node {@code in} with the given weight. */
    public void setinMap(int in, double weight) {
        incoming.put(in, weight);
    }

    public Map<Integer, Double> getoutMap() {
        return outgoing;
    }

    /** Records an outgoing edge to node {@code out} with the given weight. */
    public void setoutMap(int out, double weight) {
        outgoing.put(out, weight);
    }
}
| 1,282 | 16.337838 | 55 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/cost_matrix/Normalization.java
|
package cost_matrix;
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
public class Normalization {
	// Similarity matrix over all files.
	private static double[][] Smatrix;
	// Smallest and largest similarity values seen, self-similarity (1.0) excluded.
	private static double Min;
	private static double Max;

	/**
	 * Reads the matrix size from file.txt (number of leading non-empty lines),
	 * parses the comma-separated similarity matrix from test.txt into
	 * {@link #Smatrix} while tracking Min/Max, and prints every entry above
	 * 0.3 (exact 1.0 values excluded). The commented-out tail would write a
	 * min-max-normalized copy of the matrix.
	 *
	 * @throws IOException if either input file cannot be read
	 */
	public static void getPointsList() throws IOException{
		String txtPath="D:\\workspace\\eclipse4.3\\Kieker_records_analysis-kieker1.9\\file.txt";
		int size = 0;
		// Fix: the original loop condition used str != "" (reference
		// comparison, always true for readLine results); the intended
		// "stop at the first empty line" is implemented with !str.isEmpty().
		// try-with-resources closes the reader even when reading fails.
		try (BufferedReader br = new BufferedReader(new FileReader(txtPath))) {
			String str;
			while ((str = br.readLine()) != null && !str.isEmpty()) {
				size++;
			}
		}
		System.out.println("size:"+size);// matrix size read from file.txt
		Smatrix = new double[size][size];
		String txtPath1="D:\\workspace\\eclipse4.3\\Kieker_records_analysis-kieker1.9\\test.txt";
		int count=0;
		Min = 1.0;
		Max = 0;
		try (BufferedReader br1 = new BufferedReader(new FileReader(txtPath1))) {
			String str1;
			while ((str1 = br1.readLine()) != null && !str1.isEmpty()) {
				String[] temp = str1.split(",");
//				for(int i=0;i<temp.length-1;i++){
//					System.out.println(temp[i]);
//				}
				for (int i = 0; i < temp.length - 1; i++) {
					// Track min/max over non-first, non-empty entries,
					// skipping exact self-similarities of 1.0. (A dead
					// temp[i].substring(0,1) statement was removed here.)
					if (i != 0 && !temp[i].isEmpty()) {
						double tmp = Double.parseDouble(temp[i]);
						if (tmp != 1.0) {
							if (tmp < Min)
								Min = tmp;
							if (tmp > Max)
								Max = tmp;
						}
					}
					Smatrix[count][i] = Double.valueOf(temp[i]);
				}
				count++;
			}
		}
		System.out.println("Min:"+Min);
		System.out.println("Max:"+Max);// check min/max values
//		for(int i=0;i<Smatrix.length;i++){
//			for(int j=0;j<Smatrix.length;j++){
//				System.out.println(Smatrix[i][j]+",");
//			}
//		}// check matrix input
		int num = 0;
		for (int i = 0; i < size; i++) {
			for (int j = 0; j < size; j++) {
				if (Smatrix[i][j] > 0.3 && Smatrix[i][j] != 1) {
					System.out.println(Smatrix[i][j]+","+i+","+j);
					num++;
				}
			}
		}
		System.out.println("num:"+num);
//		String outpath = "D:\\workspace\\eclipse4.3\\Kieker_records_analysis-kieker1.9\\test1.txt";
//		double x,x1 =0;
//		for(int i=0;i<size;i++){
//			for(int j=0;j<size;j++){
//				x = Smatrix[i][j];
//				x1 = (x-Min)/(Max-Min);
//				Writetxt(outpath,x1+", ");
//			}
//			Writetxt(outpath,"\r\n");
//		}// min-max normalization of the values
	}

	/**
	 * Appends {@code Content} to the text file at {@code path}; prints a
	 * failure message and terminates the JVM when writing fails.
	 */
	public static void Writetxt(String path,String Content) {
		// try-with-resources replaces the manual close, which leaked the
		// writer when write() threw before close().
		try (FileWriter fw = new FileWriter(path, true)) {
			fw.write(Content);
			fw.flush();
		} catch (IOException e1) {
			e1.printStackTrace();
			System.out.println("写入失败");
			System.exit(-1);
		}
	}

	public static void main(String[] args) throws IOException {
		getPointsList();
	}
}
| 2,645 | 24.68932 | 95 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/cost_matrix/Test.java
|
package cost_matrix;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map.Entry;
/**
 * Computes average similarities between groups of files ("packages"), based
 * on a precomputed pairwise similarity matrix, and writes the results to
 * {@code matrix.csv} (cross-package matrix) and {@code matrix1.csv}
 * (per-package average / max / min summary).
 *
 * Expected inputs (relative paths):
 * - file.txt:   lines of "index,fileName" mapping matrix rows to file names
 * - test.txt:   comma-separated similarity rows (entries after the first
 *               carry a leading blank), one row per file index
 * - target.txt: one package name per line
 */
public class Test {

    // Maps a row/column index of the similarity matrix to its file name.
    private static HashMap<Integer, String> files = new HashMap<>();
    // Pairwise similarity matrix, indexed by the ids stored in {@code files}.
    private static double[][] Smatrix;

    /**
     * Loads the matrix and the target package list, computes all
     * cross-package averages, then writes the two CSV reports.
     *
     * @throws Exception if any package name matches no file (from avgS)
     */
    public static void main(String[] args) throws Exception {
        Test test = new Test();
        String path1 = "file.txt";
        String path2 = "test.txt";
        String path3 = "target.txt";
        ArrayList<String> targetfile = new ArrayList<>();
        test.ReadFile(path1, path2);
        // Read the target package names, one per line.
        try (BufferedReader br = new BufferedReader(new FileReader(new File(path3)))) {
            String line;
            while ((line = br.readLine()) != null) {
                targetfile.add(line);
            }
        }
        // Cross-package average similarities. The diagonal stays 0.0 here and
        // is rendered as "1" when the matrix is written out below.
        double[][] matrix = new double[targetfile.size()][targetfile.size()];
        for (int i = 0; i < targetfile.size(); i++) {
            String name1 = targetfile.get(i);
            for (int j = 0; j < targetfile.size(); j++) {
                String name2 = targetfile.get(j);
                if (name1.equals(name2))
                    continue;
                double avg = test.avgS(name1, name2); // average similarity across the two packages
                System.out.println("name1:" + name1 + " name2:" + name2);
                System.out.println("avg:" + avg);
                System.out.println("--------------");
                matrix[i][j] = avg;
            }
        }
        // Write the full cross-package matrix: header row of names, then one
        // comma-terminated row per package, "1" on the diagonal.
        String path4 = "matrix.csv";
        try (BufferedWriter wr = new BufferedWriter(new FileWriter(new File(path4)))) {
            for (String element : targetfile) {
                wr.append(element + ",");
            }
            wr.newLine();
            wr.flush();
            for (int i = 0; i < targetfile.size(); i++) {
                for (int j = 0; j < targetfile.size(); j++) {
                    if (i == j) {
                        wr.append("1,");
                        continue;
                    }
                    wr.append(String.valueOf(matrix[i][j]) + ",");
                }
                wr.newLine();
                wr.flush();
            }
        }
        // Per-package summary: mean / max (with its partner's name) / min of
        // the off-diagonal entries of each row.
        String path5 = "matrix1.csv";
        try (BufferedWriter wr1 = new BufferedWriter(new FileWriter(new File(path5)))) {
            for (int i = 0; i < targetfile.size(); i++) {
                double avg = 0.0;
                double max = 0.0;
                double min = 1.0;
                String maxname = "";
                for (int j = 0; j < targetfile.size(); j++) {
                    double tmp = matrix[i][j];
                    if (i == j)
                        continue;
                    avg = avg + tmp;
                    if (tmp > max) {
                        max = tmp;
                        maxname = targetfile.get(j);
                    }
                    if (tmp < min)
                        min = tmp;
                }
                avg = avg / (targetfile.size() - 1); // mean over the other packages
                wr1.append(targetfile.get(i) + ",");
                wr1.append(String.valueOf(avg) + ",");
                wr1.append(String.valueOf(max) + ",");
                wr1.append(maxname + ",");
                wr1.append(String.valueOf(min) + ",");
                wr1.newLine();
                wr1.flush();
            }
        }
    }

    /**
     * Loads the index-to-name table from {@code path1} and the similarity
     * matrix from {@code path2} into the static fields.
     *
     * @param path1 CSV of "index,fileName" pairs
     * @param path2 comma-separated similarity rows; entries after the first
     *              carry a leading blank that is stripped before parsing
     * @throws IOException if either file cannot be read
     */
    public void ReadFile(String path1, String path2) throws IOException {
        try (BufferedReader br = new BufferedReader(new FileReader(new File(path1)))) {
            String line;
            while ((line = br.readLine()) != null) {
                String[] lines = line.split(",");
                files.put(Integer.valueOf(lines[0]), lines[1]);
            }
        }
        Smatrix = new double[files.size()][files.size()];
        try (BufferedReader br1 = new BufferedReader(new FileReader(new File(path2)))) {
            String line1;
            int num = 0;
            while ((line1 = br1.readLine()) != null) {
                String[] lines = line1.split(",");
                // The last split token is a trailing separator artifact; skip it.
                for (int i = 0; i < lines.length - 1; i++) {
                    String tmp = lines[i];
                    if (tmp.substring(0, 1).equals(" ")) {
                        // Entries after the first are written as ", x" — drop the blank.
                        Smatrix[num][i] = Double.valueOf(tmp.substring(1, tmp.length()));
                    } else {
                        Smatrix[num][i] = Double.valueOf(tmp);
                    }
                }
                num++;
            }
        }
    }

    /**
     * Average similarity among the files of a single package (self-pairs with
     * similarity exactly 1.0 are excluded from the sum but still counted in
     * the denominator, preserving the original formula).
     *
     * @param name1 package name; every file whose name contains it is included
     * @return the averaged similarity
     * @throws Exception if no file matches {@code name1}
     */
    public double avgS(String name1) throws Exception {
        ArrayList<Integer> files1 = new ArrayList<>();
        for (Entry<Integer, String> entry : files.entrySet()) {
            if (entry.getValue().contains(name1)) {
                files1.add(entry.getKey());
            }
        }
        System.out.println("size:" + files1.size());
        if (files1.isEmpty())
            throw new Exception("不能为空!");
        double avg = 0.0;
        for (Integer x : files1) {
            for (Integer y : files1) {
                if (Smatrix[x][y] == 1.0)
                    continue; // skip self-similarity entries
                avg = avg + Smatrix[x][y];
            }
        }
        avg = avg / (files1.size() * (files1.size() - 1));
        return avg;
    }

    /**
     * Average similarity between the files of two different packages. For
     * each file of the first package the mean of both matrix directions
     * against the second package is taken, then those means are averaged.
     *
     * @param name1 first package name
     * @param name2 second package name
     * @return the averaged cross-package similarity
     * @throws Exception if either package matches no file
     */
    public double avgS(String name1, String name2) throws Exception {
        ArrayList<Integer> files1 = new ArrayList<>();
        ArrayList<Integer> files2 = new ArrayList<>();
        ArrayList<Double> avgs = new ArrayList<>();
        for (Entry<Integer, String> entry : files.entrySet()) {
            int key = entry.getKey();
            String value = entry.getValue();
            if (value.contains(name1)) {
                files1.add(key);
            }
            if (value.contains(name2)) {
                files2.add(key);
            }
        }
        if (files1.isEmpty() || files2.isEmpty())
            throw new Exception("不能为空!");
        for (int i = 0; i < files1.size(); i++) {
            double avg = 0.0;
            int x = files1.get(i);
            for (Integer y : files2) {
                avg = avg + Smatrix[x][y];
            }
            for (Integer y : files2) {
                avg = avg + Smatrix[y][x];
            }
            avg = avg / (2 * files2.size()); // both directions, so no -1 here
            System.out.println(avg);
            avgs.add(avg);
        }
        double avg = 0.0;
        for (Double tmp : avgs) {
            avg = avg + tmp;
        }
        avg = avg / files1.size();
        return avg;
    }
}
| 5,427 | 25.096154 | 71 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/gumtreediff/actions/ActionClusterFinder.java
|
/*
* This file is part of GumTree.
*
* GumTree is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GumTree is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with GumTree. If not, see <http://www.gnu.org/licenses/>.
*
* Copyright 2017 Jean-Rémy Falleri <[email protected]>
*/
package gumtreediff.actions;
import java.util.List;
import java.util.Set;
import org.jgrapht.DirectedGraph;
import org.jgrapht.alg.ConnectivityInspector;
import org.jgrapht.graph.DefaultDirectedGraph;
import org.jgrapht.graph.DefaultEdge;
import gumtreediff.actions.model.Action;
import gumtreediff.actions.model.Delete;
import gumtreediff.actions.model.Insert;
import gumtreediff.actions.model.Move;
import gumtreediff.actions.model.Update;
import gumtreediff.tree.TreeContext;
/**
 * Groups related edit actions into clusters. Two actions are connected when
 * one insert/delete is nested inside another, two updates write the same
 * value, or two moves share a source parent; clusters are the connected
 * components of the resulting action graph.
 */
public class ActionClusterFinder {

    private TreeContext src;
    private TreeContext dst; // retained for API compatibility; labels render against src only
    private List<Action> actions;
    private DirectedGraph<Action, DefaultEdge> graph;
    private List<Set<Action>> clusters;

    /**
     * Builds the relation graph over the given actions and computes its
     * connected components.
     *
     * @param src     context of the source tree (used to format labels)
     * @param dst     context of the destination tree
     * @param actions the edit script to cluster
     */
    public ActionClusterFinder(TreeContext src, TreeContext dst, List<Action> actions) {
        this.src = src;
        this.dst = dst;
        this.actions = actions;
        graph = new DefaultDirectedGraph<>(DefaultEdge.class);
        for (Action a : actions)
            graph.addVertex(a);
        for (Action a1 : actions) {
            for (Action a2 : actions) {
                if (a1 != a2) {
                    if (embeddedInserts(a1, a2) || sameValueUpdates(a1, a2)
                            || sameParentMoves(a1, a2) || embeddedDeletes(a1, a2))
                        graph.addEdge(a1, a2);
                }
            }
        }
        // Was a raw ConnectivityInspector; parameterizing removes the
        // unchecked conversion without changing behavior.
        ConnectivityInspector<Action, DefaultEdge> alg = new ConnectivityInspector<>(graph);
        clusters = alg.connectedSets();
    }

    /** @return the connected components of the action graph */
    public List<Set<Action>> getClusters() {
        return clusters;
    }

    /** True when both actions are inserts and a2 is inserted under a1's node. */
    private boolean embeddedInserts(Action a1, Action a2) {
        if (!(a1 instanceof Insert && a2 instanceof Insert))
            return false;
        return ((Insert) a2).getParent().equals(((Insert) a1).getNode());
    }

    /** True when both actions are deletes and a2's node is a child of a1's node. */
    private boolean embeddedDeletes(Action a1, Action a2) {
        if (!(a1 instanceof Delete && a2 instanceof Delete))
            return false;
        Delete d2 = (Delete) a2;
        if (d2.getNode().getParent() == null)
            return false;
        return d2.getNode().getParent().equals(((Delete) a1).getNode());
    }

    /** True when both actions are moves whose nodes share the same parent. */
    private boolean sameParentMoves(Action a1, Action a2) {
        if (!(a1 instanceof Move && a2 instanceof Move))
            return false;
        Move m1 = (Move) a1;
        Move m2 = (Move) a2;
        if ((m1.getNode() == null) || (m2.getNode() == null))
            return false;
        return m1.getNode().getParent().equals(m2.getNode().getParent());
    }

    /** True when both actions are updates writing the same new value. */
    private boolean sameValueUpdates(Action a1, Action a2) {
        if (!(a1 instanceof Update && a2 instanceof Update))
            return false;
        return ((Update) a1).getValue().equals(((Update) a2).getValue());
    }

    /**
     * Human-readable label for a cluster, derived from a representative action.
     * NOTE(review): for insert/delete clusters, if no action has in-degree 0
     * the original code dereferences a null root (NPE); that behavior is
     * preserved here — confirm whether a fallback label is wanted.
     */
    public String getClusterLabel(Set<Action> cluster) {
        if (cluster.isEmpty())
            return "Unknown cluster type";
        Action first = cluster.iterator().next();
        if (first instanceof Insert) {
            Insert root = null;
            for (Action a : cluster)
                if (graph.inDegreeOf(a) == 0)
                    root = (Insert) a;
            return root.format(src);
        } else if (first instanceof Move) {
            Move m = (Move) first;
            return "MOVE from " + m.getParent().toPrettyString(src);
        } else if (first instanceof Update) {
            Update u = (Update) first;
            return "UPDATE from " + first.getNode().getLabel() + " to " + u.getValue();
        } else if (first instanceof Delete) {
            Delete root = null;
            for (Action a : cluster)
                if (graph.inDegreeOf(a) == 0)
                    root = (Delete) a;
            return root.format(src);
        } else
            return "Unknown cluster type";
    }
}
| 4,867 | 31.026316 | 88 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/gumtreediff/actions/ActionGenerator.java
|
/*
* This file is part of GumTree.
*
* GumTree is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GumTree is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with GumTree. If not, see <http://www.gnu.org/licenses/>.
*
* Copyright 2011-2015 Jean-Rémy Falleri <[email protected]>
* Copyright 2011-2015 Floréal Morandat <[email protected]>
*/
package gumtreediff.actions;
import gnu.trove.map.TIntObjectMap;
import gnu.trove.map.hash.TIntObjectHashMap;
import gumtreediff.actions.model.*;
import gumtreediff.matchers.Mapping;
import gumtreediff.matchers.MappingStore;
import gumtreediff.tree.AbstractTree;
import gumtreediff.tree.ITree;
import gumtreediff.tree.TreeUtils;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
/**
 * Generates an edit script (insert / update / move / delete {@link Action}s)
 * that transforms a source tree into a destination tree, given a node
 * mapping. A breadth-first pass over the destination emits inserts, updates
 * and moves while a working copy of the source is mutated to mirror them; a
 * final post-order pass over the working copy emits deletes for nodes that
 * were never mapped.
 */
public class ActionGenerator {
    private ITree origSrc;              // original source root, never mutated
    private ITree newSrc;               // deep copy of the source, mutated during generation
    private ITree origDst;              // destination root
    private MappingStore origMappings;  // input mappings re-linked onto the working copy
    private MappingStore newMappings;   // working mappings, extended as nodes are inserted
    private Set<ITree> dstInOrder;      // destination nodes already aligned
    private Set<ITree> srcInOrder;      // working-copy nodes already aligned
    private int lastId;                 // id counter for freshly inserted placeholder nodes
    private List<Action> actions;       // the generated script
    private TIntObjectMap<ITree> origSrcTrees;  // id -> node in the original source
    private TIntObjectMap<ITree> cpySrcTrees;   // id -> node in the working copy

    /**
     * @param src      source tree root
     * @param dst      destination tree root
     * @param mappings mapping between src and dst nodes
     */
    public ActionGenerator(ITree src, ITree dst, MappingStore mappings) {
        this.origSrc = src;
        this.newSrc = this.origSrc.deepCopy();
        this.origDst = dst;
        // Index both the original source and its copy by node id, so actions
        // can reference the real (original) nodes.
        origSrcTrees = new TIntObjectHashMap<>();
        for (ITree t: origSrc.getTrees())
            origSrcTrees.put(t.getId(), t);
        cpySrcTrees = new TIntObjectHashMap<>();
        for (ITree t: newSrc.getTrees())
            cpySrcTrees.put(t.getId(), t);
        // Re-link the input mappings so their source side points into the copy.
        origMappings = new MappingStore();
        for (Mapping m: mappings)
            this.origMappings.link(cpySrcTrees.get(m.getFirst().getId()), m.getSecond());
        this.newMappings = origMappings.copy();
    }

    /** @return the script produced by the last {@link #generate()} call (null before). */
    public List<Action> getActions() {
        return actions;
    }

    /**
     * Computes and returns the edit script. Mutates the working copy of the
     * source tree as a side effect; call once per instance.
     */
    public List<Action> generate() {
        // Fake roots let the real roots be handled like any other node.
        ITree srcFakeRoot = new AbstractTree.FakeTree(newSrc);
        ITree dstFakeRoot = new AbstractTree.FakeTree(origDst);
        newSrc.setParent(srcFakeRoot);
        origDst.setParent(dstFakeRoot);
        actions = new ArrayList<>();
        dstInOrder = new HashSet<>();
        srcInOrder = new HashSet<>();
        lastId = newSrc.getSize() + 1;
        newMappings.link(srcFakeRoot, dstFakeRoot);
        List<ITree> bfsDst = TreeUtils.breadthFirst(origDst);
        // Top-down pass over the destination: x is the current dst node,
        // y its parent, z the working-copy node mapped to y, w the
        // working-copy node corresponding to x.
        for (ITree x: bfsDst) {
            ITree w = null;
            ITree y = x.getParent();
            ITree z = newMappings.getSrc(y);
            if (!newMappings.hasDst(x)) {
                int k = findPos(x);
                // Insertion case : insert new node.
                w = new AbstractTree.FakeTree();
                w.setId(newId());
                // In order to use the real nodes from the second tree, we
                // furnish x instead of w and fake that x has the newly
                // generated ID.
                Action ins = new Insert(x, origSrcTrees.get(z.getId()), k);
                actions.add(ins);
                //System.out.println(ins);
                origSrcTrees.put(w.getId(), x);
                newMappings.link(w, x);
                z.getChildren().add(k, w);
                w.setParent(z);
            } else {
                w = newMappings.getSrc(x);
                if (!x.equals(origDst)) { // TODO => x != origDst // Case of the root
                    ITree v = w.getParent();
                    // Label changed -> update action.
                    if (!w.getLabel().equals(x.getLabel())) {
                        actions.add(new Update(origSrcTrees.get(w.getId()), x.getLabel()));
                        w.setLabel(x.getLabel());
                    }
                    // Parent changed -> move action, mirrored on the copy.
                    if (!z.equals(v)) {
                        int k = findPos(x);
                        Action mv = new Move(origSrcTrees.get(w.getId()), origSrcTrees.get(z.getId()), k);
                        actions.add(mv);
                        //System.out.println(mv);
                        int oldk = w.positionInParent();
                        z.getChildren().add(k, w);
                        w.getParent().getChildren().remove(oldk);
                        w.setParent(z);
                    }
                }
            }
            //FIXME not sure why :D
            srcInOrder.add(w);
            dstInOrder.add(x);
            alignChildren(w, x);
        }
        // Bottom-up pass: anything in the copy that never got mapped is a delete.
        for (ITree w : newSrc.postOrder()) {
            if (!newMappings.hasSrc(w)) {
                actions.add(new Delete(origSrcTrees.get(w.getId())));
                //w.getParent().getChildren().remove(w);
            }
        }
        //FIXME should ensure isomorphism.
        return actions;
    }

    /**
     * Aligns the children of a matched pair (w in the copy, x in the dst):
     * children that form a longest common subsequence stay put; the remaining
     * matched children are emitted as moves within the same parent.
     */
    private void alignChildren(ITree w, ITree x) {
        srcInOrder.removeAll(w.getChildren());
        dstInOrder.removeAll(x.getChildren());
        // s1/s2: children of w/x whose match lies among the other's children.
        List<ITree> s1 = new ArrayList<>();
        for (ITree c: w.getChildren())
            if (newMappings.hasSrc(c))
                if (x.getChildren().contains(newMappings.getDst(c)))
                    s1.add(c);
        List<ITree> s2 = new ArrayList<>();
        for (ITree c: x.getChildren())
            if (newMappings.hasDst(c))
                if (w.getChildren().contains(newMappings.getSrc(c)))
                    s2.add(c);
        List<Mapping> lcs = lcs(s1, s2);
        for (Mapping m : lcs) {
            srcInOrder.add(m.getFirst());
            dstInOrder.add(m.getSecond());
        }
        for (ITree a : s1) {
            for (ITree b: s2 ) {
                if (origMappings.has(a, b)) {
                    if (!lcs.contains(new Mapping(a, b))) {
                        int k = findPos(b);
                        Action mv = new Move(origSrcTrees.get(a.getId()), origSrcTrees.get(w.getId()), k);
                        actions.add(mv);
                        //System.out.println(mv);
                        int oldk = a.positionInParent();
                        w.getChildren().add(k, a);
                        if (k  < oldk ) // FIXME this is an ugly way to patch the index
                            oldk ++;
                        a.getParent().getChildren().remove(oldk);
                        a.setParent(w);
                        srcInOrder.add(a);
                        dstInOrder.add(b);
                    }
                }
            }
        }
    }

    /**
     * Finds the insertion position in the working copy for destination node x:
     * 0 when x is (or precedes) the first in-order sibling, otherwise one past
     * the copy-side position of x's rightmost in-order left sibling.
     */
    private int findPos(ITree x) {
        ITree y = x.getParent();
        List<ITree> siblings = y.getChildren();
        for (ITree c : siblings) {
            if (dstInOrder.contains(c)) {
                if (c.equals(x)) return 0;
                else break;
            }
        }
        int xpos = x.positionInParent();
        ITree v = null;
        for (int i = 0; i < xpos; i++) {
            ITree c = siblings.get(i);
            if (dstInOrder.contains(c)) v = c;
        }
        //if (v == null) throw new RuntimeException("No rightmost sibling in order");
        if (v == null) return 0;
        ITree u = newMappings.getSrc(v);
        // siblings = u.getParent().getChildren();
        // int upos = siblings.indexOf(u);
        int upos = u.positionInParent();
        // int r = 0;
        // for (int i = 0; i <= upos; i++)
        //     if (srcInOrder.contains(siblings.get(i))) r++;
        return upos + 1;
    }

    // Next id for placeholder nodes inserted into the working copy.
    private int newId() {
        return ++lastId;
    }

    /**
     * Longest common subsequence between two child lists, where a copy-side
     * node and a dst-side node "match" when they are linked in newMappings.
     */
    private List<Mapping> lcs(List<ITree> x, List<ITree> y) {
        int m = x.size();
        int n = y.size();
        List<Mapping> lcs = new ArrayList<>();
        int[][] opt = new int[m + 1][n + 1];
        for (int i = m - 1; i >= 0; i--) {
            for (int j = n - 1; j >= 0; j--) {
                if (newMappings.getSrc(y.get(j)).equals(x.get(i))) opt[i][j] = opt[i + 1][j + 1] + 1;
                else  opt[i][j] = Math.max(opt[i + 1][j], opt[i][j + 1]);
            }
        }
        int i = 0, j = 0;
        while (i < m && j < n) {
            if (newMappings.getSrc(y.get(j)).equals(x.get(i))) {
                lcs.add(new Mapping(x.get(i), y.get(j)));
                i++;
                j++;
            } else if (opt[i + 1][j] >= opt[i][j + 1]) i++;
            else j++;
        }
        return lcs;
    }
}
| 8,722 | 32.293893 | 106 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/gumtreediff/actions/ActionUtil.java
|
/*
* This file is part of GumTree.
*
* GumTree is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GumTree is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with GumTree. If not, see <http://www.gnu.org/licenses/>.
*
* Copyright 2011-2015 Jean-Rémy Falleri <[email protected]>
* Copyright 2011-2015 Floréal Morandat <[email protected]>
*/
package gumtreediff.actions;
import java.util.List;
import gumtreediff.actions.model.Action;
import gumtreediff.actions.model.Delete;
import gumtreediff.actions.model.Insert;
import gumtreediff.actions.model.Move;
import gumtreediff.actions.model.Update;
import gumtreediff.tree.TreeContext;
/**
 * Utility for replaying an edit script against the trees of a context.
 */
public class ActionUtil {
    private ActionUtil() {}

    /**
     * Applies each action of the script, in order, mutating the trees it
     * references, and returns the same context for chaining.
     *
     * @throws RuntimeException on an action of unknown type
     */
    public static TreeContext apply(TreeContext context, List<Action> actions) {
        for (Action current : actions) {
            if (current instanceof Insert) {
                Insert insert = (Insert) current;
                insert.getParent().insertChild(insert.getNode(), insert.getPosition());
                continue;
            }
            if (current instanceof Update) {
                Update update = (Update) current;
                update.getNode().setLabel(update.getValue());
                continue;
            }
            if (current instanceof Move) {
                Move move = (Move) current;
                // Detach from the old parent, then re-attach at the new position.
                move.getNode().getParent().getChildren().remove(move.getNode());
                move.getParent().insertChild(move.getNode(), move.getPosition());
                continue;
            }
            if (current instanceof Delete) {
                Delete delete = (Delete) current;
                delete.getNode().getParent().getChildren().remove(delete.getNode());
                continue;
            }
            throw new RuntimeException("No such action: " + current);
        }
        return context;
    }
}
| 2,166 | 38.4 | 87 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/gumtreediff/actions/LeavesClassifier.java
|
/*
* This file is part of GumTree.
*
* GumTree is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GumTree is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with GumTree. If not, see <http://www.gnu.org/licenses/>.
*
* Copyright 2011-2015 Jean-Rémy Falleri <[email protected]>
* Copyright 2011-2015 Floréal Morandat <[email protected]>
*/
package gumtreediff.actions;
import java.util.List;
import java.util.Set;
import gumtreediff.actions.model.Action;
import gumtreediff.actions.model.Delete;
import gumtreediff.actions.model.Insert;
import gumtreediff.actions.model.Move;
import gumtreediff.actions.model.Update;
import gumtreediff.matchers.Mapping;
import gumtreediff.matchers.Matcher;
import gumtreediff.tree.ITree;
import gumtreediff.tree.TreeContext;
/**
 * Classifier that only records "leaf" actions: actions whose node contains
 * no other action's node among its descendants.
 */
public class LeavesClassifier extends TreeClassifier {

    public LeavesClassifier(TreeContext src, TreeContext dst, Set<Mapping> rawMappings, List<Action> actions) {
        super(src, dst, rawMappings, actions);
    }

    public LeavesClassifier(TreeContext src, TreeContext dst, Matcher m) {
        super(src, dst, m);
    }

    /** Sorts each leaf action into the matching src/dst bucket. */
    @Override
    public void classify() {
        for (Action action : actions) {
            // isLeafAction is a pure scan, so hoisting it does not change results.
            if (!isLeafAction(action))
                continue;
            if (action instanceof Delete) {
                srcDelTrees.add(action.getNode());
            } else if (action instanceof Insert) {
                dstAddTrees.add(action.getNode());
            } else if (action instanceof Update) {
                srcUpdTrees.add(action.getNode());
                dstUpdTrees.add(mappings.getDst(action.getNode()));
            } else if (action instanceof Move) {
                srcMvTrees.add(action.getNode());
                dstMvTrees.add(mappings.getDst(action.getNode()));
            }
        }
    }

    /** True when no other action targets a descendant of this action's node. */
    private boolean isLeafAction(Action candidate) {
        for (ITree descendant : candidate.getNode().getDescendants()) {
            for (Action other : actions)
                if (candidate != other && descendant == other.getNode())
                    return false;
        }
        return true;
    }
}
| 2,482 | 33.486111 | 111 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/gumtreediff/actions/RootAndLeavesClassifier.java
|
/*
* This file is part of GumTree.
*
* GumTree is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GumTree is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with GumTree. If not, see <http://www.gnu.org/licenses/>.
*
* Copyright 2011-2015 Jean-Rémy Falleri <[email protected]>
* Copyright 2011-2015 Floréal Morandat <[email protected]>
*/
package gumtreediff.actions;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import gumtreediff.actions.model.Action;
import gumtreediff.actions.model.Delete;
import gumtreediff.actions.model.Insert;
import gumtreediff.actions.model.Move;
import gumtreediff.actions.model.Update;
import gumtreediff.matchers.Mapping;
import gumtreediff.matchers.Matcher;
import gumtreediff.tree.ITree;
import gumtreediff.tree.TreeContext;
/**
 * Classifier that records every action's node but then reduces the add and
 * delete sets to their roots: trees whose parent is not itself added/deleted.
 */
public class RootAndLeavesClassifier extends TreeClassifier {

    public RootAndLeavesClassifier(TreeContext src, TreeContext dst, Set<Mapping> rawMappings, List<Action> actions) {
        super(src, dst, rawMappings, actions);
    }

    public RootAndLeavesClassifier(TreeContext src, TreeContext dst, Matcher m) {
        super(src, dst, m);
    }

    @Override
    public void classify() {
        for (Action action : actions) {
            ITree node = action.getNode();
            if (action instanceof Insert) {
                dstAddTrees.add(node);
            } else if (action instanceof Delete) {
                srcDelTrees.add(node);
            } else if (action instanceof Update) {
                srcUpdTrees.add(node);
                dstUpdTrees.add(mappings.getDst(node));
            } else if (action instanceof Move) {
                srcMvTrees.add(node);
                dstMvTrees.add(mappings.getDst(node));
            }
        }
        // Collapse whole added/deleted subtrees to their topmost nodes.
        dstAddTrees = keepRoots(dstAddTrees);
        srcDelTrees = keepRoots(srcDelTrees);
    }

    /** Keeps only the trees whose parent is not also in the given set. */
    private static Set<ITree> keepRoots(Set<ITree> trees) {
        Set<ITree> roots = new HashSet<>();
        for (ITree tree : trees) {
            if (!trees.contains(tree.getParent()))
                roots.add(tree);
        }
        return roots;
    }
}
| 2,658 | 33.089744 | 118 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/gumtreediff/actions/RootsClassifier.java
|
/*
* This file is part of GumTree.
*
* GumTree is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GumTree is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with GumTree. If not, see <http://www.gnu.org/licenses/>.
*
* Copyright 2011-2015 Jean-Rémy Falleri <[email protected]>
* Copyright 2011-2015 Floréal Morandat <[email protected]>
*/
package gumtreediff.actions;
import java.util.List;
import java.util.Set;
import gumtreediff.actions.model.Action;
import gumtreediff.actions.model.Delete;
import gumtreediff.actions.model.Insert;
import gumtreediff.actions.model.Move;
import gumtreediff.actions.model.Update;
import gumtreediff.matchers.Mapping;
import gumtreediff.matchers.Matcher;
import gumtreediff.tree.TreeContext;
/**
 * Classifier that records every action's node, without any root/leaf
 * filtering: each action contributes its node to the bucket of its kind.
 */
public class RootsClassifier extends TreeClassifier {

    public RootsClassifier(TreeContext src, TreeContext dst, Set<Mapping> rawMappings, List<Action> script) {
        super(src, dst, rawMappings, script);
    }

    public RootsClassifier(TreeContext src, TreeContext dst, Matcher m) {
        super(src, dst, m);
    }

    /** One pass over the script; update/move also record the mapped dst node. */
    @Override
    public void classify() {
        for (Action act : actions) {
            if (act instanceof Delete) {
                srcDelTrees.add(act.getNode());
            } else if (act instanceof Insert) {
                dstAddTrees.add(act.getNode());
            } else if (act instanceof Update) {
                srcUpdTrees.add(act.getNode());
                dstUpdTrees.add(mappings.getDst(act.getNode()));
            } else if (act instanceof Move) {
                srcMvTrees.add(act.getNode());
                dstMvTrees.add(mappings.getDst(act.getNode()));
            }
        }
    }
}
| 2,110 | 33.606557 | 109 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/gumtreediff/actions/TreeClassifier.java
|
/*
* This file is part of GumTree.
*
* GumTree is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GumTree is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with GumTree. If not, see <http://www.gnu.org/licenses/>.
*
* Copyright 2011-2015 Jean-Rémy Falleri <[email protected]>
* Copyright 2011-2015 Floréal Morandat <[email protected]>
*/
package gumtreediff.actions;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import gumtreediff.actions.model.Action;
import gumtreediff.matchers.Mapping;
import gumtreediff.matchers.MappingStore;
import gumtreediff.matchers.Matcher;
import gumtreediff.tree.ITree;
import gumtreediff.tree.TreeContext;
/**
 * Base class for classifiers that partition the nodes touched by an edit
 * script into six buckets: updated / moved / deleted on the source side and
 * updated / moved / added on the destination side. Subclasses decide which
 * actions count by implementing {@link #classify()}.
 */
public abstract class TreeClassifier {

    protected Set<ITree> srcUpdTrees;
    protected Set<ITree> dstUpdTrees;
    protected Set<ITree> srcMvTrees;
    protected Set<ITree> dstMvTrees;
    protected Set<ITree> srcDelTrees;
    protected Set<ITree> dstAddTrees;
    protected TreeContext src;
    protected TreeContext dst;
    protected MappingStore mappings;
    protected List<Action> actions;

    /** Classifies a precomputed edit script. */
    public TreeClassifier(TreeContext srcContext, TreeContext dstContext, Set<Mapping> rawMappings, List<Action> script) {
        this(srcContext, dstContext, rawMappings);
        this.actions = script;
        classify();
    }

    /** Derives the edit script from the matcher, then classifies it. */
    public TreeClassifier(TreeContext srcContext, TreeContext dstContext, Matcher matcher) {
        this(srcContext, dstContext, matcher.getMappingsAsSet());
        ActionGenerator generator = new ActionGenerator(srcContext.getRoot(), dstContext.getRoot(), matcher.getMappings());
        generator.generate();
        this.actions = generator.getActions();
        // NOTE(review): classify() is an overridable call from a constructor;
        // kept as-is for behavioral compatibility with subclasses.
        classify();
    }

    private TreeClassifier(TreeContext srcContext, TreeContext dstContext, Set<Mapping> rawMappings) {
        this.src = srcContext;
        this.dst = dstContext;
        this.mappings = new MappingStore(rawMappings);
        // One empty bucket per category; subclasses fill them in classify().
        this.srcUpdTrees = new HashSet<>();
        this.dstUpdTrees = new HashSet<>();
        this.srcMvTrees = new HashSet<>();
        this.dstMvTrees = new HashSet<>();
        this.srcDelTrees = new HashSet<>();
        this.dstAddTrees = new HashSet<>();
    }

    /** Fills the six classification sets from {@link #actions}. */
    public abstract void classify();

    public Set<ITree> getSrcUpdTrees() {
        return srcUpdTrees;
    }

    public Set<ITree> getDstUpdTrees() {
        return dstUpdTrees;
    }

    public Set<ITree> getSrcMvTrees() {
        return srcMvTrees;
    }

    public Set<ITree> getDstMvTrees() {
        return dstMvTrees;
    }

    public Set<ITree> getSrcDelTrees() {
        return srcDelTrees;
    }

    public Set<ITree> getDstAddTrees() {
        return dstAddTrees;
    }
}
| 3,025 | 26.761468 | 109 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/gumtreediff/actions/model/Action.java
|
/*
* This file is part of GumTree.
*
* GumTree is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GumTree is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with GumTree. If not, see <http://www.gnu.org/licenses/>.
*
* Copyright 2011-2015 Jean-Rémy Falleri <[email protected]>
* Copyright 2011-2015 Floréal Morandat <[email protected]>
*/
package gumtreediff.actions.model;
import gumtreediff.tree.ITree;
import gumtreediff.tree.TreeContext;
/**
 * Base class of all edit-script actions. Every action targets exactly one
 * tree node; subclasses add the remaining parameters (parent, position,
 * new value) as needed.
 */
public abstract class Action {
    // The tree node this action operates on.
    protected ITree node;

    public Action(ITree node) {
        this.node = node;
    }

    /** @return the node this action targets */
    public ITree getNode() {
        return node;
    }

    public void setNode(ITree node) {
        this.node = node;
    }

    /** Short mnemonic of the action kind (e.g. "INS", "DEL"). */
    public abstract String getName();

    /** Compact description using node short strings. */
    @Override
    public abstract String toString();

    /** Description resolved against a tree context for readable node names. */
    public abstract String format(TreeContext ctx);
}
| 1,329 | 25.6 | 78 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/gumtreediff/actions/model/Addition.java
|
/*
* This file is part of GumTree.
*
* GumTree is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GumTree is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with GumTree. If not, see <http://www.gnu.org/licenses/>.
*
* Copyright 2011-2015 Jean-Rémy Falleri <[email protected]>
* Copyright 2011-2015 Floréal Morandat <[email protected]>
*/
package gumtreediff.actions.model;
import gumtreediff.tree.ITree;
import gumtreediff.tree.TreeContext;
/**
 * Base class of actions that attach a node under a parent at a given child
 * index (insert and move).
 */
public abstract class Addition extends Action {
    // Parent under which the node is (re)attached.
    protected ITree parent;
    // Child index within the parent.
    protected int pos;

    public Addition(ITree node, ITree parent, int pos) {
        super(node);
        this.parent = parent;
        this.pos = pos;
    }

    public ITree getParent() {
        return parent;
    }

    public int getPosition() {
        return pos;
    }

    @Override
    public String toString() {
        StringBuilder text = new StringBuilder(getName());
        text.append(" ").append(node.toShortString());
        text.append(" to ").append(parent.toShortString());
        text.append(" at ").append(pos);
        return text.toString();
    }

    @Override
    public String format(TreeContext ctx) {
        StringBuilder text = new StringBuilder(getName());
        text.append(" ").append(node.toPrettyString(ctx));
        text.append(" to ").append(parent.toPrettyString(ctx));
        text.append(" at ").append(pos);
        return text.toString();
    }
}
| 1,624 | 27.508772 | 111 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/gumtreediff/actions/model/Delete.java
|
/*
* This file is part of GumTree.
*
* GumTree is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GumTree is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with GumTree. If not, see <http://www.gnu.org/licenses/>.
*
* Copyright 2011-2015 Jean-Rémy Falleri <[email protected]>
* Copyright 2011-2015 Floréal Morandat <[email protected]>
*/
package gumtreediff.actions.model;
import gumtreediff.tree.ITree;
import gumtreediff.tree.TreeContext;
/**
 * Action that removes a node (and, implicitly, its subtree) from the tree.
 */
public class Delete extends Action {

    public Delete(ITree node) {
        super(node);
    }

    @Override
    public String getName() {
        return "DEL";
    }

    @Override
    public String toString() {
        return String.format("%s %s", getName(), node.toShortString());
    }

    @Override
    public String format(TreeContext ctx) {
        return String.format("%s %s", getName(), node.toPrettyString(ctx));
    }
}
| 1,333 | 26.791667 | 78 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/gumtreediff/actions/model/Insert.java
|
/*
* This file is part of GumTree.
*
* GumTree is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GumTree is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with GumTree. If not, see <http://www.gnu.org/licenses/>.
*
* Copyright 2011-2015 Jean-Rémy Falleri <[email protected]>
* Copyright 2011-2015 Floréal Morandat <[email protected]>
*/
package gumtreediff.actions.model;
import gumtreediff.tree.ITree;
/**
 * An edit action representing the insertion of a node under a parent at a
 * given position. Node, parent and position handling live in {@code Addition}.
 */
public class Insert extends Addition {
    public Insert(ITree node, ITree parent, int pos) {
        super(node, parent, pos);
    }
    @Override
    public String getName() {
        // Short mnemonic used when serializing edit scripts.
        return "INS";
    }
}
| 1,103 | 28.837838 | 78 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/gumtreediff/actions/model/Move.java
|
/*
* This file is part of GumTree.
*
* GumTree is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GumTree is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with GumTree. If not, see <http://www.gnu.org/licenses/>.
*
* Copyright 2011-2015 Jean-Rémy Falleri <[email protected]>
* Copyright 2011-2015 Floréal Morandat <[email protected]>
*/
package gumtreediff.actions.model;
import gumtreediff.tree.ITree;
/**
 * An edit action representing moving an existing node under a (possibly new)
 * parent at a given position. Node, parent and position handling live in
 * {@code Addition}.
 */
public class Move extends Addition {
    public Move(ITree node, ITree parent, int pos) {
        super(node, parent, pos);
    }
    @Override
    public String getName() {
        // Short mnemonic used when serializing edit scripts.
        return "MOV";
    }
}
| 1,099 | 28.72973 | 78 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/gumtreediff/actions/model/Update.java
|
/*
* This file is part of GumTree.
*
* GumTree is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GumTree is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with GumTree. If not, see <http://www.gnu.org/licenses/>.
*
* Copyright 2011-2015 Jean-Rémy Falleri <[email protected]>
* Copyright 2011-2015 Floréal Morandat <[email protected]>
*/
package gumtreediff.actions.model;
import gumtreediff.tree.ITree;
import gumtreediff.tree.TreeContext;
/**
 * An edit action representing the relabeling of a node: the node keeps its
 * position in the tree but its label is replaced by a new value.
 */
public class Update extends Action {
    /** The new label assigned to the node. */
    private final String value;

    public Update(ITree node, String value) {
        super(node);
        this.value = value;
    }

    @Override
    public String getName() {
        return "UPD";
    }

    public String getValue() {
        return value;
    }

    @Override
    public String toString() {
        return describe(node.toShortString());
    }

    @Override
    public String format(TreeContext ctx) {
        return describe(node.toPrettyString(ctx));
    }

    /** Builds the "UPD &lt;node&gt; from &lt;old&gt; to &lt;new&gt;" description. */
    private String describe(String nodeText) {
        return getName() + " " + nodeText + " from " + node.getLabel() + " to " + value;
    }
}
| 1,559 | 27.363636 | 104 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/gumtreediff/gen/Generators.java
|
/*
* This file is part of GumTree.
*
* GumTree is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GumTree is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with GumTree. If not, see <http://www.gnu.org/licenses/>.
*
* Copyright 2011-2015 Jean-Rémy Falleri <[email protected]>
* Copyright 2011-2015 Floréal Morandat <[email protected]>
*/
package gumtreediff.gen;
import java.io.IOException;
import java.util.Arrays;
import java.util.regex.Pattern;
import gumtreediff.tree.TreeContext;
/**
 * Singleton registry of {@link TreeGenerator}s, keyed by the file-name
 * patterns declared on each generator's {@link Register} annotation.
 */
public class Generators extends Registry<String, TreeGenerator, Register> {
    private static Generators registry;

    /** Returns the lazily-created singleton instance. */
    public static final Generators getInstance() {
        if (registry == null)
            registry = new Generators();
        return registry;
    }

    /**
     * Parses {@code file} with the highest-priority generator whose accept
     * pattern matches the file name.
     *
     * @throws UnsupportedOperationException when no generator matches
     */
    public TreeContext getTree(String file) throws UnsupportedOperationException, IOException {
        TreeGenerator p = get(file);
        if (p == null)
            throw new UnsupportedOperationException("No generator found for file: " + file);
        return p.generateFromFile(file);
    }

    /**
     * Parses {@code file} with the generator registered under the given id.
     *
     * @throws UnsupportedOperationException when no generator has that id
     */
    public TreeContext getTree(String generator, String file) throws UnsupportedOperationException, IOException {
        // Reuse the inherited id lookup instead of scanning entries by hand.
        Entry e = findById(generator);
        if (e != null)
            return e.instantiate(null).generateFromFile(file);
        throw new UnsupportedOperationException("No generator \"" + generator + "\" found.");
    }

    @Override
    protected Entry newEntry(Class<? extends TreeGenerator> clazz, Register annotation) {
        return new Entry(annotation.id(), clazz, defaultFactory(clazz), annotation.priority()) {
            final Pattern[] accept;
            {
                // Pre-compile the accept patterns declared on @Register once.
                String[] accept = annotation.accept();
                this.accept = new Pattern[accept.length];
                for (int i = 0; i < accept.length; i++)
                    this.accept[i] = Pattern.compile(accept[i]);
            }
            @Override
            protected boolean handle(String key) {
                for (Pattern pattern : accept)
                    if (pattern.matcher(key).find())
                        return true;
                return false;
            }
            @Override
            public String toString() {
                return String.format("%s: %s", Arrays.toString(accept), clazz.getCanonicalName());
            }
        };
    }
}
| 2,846 | 34.5875 | 113 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/gumtreediff/gen/Register.java
|
/*
* This file is part of GumTree.
*
* GumTree is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GumTree is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with GumTree. If not, see <http://www.gnu.org/licenses/>.
*
* Copyright 2011-2015 Jean-Rémy Falleri <[email protected]>
* Copyright 2011-2015 Floréal Morandat <[email protected]>
*/
package gumtreediff.gen;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import org.atteo.classindex.IndexAnnotated;
/**
 * Marks a class for automatic registration in a {@code Registry}
 * (e.g. tree generators). Indexed at compile time via ClassIndex.
 */
@Retention(RetentionPolicy.RUNTIME)
@IndexAnnotated
@Target(ElementType.TYPE)
public @interface Register {
    /** Unique identifier of the registered component. */
    String id();
    /** Regular expressions describing which keys (file names) this component accepts. */
    String[] accept() default { };
    /** Registry ordering; lower values are consulted first (see {@code Registry.Priority}). */
    int priority() default Registry.Priority.MEDIUM;
}
| 1,271 | 32.473684 | 78 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/gumtreediff/gen/Registry.java
|
/*
* This file is part of GumTree.
*
* GumTree is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GumTree is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with GumTree. If not, see <http://www.gnu.org/licenses/>.
*
* Copyright 2011-2015 Jean-Rémy Falleri <[email protected]>
* Copyright 2011-2015 Floréal Morandat <[email protected]>
*/
package gumtreediff.gen;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.util.Arrays;
import java.util.Collections;
import java.util.Set;
import java.util.TreeSet;
/**
 * Generic priority-ordered registry mapping keys of type {@code K} to
 * factories for components of type {@code C}, registered via annotations of
 * type {@code A}.
 */
public abstract class Registry<K, C, A> {
    /**
     * Registered entries, sorted by ascending priority and then
     * case-insensitively by id, so lookups scan higher-priority entries first.
     */
    Set<Entry> entries = new TreeSet<>((o1, o2) -> {
        // Integer.compare is overflow-safe, unlike a plain subtraction.
        int cmp = Integer.compare(o1.priority, o2.priority);
        if (cmp == 0)
            cmp = o1.id.compareToIgnoreCase(o2.id); // FIXME or not ... is id a good unique stuff
        return cmp;
    });

    /** Well-known priority constants; lower values win. */
    public static class Priority {
        public static final int MAXIMUM = 0;
        public static final int HIGH = 25;
        public static final int MEDIUM = 50;
        public static final int LOW = 75;
        public static final int MINIMUM = 100;
    }

    /** Instantiates the component handling {@code key}, or returns null when none does. */
    public C get(K key, Object... args) {
        Factory<? extends C> factory = getFactory(key);
        if (factory != null)
            return factory.instantiate(args);
        return null;
    }

    /** Returns the factory of the first entry handling {@code key}, or null. */
    public Factory<? extends C> getFactory(K key) {
        Entry entry = find(key);
        if (entry != null)
            return entry.factory;
        return null;
    }

    protected Entry find(K key) {
        // findEntry already yields null when no entry handles the key,
        // so the former explicit null check added nothing.
        return findEntry(key);
    }

    /** Looks an entry up by its unique id, or returns null. */
    protected Entry findById(String id) {
        for (Entry e: entries)
            if (e.id.equals(id))
                return e;
        return null;
    }

    /** Registers {@code clazz} using the metadata carried by {@code annotation}. */
    public void install(Class<? extends C> clazz, A annotation) {
        Entry entry = newEntry(clazz, annotation);
        entries.add(entry);
    }

    protected abstract Entry newEntry(Class<? extends C> clazz, A annotation);

    /** Returns the first entry (in priority order) whose handle() accepts {@code key}. */
    protected Entry findEntry(K key) {
        for (Entry e: entries)
            if (e.handle(key))
                return e;
        return null;
    }

    /** Looks an entry up by its registered class, or returns null. */
    public Entry findByClass(Class<? extends C> aClass) {
        for (Entry e: entries)
            if (e.clazz.equals(aClass))
                return e;
        return null;
    }

    /** Read-only view of all registered entries, in priority order. */
    public Set<Entry> getEntries() {
        return Collections.unmodifiableSet(entries);
    }

    /** One registration: id, priority, implementation class and its factory. */
    public abstract class Entry {
        public final String id;
        public final int priority;
        final Class<? extends C> clazz;
        final Factory<? extends C> factory;

        protected Entry(String id, Class<? extends C> clazz, Factory<? extends C> factory, int priority) {
            this.id = id;
            this.clazz = clazz;
            this.factory = factory;
            this.priority = priority;
        }

        /**
         * Instantiates the component.
         * NOTE: reflective instantiation failures are swallowed and surface
         * as a null result (pre-existing contract kept for compatibility).
         */
        public C instantiate(Object[] args) {
            try {
                return factory.newInstance(args);
            } catch (IllegalAccessException | InvocationTargetException | InstantiationException e) {
                return null;
            }
        }

        /** Whether this entry accepts the given lookup key. */
        protected abstract boolean handle(K key);

        @Override
        public String toString() {
            return id;
        }
    }

    /** Builds a factory around the constructor of {@code clazz} matching {@code signature}. */
    protected Factory<? extends C> defaultFactory(Class<? extends C> clazz, Class... signature) {
        try {
            Constructor<? extends C> ctor = clazz.getConstructor(signature);
            return (args) -> ctor.newInstance(args);
        } catch (NoSuchMethodException e) {
            System.out.println(Arrays.toString(clazz.getConstructors()));
            throw new RuntimeException(String.format("This is a static bug. Constructor %s(%s) not found",
                    clazz.getName(), Arrays.toString(signature)), e);
        }
    }

    /** Functional factory abstraction over a reflective constructor. */
    public interface Factory<C> {
        C newInstance(Object[] args) throws IllegalAccessException, InvocationTargetException, InstantiationException;

        /** Like newInstance, but logs and returns null instead of throwing. */
        default C instantiate(Object[] args) {
            try {
                return newInstance(args);
            } catch (IllegalAccessException | InvocationTargetException | InstantiationException e) {
                System.out.println(e.getMessage());
                return null;
            }
        }
    }
}
| 4,835 | 30.2 | 118 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/gumtreediff/gen/Test.java
|
package gumtreediff.gen;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.List;
import gumtreediff.actions.ActionGenerator;
import gumtreediff.actions.model.Action;
import gumtreediff.gen.jdt.JdtTreeGenerator;
import gumtreediff.io.ActionsIoUtils;
import gumtreediff.matchers.Matcher;
import gumtreediff.matchers.Matchers;
import gumtreediff.tree.ITree;
import gumtreediff.tree.TreeContext;
/**
 * Ad-hoc driver: diffs "1.java" against "2.java" with the JDT generator,
 * prints the old tree and writes the edit script to "output.xml".
 */
public class Test {
    public static void main(String[] args) throws IOException {
        File oldFile = new File("1.java");
        File newFile = new File("2.java");
        TreeContext oldTree = new JdtTreeGenerator().generateFromFile(oldFile);
        TreeContext newTree = new JdtTreeGenerator().generateFromFile(newFile);
        Matcher m = Matchers.getInstance().getMatcher(oldTree.getRoot(), newTree.getRoot());
        m.match();
        ActionGenerator g = new ActionGenerator(oldTree.getRoot(), newTree.getRoot(), m.getMappings());
        // generate() populates the action list later retrieved via getActions();
        // the returned list itself was never used.
        g.generate();
        System.out.println("old:");
        System.out.println(printItree(oldTree.getRoot(), 0));
        File output = new File("output.xml");
        // try-with-resources guarantees the writer is closed even when writing fails.
        try (BufferedWriter wr = new BufferedWriter(new FileWriter(output))) {
            wr.append(ActionsIoUtils.toXml(oldTree, g.getActions(), m.getMappings()).toString());
        }
    }

    /**
     * Renders the subtree rooted at {@code itree} as an indented string of
     * "( id: ... | <short form>" lines, one per node.
     */
    private static String printItree(ITree itree, int depth) {
        StringBuilder stringBuilder = new StringBuilder();
        stringBuilder.append("( id: ")
                .append(itree.getId())
                .append(" | ")
                .append(itree.toShortString())
                .append("\n ");
        for (ITree child : itree.getChildren()) {
            for (int i = 0; i < depth; i++) {
                stringBuilder.append("    ");
            }
            stringBuilder.append(printItree(child, depth + 1));
        }
        return stringBuilder.toString();
    }
}
| 2,270 | 35.629032 | 104 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/gumtreediff/gen/TreeGenerator.java
|
/*
* This file is part of GumTree.
*
* GumTree is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GumTree is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with GumTree. If not, see <http://www.gnu.org/licenses/>.
*
* Copyright 2011-2015 Jean-Rémy Falleri <[email protected]>
* Copyright 2011-2015 Floréal Morandat <[email protected]>
*/
package gumtreediff.gen;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import java.io.StringReader;
import java.nio.charset.Charset;
import java.nio.file.Files;
import java.nio.file.Paths;
import org.atteo.classindex.IndexSubclasses;
import gumtreediff.tree.TreeContext;
/**
 * Base class for all tree generators: adapters that parse source text
 * (from a reader, file, stream or string) into a validated TreeContext.
 */
@IndexSubclasses
public abstract class TreeGenerator {
    /** Produces the raw tree for the content supplied by the reader. */
    protected abstract TreeContext generate(Reader r) throws IOException;

    /** Parses and validates the tree read from {@code r}. */
    public TreeContext generateFromReader(Reader r) throws IOException {
        TreeContext context = generate(r);
        context.validate();
        return context;
    }

    public TreeContext generateFromFile(String path) throws IOException {
        return generateFromReader(Files.newBufferedReader(Paths.get(path), charset()));
    }

    public TreeContext generateFromFile(File file) throws IOException {
        return generateFromReader(Files.newBufferedReader(file.toPath(), charset()));
    }

    public TreeContext generateFromStream(InputStream stream) throws IOException {
        return generateFromReader(new InputStreamReader(stream, "UTF-8"));
    }

    public TreeContext generateFromString(String content) throws IOException {
        return generateFromReader(new StringReader(content));
    }

    /** All sources are decoded as UTF-8. */
    private static Charset charset() {
        return Charset.forName("UTF-8");
    }
}
| 2,159 | 32.75 | 102 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/gumtreediff/gen/jdt/AbstractJdtTreeGenerator.java
|
/*
* This file is part of GumTree.
*
* GumTree is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GumTree is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with GumTree. If not, see <http://www.gnu.org/licenses/>.
*
* Copyright 2011-2015 Jean-Rémy Falleri <[email protected]>
* Copyright 2011-2015 Floréal Morandat <[email protected]>
*/
package gumtreediff.gen.jdt;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.Reader;
import java.util.Map;
import org.eclipse.jdt.core.JavaCore;
import org.eclipse.jdt.core.dom.AST;
import org.eclipse.jdt.core.dom.ASTParser;
import gumtreediff.gen.TreeGenerator;
import gumtreediff.tree.TreeContext;
/**
 * Tree generator backed by the Eclipse JDT parser: reads Java source,
 * builds a JDT AST and converts it via a subclass-provided visitor.
 */
public abstract class AbstractJdtTreeGenerator extends TreeGenerator {
    /**
     * Reads the whole reader into a char array.
     * The original version started with a 10-char buffer, rebuilt a fresh
     * 1024-char buffer on every loop iteration and went through an
     * intermediate String per chunk; a single reused buffer appended
     * directly does the same job without the per-iteration garbage.
     */
    private static char[] readerToCharArray(Reader r) throws IOException {
        StringBuilder fileData = new StringBuilder();
        try (BufferedReader br = new BufferedReader(r)) {
            char[] buf = new char[1024];
            int numRead;
            while ((numRead = br.read(buf)) != -1) {
                fileData.append(buf, 0, numRead);
            }
        }
        return fileData.toString().toCharArray();
    }

    /**
     * Parses the reader's content as a compilation unit (Java 9 compliance,
     * doc comments enabled) and runs the subclass visitor over the AST.
     */
    @Override
    @SuppressWarnings({ "unchecked", "rawtypes" })
    public TreeContext generate(Reader r) throws IOException {
        ASTParser parser = ASTParser.newParser(AST.JLS4);
        parser.setKind(ASTParser.K_COMPILATION_UNIT);
        Map pOptions = JavaCore.getOptions();
        pOptions.put(JavaCore.COMPILER_COMPLIANCE, JavaCore.VERSION_9);
        pOptions.put(JavaCore.COMPILER_CODEGEN_TARGET_PLATFORM, JavaCore.VERSION_9);
        pOptions.put(JavaCore.COMPILER_SOURCE, JavaCore.VERSION_9);
        pOptions.put(JavaCore.COMPILER_DOC_COMMENT_SUPPORT, JavaCore.ENABLED);
        parser.setCompilerOptions(pOptions);
        parser.setSource(readerToCharArray(r));
        AbstractJdtVisitor v = createVisitor();
        parser.createAST(null).accept(v);
        return v.getTreeContext();
    }

    /** Supplies the visitor that maps JDT nodes to GumTree nodes. */
    protected abstract AbstractJdtVisitor createVisitor();
}
| 2,752 | 36.202703 | 84 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/gumtreediff/gen/jdt/AbstractJdtVisitor.java
|
/*
* This file is part of GumTree.
*
* GumTree is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GumTree is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with GumTree. If not, see <http://www.gnu.org/licenses/>.
*
* Copyright 2011-2015 Jean-Rémy Falleri <[email protected]>
* Copyright 2011-2015 Floréal Morandat <[email protected]>
*/
package gumtreediff.gen.jdt;
import java.util.ArrayDeque;
import java.util.Deque;
import org.eclipse.jdt.core.dom.ASTNode;
import org.eclipse.jdt.core.dom.ASTVisitor;
import gumtreediff.tree.ITree;
import gumtreediff.tree.TreeContext;
/**
 * Base JDT visitor that incrementally builds a GumTree TreeContext:
 * nodes are pushed on entry and popped on exit, mirroring AST nesting.
 */
public abstract class AbstractJdtVisitor extends ASTVisitor {
    /** Tree under construction. */
    protected TreeContext context = new TreeContext();
    /** Stack of ancestors of the node currently being visited. */
    private Deque<ITree> trees = new ArrayDeque<>();

    public AbstractJdtVisitor() {
        super(true);
    }

    public TreeContext getTreeContext() {
        return context;
    }

    /** Creates and pushes a tree node mirroring the given JDT node. */
    protected void pushNode(ASTNode n, String label) {
        push(n.getNodeType(), n.getClass().getSimpleName(), label,
                n.getStartPosition(), n.getLength());
    }

    private void push(int type, String typeName, String label, int startPosition, int length) {
        ITree tree = context.createTree(type, label, typeName);
        tree.setPos(startPosition);
        tree.setLength(length);
        if (trees.isEmpty()) {
            // First node visited becomes the root.
            context.setRoot(tree);
        } else {
            tree.setParentAndUpdateChildren(trees.peek());
        }
        trees.push(tree);
    }

    /** Parent of the node currently being built. */
    protected ITree getCurrentParent() {
        return trees.peek();
    }

    protected void popNode() {
        trees.pop();
    }
}
| 2,214 | 28.144737 | 95 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/gumtreediff/gen/jdt/JdtTreeGenerator.java
|
/*
* This file is part of GumTree.
*
* GumTree is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GumTree is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with GumTree. If not, see <http://www.gnu.org/licenses/>.
*
* Copyright 2011-2015 Jean-Rémy Falleri <[email protected]>
* Copyright 2011-2015 Floréal Morandat <[email protected]>
*/
package gumtreediff.gen.jdt;
import gumtreediff.gen.Register;
import gumtreediff.gen.Registry;
/**
 * Java tree generator registered for ".java" files at maximum priority,
 * producing trees via {@link JdtVisitor}.
 */
@Register(id = "java-jdt", accept = "\\.java$", priority = Registry.Priority.MAXIMUM)
public class JdtTreeGenerator extends AbstractJdtTreeGenerator {
    @Override
    protected AbstractJdtVisitor createVisitor() {
        return new JdtVisitor();
    }
}
| 1,180 | 32.742857 | 85 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/gumtreediff/gen/jdt/JdtVisitor.java
|
/*
* This file is part of GumTree.
*
* GumTree is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GumTree is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with GumTree. If not, see <http://www.gnu.org/licenses/>.
*
* Copyright 2011-2015 Jean-Rémy Falleri <[email protected]>
* Copyright 2011-2015 Floréal Morandat <[email protected]> *
*/
package gumtreediff.gen.jdt;
import java.util.ArrayList;
import java.util.List;
import org.eclipse.jdt.core.dom.ASTNode;
import org.eclipse.jdt.core.dom.Assignment;
import org.eclipse.jdt.core.dom.BooleanLiteral;
import org.eclipse.jdt.core.dom.CharacterLiteral;
import org.eclipse.jdt.core.dom.InfixExpression;
import org.eclipse.jdt.core.dom.Modifier;
import org.eclipse.jdt.core.dom.Name;
import org.eclipse.jdt.core.dom.NumberLiteral;
import org.eclipse.jdt.core.dom.PostfixExpression;
import org.eclipse.jdt.core.dom.PrefixExpression;
import org.eclipse.jdt.core.dom.QualifiedName;
import org.eclipse.jdt.core.dom.StringLiteral;
import org.eclipse.jdt.core.dom.StructuralPropertyDescriptor;
import org.eclipse.jdt.core.dom.TagElement;
import org.eclipse.jdt.core.dom.TextElement;
import org.eclipse.jdt.core.dom.Type;
public class JdtVisitor extends AbstractJdtVisitor {
public JdtVisitor() {
super();
}
@Override
public void preVisit(ASTNode n) {
pushNode(n, getLabel(n));
String type = n.getClass().getSimpleName();
// System.out.println(type);
// if(type.equals("Block")) {
// System.out.println("size:"+getChildren(n).size());
// }
}
public static List<ASTNode> getChildren(ASTNode node) {
List<ASTNode> children = new ArrayList<>();
List list = node.structuralPropertiesForType();
for (Object element : list) {
Object child = node.getStructuralProperty((StructuralPropertyDescriptor)element);
if (child instanceof ASTNode) {
children.add((ASTNode) child);
}
}
return children;
}
protected String getLabel(ASTNode n) {
if (n instanceof Name) return ((Name) n).getFullyQualifiedName();
if ((n instanceof Type) || (n instanceof Modifier)) return n.toString();
if (n instanceof StringLiteral) return ((StringLiteral) n).getEscapedValue();
if (n instanceof NumberLiteral) return ((NumberLiteral) n).getToken();
if (n instanceof CharacterLiteral) return ((CharacterLiteral) n).getEscapedValue();
if (n instanceof BooleanLiteral) return ((BooleanLiteral) n).toString();
if (n instanceof InfixExpression) return ((InfixExpression) n).getOperator().toString();
if (n instanceof PrefixExpression) return ((PrefixExpression) n).getOperator().toString();
if (n instanceof PostfixExpression) return ((PostfixExpression) n).getOperator().toString();
if (n instanceof Assignment) return ((Assignment) n).getOperator().toString();
if (n instanceof TextElement) return n.toString();
if (n instanceof TagElement) return ((TagElement) n).getTagName();
return "";
}
@Override
public boolean visit(TagElement e) {
return true;
}
@Override
public boolean visit(QualifiedName name) {
return false;
}
@Override
public void postVisit(ASTNode n) {
popNode();
}
}
| 3,826 | 35.447619 | 100 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/gumtreediff/gen/srcml/AbstractSrcmlTreeGenerator.java
|
/*
* This file is part of GumTree.
*
* GumTree is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GumTree is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with GumTree. If not, see <http://www.gnu.org/licenses/>.
*
* Copyright 2016 Jean-R茅my Falleri <[email protected]>
*/
package gumtreediff.gen.srcml;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import java.io.StringReader;
import java.io.Writer;
import java.nio.charset.Charset;
import java.nio.file.Files;
import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Scanner;
import java.util.Set;
import javax.xml.namespace.QName;
import javax.xml.stream.XMLEventReader;
import javax.xml.stream.XMLInputFactory;
import javax.xml.stream.events.Characters;
import javax.xml.stream.events.EndElement;
import javax.xml.stream.events.StartElement;
import javax.xml.stream.events.XMLEvent;
import gumtreediff.gen.TreeGenerator;
import gumtreediff.io.LineReader;
import gumtreediff.tree.ITree;
import gumtreediff.tree.TreeContext;
public abstract class AbstractSrcmlTreeGenerator extends TreeGenerator {
private static final String SRCML_CMD = System.getProperty("gt.srcml.path", "srcml");
private static final QName LINE = new QName("http://www.srcML.org/srcML/position", "line", "pos");
private static final QName COLUMN = new QName("http://www.srcML.org/srcML/position", "column", "pos");
private LineReader lr;
private Set<String> labeled = new HashSet<>(
Arrays.asList("specifier", "name", "comment", "literal", "operator",
"modifier", "include", "directive", "file", "argument", "value"));//第二行gumtree本身未添加
private StringBuilder currentLabel;
private TreeContext context;
@Override
public TreeContext generate(Reader r) throws IOException {
lr = new LineReader(r);
String xml = getXml(lr);
return getTreeContext(xml);
}
public TreeContext getTreeContext(String xml) {
XMLInputFactory fact = XMLInputFactory.newInstance();
context = new TreeContext();
currentLabel = new StringBuilder();
try {
ArrayDeque<ITree> trees = new ArrayDeque<>();
XMLEventReader r = fact.createXMLEventReader(new StringReader(xml));
while (r.hasNext()) {
XMLEvent ev = r.nextEvent();
if (ev.isStartElement()) {
StartElement s = ev.asStartElement();
// System.out.println("StartElL:"+ev.toString());
String typeLabel = s.getName().getLocalPart();
// System.out.println("typeLabel:"+typeLabel);
if (typeLabel.equals("position")) {
setLength(trees.peekFirst(), s);
}else if(typeLabel.equals("comment")) {
int type = typeLabel.hashCode();
ITree t = context.createTree(type, "", typeLabel);
trees.addFirst(t);
continue;//不需要comment节点
}else {
int type = typeLabel.hashCode();
ITree t = context.createTree(type, "", typeLabel);
if (trees.isEmpty()) {
context.setRoot(t);
t.setPos(0);
} else {
t.setParentAndUpdateChildren(trees.peekFirst());
// System.out.println("setpos");
setPos(t, s);
}
trees.addFirst(t);
}
} else if (ev.isEndElement()) {
EndElement end = ev.asEndElement();
// System.out.println("ev:"+end.toString());
if (!end.getName().getLocalPart().equals("position")) {
if (isLabeled(trees))
trees.peekFirst().setLabel(currentLabel.toString());
trees.removeFirst();
currentLabel = new StringBuilder();
}
} else if (ev.isCharacters()) {
Characters chars = ev.asCharacters();
if (!chars.isWhiteSpace() && isLabeled(trees))
currentLabel.append(chars.getData().trim());
}
}
fixPos(context);
context.validate();
int size = context.getRoot().getDescendants().size();
context.setSize(size);
return context;
} catch (Exception e) {
e.printStackTrace();
}
return null;
}
private boolean isLabeled(ArrayDeque<ITree> trees) {
return labeled.contains(context.getTypeLabel(trees.peekFirst().getType()));
}
private void fixPos(TreeContext ctx) {
for (ITree t : ctx.getRoot().postOrder()) {
if (!t.isLeaf()) {
if (t.getPos() == ITree.NO_VALUE || t.getLength() == ITree.NO_VALUE) {
ITree firstChild = t.getChild(0);
t.setPos(firstChild.getPos());
if (t.getChildren().size() == 1)
t.setLength(firstChild.getLength());
else {
ITree lastChild = t.getChild(t.getChildren().size() - 1);
t.setLength(lastChild.getEndPos() - firstChild.getPos());
}
}
}
}
}
private void setPos(ITree t, StartElement e) {
if (e.getAttributeByName(LINE) != null) {
int line = Integer.parseInt(e.getAttributeByName(LINE).getValue());
int column = Integer.parseInt(e.getAttributeByName(COLUMN).getValue());
// System.out.println("line:"+line);
// System.out.println("column"+column);
t.setPos(lr.positionFor(line, column));
t.setLine(line);
t.setColumn(column);
}
}
private void setLength(ITree t, StartElement e) {
if (t.getPos() == -1)
return;
if (e.getAttributeByName(LINE) != null) {
int line = Integer.parseInt(e.getAttributeByName(LINE).getValue());
int column = Integer.parseInt(e.getAttributeByName(COLUMN).getValue());
t.setLength(lr.positionFor(line, column) - t.getPos() + 1);
t.setLastLine(line);
t.setLastColumn(column);
}
}
public String getXml(Reader r) throws IOException {
// String path = "exception.txt";
// BufferedWriter wr = new BufferedWriter(new FileWriter(new File(path)));
//FIXME this is not efficient but I am not sure how to speed up things here.
File f = File.createTempFile("gumtree", "");
File xmlFile = new File("raw.xml");
BufferedWriter wr = new BufferedWriter(new FileWriter(xmlFile));
try (
Writer w = Files.newBufferedWriter(f.toPath(), Charset.forName("UTF-8"));
BufferedReader br = new BufferedReader(r);
) {
String line = br.readLine();
while (line != null) {
w.append(line + System.lineSeparator());
line = br.readLine();
}
}
ProcessBuilder pb = new ProcessBuilder(getArguments(f.getAbsolutePath()));
pb.redirectErrorStream(true);
pb.directory(f.getParentFile());
try {
Process p = pb.start();
Scanner scanner = new Scanner(p.getInputStream(), "UTF-8");
StringBuilder buf = new StringBuilder();
// TODO Why do we need to read and bufferize everything, when we could/should only use generateFromStream
while (scanner.hasNextLine()) {
buf.append(scanner.nextLine() + System.lineSeparator());
}
scanner.close();
p.waitFor();
// if (exit != 0) {
// throw new RuntimeException();
// }
p.destroy();
// Thread.sleep(1000);
String xml = buf.toString();
// System.out.println("xml:"+xml);
wr.append(xml);
wr.newLine();
wr.flush();
return xml;
} catch (InterruptedException e) {
throw new RuntimeException(e);
} finally {
wr.close();
f.delete();
}
}
private static void splitMessage(final InputStream input) {
// try {
// Process p = b.start();
// final InputStream is1 = p.getInputStream();
// final InputStream is2 = p.getErrorStream();
// new Thread() {
// public void run(){
// try {
// BufferedReader br = new BufferedReader(new InputStreamReader(is1, "UTF-8"));
// StringBuilder buf = new StringBuilder();
// String line = null ;
// while((line = br.readLine())!=null){
// buf.append(line + System.lineSeparator());
// }
// r.close();
// xml = buf.toString();
// } catch (IOException e) {
// e.printStackTrace();
// }
// }
// }.start();
//
// new Thread() {
// public void run(){
// try {
// BufferedReader br2 = new BufferedReader(new InputStreamReader(is2, "UTF-8"));
// String lineC = null ;
// while((lineC = br2.readLine())!= null){
// if(lineC!=null)
// System.out.println(lineC);
// }
// } catch (IOException e) {
// e.printStackTrace();
// }
// }
// }.start();
// p.waitFor();
// } catch (Exception e) {
// System.err.println(e);
// }
new Thread(new Runnable(){
@Override
public void run() {
try {
Reader r = new InputStreamReader(input, "UTF-8");
BufferedReader br = new BufferedReader(r);
StringBuilder buf = new StringBuilder();
String line = null;
while((line=br.readLine())!=null) {
buf.append(line + System.lineSeparator());
// System.out.println(line);
}
} catch (IOException e) {
e.printStackTrace();
}
}
}).start();
}
    /**
     * Returns the language identifier handed to srcml via its -l option
     * (e.g. "C", "C++", "C#", "Java"); supplied by concrete subclasses.
     */
    public abstract String getLanguage();
    /**
     * Builds the srcml command line for the given source file: language
     * selection, source-position tracking, and a tab width of 1 so that
     * srcml's reported columns match plain character offsets.
     */
    public String[] getArguments(String file) {
        return new String[]{SRCML_CMD, "-l", getLanguage(), "--position", file, "--tabs=1"};
    }
}
| 11,458 | 37.069767 | 117 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/gumtreediff/gen/srcml/SrcmlCTreeGenerator.java
|
/*
* This file is part of GumTree.
*
* GumTree is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GumTree is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with GumTree. If not, see <http://www.gnu.org/licenses/>.
*
* Copyright 2016 Jean-Rémy Falleri <[email protected]>
*/
package gumtreediff.gen.srcml;
import gumtreediff.gen.Register;
/**
 * Tree generator that parses C sources (.c/.h files) by delegating to the
 * external srcml tool via {@link AbstractSrcmlTreeGenerator}.
 */
@Register(id = "c-srcml", accept = "\\.[ch]$")
public class SrcmlCTreeGenerator extends AbstractSrcmlTreeGenerator {
    /** Language flag passed to the srcml executable's -l option. */
    @Override
    public String getLanguage() {
        return "C";
    }
}
| 1,018 | 30.84375 | 78 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/gumtreediff/gen/srcml/SrcmlCppTreeGenerator.java
|
/*
* This file is part of GumTree.
*
* GumTree is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GumTree is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with GumTree. If not, see <http://www.gnu.org/licenses/>.
*
* Copyright 2016 Jean-Rémy Falleri <[email protected]>
*/
package gumtreediff.gen.srcml;
import gumtreediff.gen.Register;
/**
 * Tree generator that parses C++ sources (.cc/.cpp/.hh/.hpp and variants)
 * by delegating to the external srcml tool via {@link AbstractSrcmlTreeGenerator}.
 */
@Register(id = "cpp-srcml", accept = "\\.(CC?|cpp|cc|hh?|hpp)$")
public class SrcmlCppTreeGenerator extends AbstractSrcmlTreeGenerator {
    /** Language flag passed to the srcml executable's -l option. */
    @Override
    public String getLanguage() {
        return "C++";
    }
}
| 1,040 | 31.53125 | 78 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/gumtreediff/gen/srcml/SrcmlCsTreeGenerator.java
|
/*
* This file is part of GumTree.
*
* GumTree is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GumTree is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with GumTree. If not, see <http://www.gnu.org/licenses/>.
*
* Copyright 2016 Jean-Rémy Falleri <[email protected]>
*/
package gumtreediff.gen.srcml;
import gumtreediff.gen.Register;
/**
 * Tree generator that parses C# sources (.cs files) by delegating to the
 * external srcml tool via {@link AbstractSrcmlTreeGenerator}.
 */
@Register(id = "cs-srcml", accept = "\\.cs$")
public class SrcmlCsTreeGenerator extends AbstractSrcmlTreeGenerator {
    /** Language flag passed to the srcml executable's -l option. */
    @Override
    public String getLanguage() {
        return "C#";
    }
}
| 1,019 | 30.875 | 78 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/gumtreediff/gen/srcml/SrcmlJavaTreeGenerator.java
|
/*
* This file is part of GumTree.
*
* GumTree is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GumTree is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with GumTree. If not, see <http://www.gnu.org/licenses/>.
*
* Copyright 2016 Jean-Rémy Falleri <[email protected]>
*/
package gumtreediff.gen.srcml;
import gumtreediff.gen.Register;
/**
 * Tree generator that parses Java sources (.java files) by delegating to the
 * external srcml tool via {@link AbstractSrcmlTreeGenerator}.
 */
@Register(id = "java-srcml", accept = "\\.java$")
public class SrcmlJavaTreeGenerator extends AbstractSrcmlTreeGenerator {
    /** Language flag passed to the srcml executable's -l option. */
    @Override
    public String getLanguage() {
        return "Java";
    }
}
| 1,027 | 31.125 | 78 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/gumtreediff/gen/srcml/TestSrcmlCppGenerator.java
|
/*
* This file is part of GumTree.
*
* GumTree is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GumTree is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with GumTree. If not, see <http://www.gnu.org/licenses/>.
*
* Copyright 2016 Jean-Rémy Falleri <[email protected]>
*/
package gumtreediff.gen.srcml;
import java.io.IOException;
import org.junit.Assert;
import org.junit.Test;
import gumtreediff.tree.ITree;
/**
 * Sanity check for the C++ srcml generator: a small template-heavy snippet
 * must parse into a tree of a known, fixed node count.
 */
public class TestSrcmlCppGenerator {

    @Test
    public void testSimple() throws IOException {
        // Representative C++: namespace, templates, loops and printf calls.
        String source = "\n"
                + "namespace R {\n"
                + "template <typename T>\n"
                + "static inline void print_array(T *__recv){\n"
                + "  int len = LEN(__recv);\n"
                + "  fprintf(stdout, \"%d:%d [\", TYPE(__recv), len);\n"
                + "  for(int i = 0; i < len; i++)\n"
                + "    print_item(__recv, i);\n"
                + "  fprintf(stdout, \" ]\\n\");\n"
                + "}\n"
                + "\n"
                + "template <typename T>\n"
                + "static inline void print_item(T *__recv, int idx){\n"
                + "  fprintf(stdout, \" %x\", GET(__recv, idx));\n"
                + "}\n"
                + "}";
        ITree root = new SrcmlCppTreeGenerator().generateFromString(source).getRoot();
        // Expected size pinned against the known srcml output for this input.
        Assert.assertEquals(148, root.getSize());
    }
}
| 1,871 | 33.666667 | 82 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/gumtreediff/gen/srcml/TestSrcmlCsGenerator.java
|
/*
* This file is part of GumTree.
*
* GumTree is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GumTree is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with GumTree. If not, see <http://www.gnu.org/licenses/>.
*
* Copyright 2016 Jean-Rémy Falleri <[email protected]>
*/
package gumtreediff.gen.srcml;
import java.io.IOException;
import org.junit.Assert;
import org.junit.Test;
import gumtreediff.tree.ITree;
/**
 * Sanity check for the C# srcml generator: a hello-world program must parse
 * into a tree of a known, fixed node count.
 */
public class TestSrcmlCsGenerator {

    @Test
    public void testSimple() throws IOException {
        // Minimal C# program exercising using, class and method syntax.
        String source = "using System;\n"
                + "public class HelloWorld\n"
                + "{\n"
                + "public static void Main()\n"
                + "{\n"
                + "Console.WriteLine(\"Hello world !\");\n"
                + "Console.ReadLine();\n"
                + "}\n"
                + "}";
        ITree root = new SrcmlCsTreeGenerator().generateFromString(source).getRoot();
        // Expected size pinned against the known srcml output for this input.
        Assert.assertEquals(34, root.getSize());
    }
}
| 1,466 | 30.212766 | 81 |
java
|
SeqTrans
|
SeqTrans-master/gumtree/src/gumtreediff/gen/srcml/TestSrcmlJavaGenerator.java
|
/*
* This file is part of GumTree.
*
* GumTree is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GumTree is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with GumTree. If not, see <http://www.gnu.org/licenses/>.
*
* Copyright 2016 Jean-Rémy Falleri <[email protected]>
*/
package gumtreediff.gen.srcml;
import java.io.IOException;
import org.junit.Assert;
import org.junit.Test;
import gumtreediff.tree.ITree;
import gumtreediff.tree.TreeContext;
/**
 * Sanity check for the Java srcml generator: a hello-world program must parse
 * into a tree of a known, fixed node count.
 */
public class TestSrcmlJavaGenerator {

    @Test
    public void testSimple() throws IOException {
        // Minimal Java program exercising class, method and call syntax.
        String source = "public class HelloWorld {\n"
                + "public static void main(String[] args) {\n"
                + "System.out.println(\"Hello, World\");\n"
                + "}\n"
                + "}";
        ITree root = new SrcmlJavaTreeGenerator().generateFromString(source).getRoot();
        // Expected size pinned against the known srcml output for this input.
        Assert.assertEquals(33, root.getSize());
    }
}
| 1,429 | 30.777778 | 81 |
java
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.