import re        # used by parseCommentMarksIntoMDAndMetaData below
import nbformat  # Jupyter notebook (.ipynb) reading, writing, and cell constructors


def addCourseDetails(NB, CourseID='1MS926', CourseName='Applied Statistics',
                     CourseInstance='Spring 2019, Uppsala University'):
    '''Add Course Details to the Master Notebook NB'''
    #NB['metadata']['lx_assignment_number']=AssignmentNumber
    NB['metadata']['lx_course_number'] = CourseID
    NB['metadata']['lx_course_name'] = CourseName
    NB['metadata']['lx_course_instance'] = CourseInstance
    md = '''# [{}](https://lamastex.github.io/scalable-data-science/as/2019/)\
\n## {}, {} \n©2019 Raazesh Sainudiin. [Attribution 4.0 International \
(CC BY 4.0)](https://creativecommons.org/licenses/by/4.0/)'''\
        .format(CourseName, CourseID, CourseInstance)
    newCell = nbformat.v4.new_markdown_cell(md)
    newCell['metadata']['deletable'] = False
    NB['cells'].insert(0, newCell)
    return NB
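A minimal usage sketch for the header helper (hedged: the notebook filenames below are hypothetical, and nbformat must be installed):

nb = nbformat.read('master.ipynb', as_version=4)   # hypothetical master notebook
nb = addCourseDetails(nb)                          # prepends the course-header markdown cell
nbformat.write(nb, 'master_with_header.ipynb')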
def parseCommentMarksIntoMDAndMetaData(NB):
    '''take master NB and turn the #PROBLEM x, #POINT y, #TEST x into md cells
    ADD PROBLEM number in cell metadata
    TODO? make dictionary of points scored, etc.'''
    cellIndex = -1
    AssnProbDict = {}
    indicesToInsertCells = []
    for C in NB['cells']:
        cellIndex = cellIndex + 1
        s = C['source']
        sSplitByNewLines = s.split('\n')
        l0 = sSplitByNewLines[0]  # first line
        sAllButl0 = '\n'.join(sSplitByNewLines[1:])  # gives '' if no extra lines are there
        matchObj = re.match(r'#\s*(\w+)\s+(\d+),\s*(\w+)\s+(\d+),\s*(\w+)\s*(\d+)', l0, re.U)
        if matchObj:
            #print(cellIndex, l0)
            assignmentNum = str(int(matchObj.group(2)))
            LX_Prob_CellType = matchObj.group(3)
            probNum = str(int(matchObj.group(4)))
            probPoints = str(int(matchObj.group(6)))
            C['metadata']['lx_assignment_number'] = assignmentNum
            C['metadata']['lx_problem_cell_type'] = LX_Prob_CellType
            C['metadata']['lx_problem_number'] = probNum
            C['metadata']['lx_problem_points'] = probPoints
            if LX_Prob_CellType == 'PROBLEM':
                C['source'] = sAllButl0  # remove first comment line containing PROBLEM
                if assignmentNum + '_' + probNum not in AssnProbDict:
                    md = '''---\n---\n## Assignment {}, Problem {}\nMaximum Points = {}'''\
                        .format(assignmentNum, probNum, probPoints)
                    newCell = nbformat.v4.new_markdown_cell(md)
                    newCell['metadata']['lx_problem_cell_type'] = LX_Prob_CellType
                    newCell['metadata']['lx_assignment_number'] = assignmentNum
                    indicesToInsertCells.append([cellIndex, newCell])
                    cellIndex = cellIndex + 1
                    AssnProbDict[assignmentNum + '_' + probNum] = 1
    # now insert the md cells at the right places
    for iC in indicesToInsertCells:
        NB['cells'].insert(iC[0], iC[1])
    return NB
def addGenericAssignmentAndCourseHeader(NB, AssNum):
    '''Add generic Header for Assignment Number AssNum to input NB'''
    NB['metadata']['lx_assignment_number'] = AssNum
    CourseID = NB['metadata']['lx_course_number']
    md = '''# Assignment {} for Course {}\nFill in your Personal Number, make \
sure you pass the `# ... Test` cells and\n submit by email with *Subject line* \
**{} Assignment{}**.\nYou can submit multiple times before the deadline \
and your highest score will be used.'''.format(AssNum, CourseID, CourseID, AssNum)
    newCell = nbformat.v4.new_markdown_cell(md)
    newCell['metadata']['deletable'] = False
    NB['cells'].insert(1, newCell)
    return NB
def addStudentIdAtBeginning(NB):
    '''Add a non-Deletable Cell with Student Person Number'''
    # NOTE: the isinstance check uses str; the original used Python 2's basestring
    newCell = nbformat.v4.new_code_cell(
        '''# Enter your 12 digit personal number here and evaluate this cell\n'''
        '''MyPersonalNumber = 'YYYYMMDDXXXX'\n\n'''
        '''#tests\n'''
        '''assert(isinstance(MyPersonalNumber, str))\n'''
        '''assert(MyPersonalNumber.isdigit())\n'''
        '''assert(len(MyPersonalNumber)==12)''')
    newCell['metadata']['lx_cell_type'] = 'personal_number'
    newCell['metadata']['deletable'] = False
    NB['cells'].insert(2, newCell)
    return NB
def makeStudentAssignmentNotebookWithProblemsAndWithoutSOLUTIONandTEST(NBList, AssNum):
    '''remove TEST, SOLUTION cells and only make PROBLEMs and Self-Test cells
    of Assignment AssNum'''
    # to create assignments from the master notebook
    NB0 = NBList[0].copy()
    NB0 = addGenericAssignmentAndCourseHeader(NB0, AssNum)  # Add generic but Assignment/course-specific header
    NB0 = addStudentIdAtBeginning(NB0)  # Add Student ID Cell
    studentCells = NB0['cells'][0:3]
    for NB in NBList:
        for C in NB['cells']:
            appendCell = False
            assignmentNumber = ''
            probCellType = ''
            if 'lx_assignment_number' in C['metadata']:
                assignmentNumber = C['metadata']['lx_assignment_number']
                if assignmentNumber == AssNum:
                    appendCell = True
            if 'lx_problem_cell_type' in C['metadata']:
                probCellType = C['metadata']['lx_problem_cell_type']
                if ("SOLUTION" in probCellType) or ("TEST" in probCellType):
                    appendCell = False
            if appendCell:
                studentCells.append(C)
    NB0['cells'] = studentCells
    return NB0
def makeStudentAssignmentNotebookWithProblemsAndWithSOLUTION(NBList, AssNum):
    '''keep SOLUTION cells as well as PROBLEMs and Self-Test cells
    of Assignment AssNum'''
    # to create assignments from the master notebook
    NB0 = NBList[0].copy()
    NB0 = addGenericAssignmentAndCourseHeader(NB0, AssNum)  # Add generic but Assignment/course-specific header
    NB0 = addStudentIdAtBeginning(NB0)  # Add Student ID Cell
    studentCells = NB0['cells'][0:3]
    for NB in NBList:
        for C in NB['cells']:
            appendCell = False
            assignmentNumber = ''
            probCellType = ''
            if 'lx_assignment_number' in C['metadata']:
                assignmentNumber = C['metadata']['lx_assignment_number']
                if assignmentNumber == AssNum:
                    appendCell = True
            if 'lx_problem_cell_type' in C['metadata']:
                probCellType = C['metadata']['lx_problem_cell_type']
                if "SOLUTION" in probCellType:  # not putting TEST cells: or ("TEST" in probCellType)
                    appendCell = True
            if appendCell:
                studentCells.append(C)
    NB0['cells'] = studentCells
    return NB0
def extractAssignmentFromMasterNotebookNumbers(inputMasterNBNos, AssNum):
    '''extract assignment num AssNum from list of master notebook numbers'''
    masterNotebooks = []
    for inputMasterNBNum in inputMasterNBNos:
        inputMasterNB = inputMasterNBNum + '.ipynb'
        # read master notebook
        nb = nbformat.read('master/jp/' + inputMasterNB, as_version=4)
        nb = addCourseDetails(nb)
        nb = parseCommentMarksIntoMDAndMetaData(nb)
        masterNotebooks.append(nb)
    nb2019jpAss = makeStudentAssignmentNotebookWithProblemsAndWithoutSOLUTIONandTEST(
        masterNotebooks, AssNum)
    nbformat.write(nb2019jpAss, '2019/jp/Assignment0' + AssNum + '.ipynb')
def extractAssignmentWithSolutionsFromMasterNotebookNumbers(inputMasterNBNos, AssNum):
    '''extract assignment num AssNum from list of master notebook numbers'''
    masterNotebooks = []
    for inputMasterNBNum in inputMasterNBNos:
        inputMasterNB = inputMasterNBNum + '.ipynb'
        # read master notebook
        nb = nbformat.read('master/jp/' + inputMasterNB, as_version=4)
        nb = addCourseDetails(nb)
        nb = parseCommentMarksIntoMDAndMetaData(nb)
        masterNotebooks.append(nb)
    nb2019jpAss = makeStudentAssignmentNotebookWithProblemsAndWithSOLUTION(
        masterNotebooks, AssNum)
    nbformat.write(nb2019jpAss, '2019/jp/Assignment0' + AssNum + '_soln.ipynb')
from cssselect import GenericTranslator  # CSS-to-XPath translation


def convert_css_to_xpath(css):
    """
    Convert CSS Selectors to XPath Selectors.
    Example:
        convert_css_to_xpath('button:contains("Next")')
        Output => "//button[contains(., 'Next')]"
    """
    xpath = GenericTranslator().css_to_xpath(css, prefix='//')
    return xpath
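This relies on the cssselect package. One caveat: the :contains() pseudo-class in the docstring example is non-standard CSS, and the stock GenericTranslator may reject it (projects that support it typically subclass the translator), so the hedged sketch below uses a standard selector instead:

# Convert a standard child-combinator selector to XPath
print(convert_css_to_xpath('ul > li.active'))
# Expected, given cssselect's class-attribute handling:
# //ul/li[@class and contains(concat(' ', normalize-space(@class), ' '), ' active ')]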
# Imports shared by the stacking and plotting helpers below
import pickle

import matplotlib.pyplot as plt
import numpy as np
from matplotlib.patches import Rectangle


def plot_stack_ccorr(arrayName, x0, y0, type_stack, w, Tmax, amp, amp_lin, \
    amp_pow, amp_PWS, n1, n2):
    """
    This function stacks the cross correlation over all the tremor windows
    and plots the stack

    Input:
        type arrayName = string
        arrayName = Name of seismic array
        type x0 = float
        x0 = Distance of the center of the cell from the array (east)
        type y0 = float
        y0 = Distance of the center of the cell from the array (north)
        type type_stack = string
        type_stack = Type of stack ('lin', 'pow', 'PWS')
        type w = float
        w = Power of the stack (for 'pow' and 'PWS')
        type Tmax = float
        Tmax = Maximum time lag for cross correlation plot
        type amp = float
        amp = Amplification factor of cross correlation for plotting
        type amp_lin = float
        amp_lin = Amplification factor of linear stack for plotting
        type amp_pow = float
        amp_pow = Amplification factor of power stack for plotting
        type amp_PWS = float
        amp_PWS = Amplification factor of phase-weighted stack for plotting
        type n1 = integer
        n1 = Index of first tremor to be plotted
        type n2 = integer
        n2 = Index of last tremor to be plotted
    """
    # Read file containing data from stack_ccorr_tremor
    filename = 'cc/{}/{}_{:03d}_{:03d}/{}_{:03d}_{:03d}_{}.pkl'.format( \
        arrayName, arrayName, int(x0), int(y0), arrayName, int(x0), int(y0), \
        type_stack)
    data = pickle.load(open(filename, 'rb'))
    EW_UD = data[6]
    NS_UD = data[7]
    # Stack over all tremor windows
    EW_lin = linstack([EW_UD], normalize=False)[0]
    EW_pow = powstack([EW_UD], w, normalize=False)[0]
    EW_PWS = PWstack([EW_UD], w, normalize=False)[0]
    NS_lin = linstack([NS_UD], normalize=False)[0]
    NS_pow = powstack([NS_UD], w, normalize=False)[0]
    NS_PWS = PWstack([NS_UD], w, normalize=False)[0]
    # Plot
    plt.figure(1, figsize=(20, 15))
    # EW - UD cross correlation
    ax1 = plt.subplot(121)
    for i in range(n1, n2):
        dt = EW_UD[i].stats.delta
        ncor = int((EW_UD[i].stats.npts - 1) / 2)
        t = dt * np.arange(- ncor, ncor + 1)
        if (i == 36):
            plt.plot(t, (2.0 * i + 1) - 2 * n1 + amp * EW_UD[i].data, 'r-')
        elif (i == 51):
            plt.plot(t, (2.0 * i + 1) - 2 * n1 + amp * EW_UD[i].data, 'b-')
        elif (i == 19):
            plt.plot(t, (2.0 * i + 1) - 2 * n1 + amp * EW_UD[i].data, 'g-')
        else:
            plt.plot(t, (2.0 * i + 1) - 2 * n1 + amp * EW_UD[i].data, 'k-')
    wave = Rectangle((4.0, 5), 1.4, 86, facecolor='c', fill=True, alpha=0.5)
    ax1.add_patch(wave)
    plt.plot(t, - 2.0 + amp_lin * EW_lin.data, 'r-')
    plt.plot(t, - 2.0 + amp_pow * EW_pow.data, 'b-')
    plt.plot(t, - 2.0 + amp_PWS * EW_PWS.data, 'g-')
    plt.xlim(0, Tmax)
    plt.ylim(- 5.0, 2.0 * (n2 - n1))
    plt.title('East / Vertical component', fontsize=24)
    plt.xlabel('Lag time (s)', fontsize=24)
    plt.ylabel('Cross correlation', fontsize=24)
    ax1.set_yticklabels([])
    ax1.tick_params(labelsize=20)
    # NS - UD cross correlation
    ax2 = plt.subplot(122)
    for i in range(n1, n2):
        dt = NS_UD[i].stats.delta
        ncor = int((NS_UD[i].stats.npts - 1) / 2)
        t = dt * np.arange(- ncor, ncor + 1)
        if (i == 36):
            plt.plot(t, (2.0 * i + 1) - 2 * n1 + amp * NS_UD[i].data, 'r-')
        elif (i == 51):
            plt.plot(t, (2.0 * i + 1) - 2 * n1 + amp * NS_UD[i].data, 'b-')
        elif (i == 19):
            plt.plot(t, (2.0 * i + 1) - 2 * n1 + amp * NS_UD[i].data, 'g-')
        else:
            plt.plot(t, (2.0 * i + 1) - 2 * n1 + amp * NS_UD[i].data, 'k-')
    wave = Rectangle((4.0, 5), 1.4, 86, facecolor='c', fill=True, alpha=0.5)
    ax2.add_patch(wave)
    plt.plot(t, - 2.0 + amp_lin * NS_lin.data, 'r-')
    plt.plot(t, - 2.0 + amp_pow * NS_pow.data, 'b-')
    plt.plot(t, - 2.0 + amp_PWS * NS_PWS.data, 'g-')
    plt.xlim(0, Tmax)
    plt.ylim(- 5.0, 2.0 * (n2 - n1))
    plt.title('North / Vertical component', fontsize=24)
    plt.xlabel('Lag time (s)', fontsize=24)
    plt.ylabel('Cross correlation', fontsize=24)
    ax2.set_yticklabels([])
    ax2.tick_params(labelsize=20)
    # End figure and plot
    plt.suptitle('{} at {} km, {} km'.format(arrayName, x0, y0), fontsize=24)
    plt.savefig('cc/{}/{}_{:03d}_{:03d}/{}_{:03d}_{:03d}_{}.eps'.format( \
        arrayName, arrayName, int(x0), int(y0), arrayName, int(x0), int(y0), \
        type_stack), format='eps')
    ax1.clear()
    ax2.clear()
    plt.close(1)
def MAD(data):
    """
    Compute MAD estimator of scale
    (from Rousseeuw and Croux, 1993)
    """
    m = np.median(data)
    res = 1.4826 * np.median(np.abs(data - m))
    return res
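A quick robustness check of MAD against np.std (a sketch; the exact values depend on the random seed, but the qualitative contrast does not):

rng = np.random.default_rng(0)
x = rng.normal(0.0, 1.0, 1000)
x[:10] = 50.0          # inject a few gross outliers
print(np.std(x))       # inflated by the outliers
print(MAD(x))          # stays close to the true scale of 1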
def S(data):
    """
    Compute Sn estimator of scale
    (from Rousseeuw and Croux, 1993)
    """
    N = np.shape(data)[0]
    m = np.zeros(N)
    for i in range(0, N):
        m[i] = np.median(np.abs(data[i] - data))
    res = 1.1926 * np.median(m)
    return res
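Since S takes medians over all pairwise absolute differences, it costs O(N^2) work and is best reserved for modest sample sizes. A small sanity check (sketch, illustrative values):

rng = np.random.default_rng(1)
x = rng.normal(0.0, 2.0, 500)
print(MAD(x), S(x))    # both estimates should land near the true scale of 2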
from obspy import Stream  # container for the stacked traces


def linstack(streams, normalize=True, method='RMS'):
    """
    Compute the linear stack of a list of streams

    Several streams -> returns a stack for each station and each channel
    One stream -> returns a stack for each channel (and merge stations)

    Input:
        type streams = list of streams
        streams = List of streams to stack
        type normalize = boolean
        normalize = Normalize traces by RMS amplitude before stacking
    Output:
        type stack = stream
        stack = Stream with stacked traces for each channel (and each station)
    """
    # If there are several streams in the list,
    # return one stack for each station and each channel
    if len(streams) > 1:
        stack = streams[np.argmax([len(stream) for stream in streams])].copy()
    # If there is only one stream in the list,
    # return one stack for each channel and merge the stations
    else:
        channels = []
        for tr in streams[0]:
            if not (tr.stats.channel in channels):
                channels.append(tr.stats.channel)
        stack = Stream()
        for i in range(0, len(channels)):
            stack.append(streams[0][0].copy())
            stack[-1].stats['channel'] = channels[i]
            stack[-1].stats['station'] = 'all'
    # Initialize trace to 0
    for tr in stack:
        tr.data = np.zeros(tr.stats.npts)
    # Initialize number of traces stacked to 0
    ntr = np.zeros((len(stack)))
    # Stack traces
    for i in range(0, len(streams)):
        for k in range(0, len(stack)):
            if len(streams) > 1:
                matchtr = streams[i].select(station=stack[k].stats.station,
                                            channel=stack[k].stats.channel)
            else:
                matchtr = streams[i].select(channel=stack[k].stats.channel)
            for j in range(0, len(matchtr)):
                ntr[k] = ntr[k] + 1
                # Normalize the data before stacking
                if normalize:
                    if method == 'RMS':
                        norm = matchtr[j].data / \
                            np.sqrt(np.mean(np.square(matchtr[j].data)))
                    elif method == 'Max':
                        norm = matchtr[j].data / \
                            np.max(np.abs(matchtr[j].data))
                    else:
                        raise ValueError('Method must be RMS or Max')
                    norm = np.nan_to_num(norm)
                else:
                    norm = matchtr[j].data
                stack[k].data = np.sum((norm, stack[k].data), axis=0)
    # Divide by the number of traces stacked
    for k in range(0, len(stack)):
        stack[k].data = stack[k].data / ntr[k]
    return stack
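A hedged sketch of stacking two single-trace ObsPy streams with matching station/channel metadata (assuming ObsPy is installed; the station and channel names are hypothetical):

import numpy as np
from obspy import Stream, Trace

header = {'station': 'STA1', 'channel': 'EHZ'}
tr1 = Trace(data=np.sin(np.linspace(0.0, 10.0, 100)), header=header)
tr2 = Trace(data=np.sin(np.linspace(0.0, 10.0, 100)) + 0.1, header=header)
stack = linstack([Stream(traces=[tr1]), Stream(traces=[tr2])])
print(stack[0].stats.station, stack[0].stats.channel, stack[0].data[:3])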
def powstack(streams, weight=2.0, normalize=True):
    """
    Compute the power (Nth-root) stack of a list of streams

    Several streams -> returns a stack for each station and each channel
    One stream -> returns a stack for each channel (and merge stations)

    Input:
        type streams = list of streams
        streams = List of streams to stack
        type weight = float
        weight = Power of the stack (usually integer greater than 1)
        type normalize = boolean
        normalize = Normalize traces by RMS amplitude before stacking
    Output:
        type stack = stream
        stack = Stream with stacked traces for each channel (and each station)
    """
    # If there are several streams in the list,
    # return one stack for each station and each channel
    if len(streams) > 1:
        stack = streams[np.argmax([len(stream) for stream in streams])].copy()
    # If there is only one stream in the list,
    # return one stack for each channel and merge the stations
    else:
        channels = []
        for tr in streams[0]:
            if not (tr.stats.channel in channels):
                channels.append(tr.stats.channel)
        stack = Stream()
        for i in range(0, len(channels)):
            stack.append(streams[0][0].copy())
            stack[-1].stats['channel'] = channels[i]
            stack[-1].stats['station'] = 'all'
    # Initialize trace to 0
    for tr in stack:
        tr.data = np.zeros(tr.stats.npts)
    # Initialize number of traces stacked to 0
    ntr = np.zeros((len(stack)))
    # Stack traces
    for i in range(0, len(streams)):
        for k in range(0, len(stack)):
            if len(streams) > 1:
                matchtr = streams[i].select(station=stack[k].stats.station,
                                            channel=stack[k].stats.channel)
            else:
                matchtr = streams[i].select(channel=stack[k].stats.channel)
            for j in range(0, len(matchtr)):
                ntr[k] = ntr[k] + 1
                # Normalize the data before stacking
                if normalize:
                    norm = matchtr[j].data / \
                        np.sqrt(np.mean(np.square(matchtr[j].data)))
                    norm = np.nan_to_num(norm)
                else:
                    norm = matchtr[j].data
                stack[k].data = np.sum((np.power(np.abs(norm), 1.0 / weight)
                                        * np.sign(norm), stack[k].data), axis=0)
    # Take the power of the stack and divide by the number of traces stacked
    for k in range(0, len(stack)):
        stack[k].data = np.sign(stack[k].data) * np.power(stack[k].data,
                                                          weight) / ntr[k]
    return stack
from obspy.signal.filter import envelope  # Hilbert envelope of a trace
from scipy.signal import hilbert          # analytic signal


def PWstack(streams, weight=2, normalize=True):
    """
    Compute the phase-weighted stack of a list of streams

    Several streams -> returns a stack for each station and each channel
    One stream -> returns a stack for each channel (and merge stations)

    Input:
        type streams = list of streams
        streams = List of streams to stack
        type weight = float
        weight = Power of the stack (usually integer greater than 1)
        type normalize = boolean
        normalize = Normalize traces by RMS amplitude before stacking
    Output:
        type stack = stream
        stack = Stream with stacked traces for each channel (and each station)
    """
    # First get the linear stack which we will weight by the phase stack
    Linstack = linstack(streams, normalize=normalize)
    # Compute the instantaneous phase
    instaphases = []
    for stream in streams:
        instaphase = stream.copy()
        for tr in instaphase:
            analytic = hilbert(tr.data)
            env = envelope(tr.data)
            tr.data = analytic / env
            tr.data = np.nan_to_num(tr.data)
        instaphases.append(instaphase)
    # Compute the phase stack
    Phasestack = linstack(instaphases, normalize=False)
    # Compute the phase-weighted stack
    for tr in Phasestack:
        tr.data = Linstack.select(station=tr.stats.station,
                                  channel=tr.stats.channel)[0].data \
            * np.power(np.abs(tr.data), weight)
    return Phasestack
def plot_stack_acorr(arrayName, x0, y0, type_stack, w, Tmax, amp, amp_lin, \
    amp_pow, amp_PWS, n1, n2):
    """
    This function stacks the autocorrelation over all the tremor windows
    and plots the stack

    Input:
        type arrayName = string
        arrayName = Name of seismic array
        type x0 = float
        x0 = Distance of the center of the cell from the array (east)
        type y0 = float
        y0 = Distance of the center of the cell from the array (north)
        type type_stack = string
        type_stack = Type of stack ('lin', 'pow', 'PWS')
        type w = float
        w = Power of the stack (for 'pow' and 'PWS')
        type Tmax = float
        Tmax = Maximum time lag for autocorrelation plot
        type amp = float
        amp = Amplification factor of autocorrelation for plotting
        type amp_lin = float
        amp_lin = Amplification factor of linear stack for plotting
        type amp_pow = float
        amp_pow = Amplification factor of power stack for plotting
        type amp_PWS = float
        amp_PWS = Amplification factor of phase-weighted stack for plotting
        type n1 = integer
        n1 = Index of first tremor to be plotted
        type n2 = integer
        n2 = Index of last tremor to be plotted
    """
    # Read file containing data from stack_acorr_tremor
    filename = 'ac/{}/{}_{:03d}_{:03d}/{}_{:03d}_{:03d}_{}.pkl'.format( \
        arrayName, arrayName, int(x0), int(y0), arrayName, int(x0), int(y0), \
        type_stack)
    data = pickle.load(open(filename, 'rb'))
    EW = data[6]
    NS = data[7]
    UD = data[8]
    # Stack over all tremor windows
    EW_lin = linstack([EW], normalize=False)[0]
    EW_pow = powstack([EW], w, normalize=False)[0]
    EW_PWS = PWstack([EW], w, normalize=False)[0]
    NS_lin = linstack([NS], normalize=False)[0]
    NS_pow = powstack([NS], w, normalize=False)[0]
    NS_PWS = PWstack([NS], w, normalize=False)[0]
    UD_lin = linstack([UD], normalize=False)[0]
    UD_pow = powstack([UD], w, normalize=False)[0]
    UD_PWS = PWstack([UD], w, normalize=False)[0]
    # Plot
    plt.figure(1, figsize=(30, 15))
    # EW autocorrelation
    ax1 = plt.subplot(131)
    for i in range(n1, n2):
        dt = EW[i].stats.delta
        ncor = int((EW[i].stats.npts - 1) / 2)
        t = dt * np.arange(- ncor, ncor + 1)
        plt.plot(t, (2.0 * i + 1) - 2 * n1 + amp * EW[i].data, 'k-')
    plt.plot(t, - 2.0 + amp_lin * EW_lin.data, 'r-')
    plt.plot(t, - 2.0 + amp_pow * EW_pow.data, 'b-')
    plt.plot(t, - 2.0 + amp_PWS * EW_PWS.data, 'g-')
    plt.xlim(0, Tmax)
    plt.ylim(- 5.0, 2.0 * (n2 - n1))
    plt.title('East component', fontsize=24)
    plt.xlabel('Lag time (s)', fontsize=24)
    plt.ylabel('Autocorrelation', fontsize=24)
    ax1.set_yticklabels([])
    ax1.tick_params(labelsize=20)
    # NS autocorrelation
    ax2 = plt.subplot(132)
    for i in range(n1, n2):
        dt = NS[i].stats.delta
        ncor = int((NS[i].stats.npts - 1) / 2)
        t = dt * np.arange(- ncor, ncor + 1)
        plt.plot(t, (2.0 * i + 1) - 2 * n1 + amp * NS[i].data, 'k-')
    plt.plot(t, - 2.0 + amp_lin * NS_lin.data, 'r-')
    plt.plot(t, - 2.0 + amp_pow * NS_pow.data, 'b-')
    plt.plot(t, - 2.0 + amp_PWS * NS_PWS.data, 'g-')
    plt.xlim(0, Tmax)
    plt.ylim(- 5.0, 2.0 * (n2 - n1))
    plt.title('North component', fontsize=24)
    plt.xlabel('Lag time (s)', fontsize=24)
    plt.ylabel('Autocorrelation', fontsize=24)
    ax2.set_yticklabels([])
    ax2.tick_params(labelsize=20)
    # UD autocorrelation
    ax3 = plt.subplot(133)
    for i in range(n1, n2):
        dt = UD[i].stats.delta
        ncor = int((UD[i].stats.npts - 1) / 2)
        t = dt * np.arange(- ncor, ncor + 1)
        plt.plot(t, (2.0 * i + 1) - 2 * n1 + amp * UD[i].data, 'k-')
    plt.plot(t, - 2.0 + amp_lin * UD_lin.data, 'r-')
    plt.plot(t, - 2.0 + amp_pow * UD_pow.data, 'b-')
    plt.plot(t, - 2.0 + amp_PWS * UD_PWS.data, 'g-')
    plt.xlim(0, Tmax)
    plt.ylim(- 5.0, 2.0 * (n2 - n1))
    plt.title('Vertical component', fontsize=24)
    plt.xlabel('Lag time (s)', fontsize=24)
    plt.ylabel('Autocorrelation', fontsize=24)
    ax3.set_yticklabels([])
    ax3.tick_params(labelsize=20)
    # End figure and plot
    plt.suptitle('{} at {} km, {} km'.format(arrayName, x0, y0), fontsize=24)
    plt.savefig('ac/{}/{}_{:03d}_{:03d}/{}_{:03d}_{:03d}_{}.eps'.format( \
        arrayName, arrayName, int(x0), int(y0), arrayName, int(x0), int(y0), \
        type_stack), format='eps')
    ax1.clear()
    ax2.clear()
    ax3.clear()
    plt.close(1)
from math import asin, cos, pi, sin, tan


def compute_time(h, v, i0):
    """
    Compute the travel time from the velocity profile
    and the incidence angle

    Input:
        type h = 1D numpy array
        h = Thicknesses of the layers
        type v = 1D numpy array
        v = Seismic wave velocities
        type i0 = float
        i0 = Incidence angle
    Output:
        type d0 = float
        d0 = Distance from epicenter
        type t0 = float
        t0 = Travel time
    """
    N = np.shape(h)[0]
    d = np.zeros(N)
    l = np.zeros(N)
    t = np.zeros(N)
    i = np.zeros(N)
    i[0] = i0
    for j in range(0, N):
        d[j] = h[j] * tan(i[j] * pi / 180.0)
        l[j] = h[j] / cos(i[j] * pi / 180.0)
        t[j] = l[j] / v[j]
        if j < N - 1:
            if abs(v[j + 1] * sin(i[j] * pi / 180.0) / v[j]) > 1.0:
                return (10000.0, 10000.0)
            else:
                i[j + 1] = asin(v[j + 1] * sin(i[j] * pi / 180.0)
                                / v[j]) * 180.0 / pi
    d0 = np.sum(d)
    t0 = np.sum(t)
    return (d0, t0)
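A hedged usage sketch of the layered travel-time computation (the two-layer model values are illustrative only):

# Two-layer model: thicknesses in km, velocities in km/s, incidence angle in degrees
h = np.array([10.0, 20.0])
v = np.array([5.0, 6.5])
d0, t0 = compute_time(h, v, 30.0)
print('epicentral distance = {:.2f} km, travel time = {:.2f} s'.format(d0, t0))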
def cli(env, identifier, postinstall, key):
    """Reload operating system on a virtual server."""
    vsi = SoftLayer.VSManager(env.client)
    vs_id = helpers.resolve_id(vsi.resolve_ids, identifier, 'VS')
    keys = []
    if key:
        for single_key in key:
            resolver = SoftLayer.SshKeyManager(env.client).resolve_ids
            key_id = helpers.resolve_id(resolver, single_key, 'SshKey')
            keys.append(key_id)
    if not (env.skip_confirmations or formatting.no_going_back(vs_id)):
        raise exceptions.CLIAbort('Aborted')
    vsi.reload_instance(vs_id, postinstall, keys)
def ping(self, datacenter=None, network=None):
    """Ping a message queue endpoint."""
    resp = requests.get('%s/v1/ping'
                        % self.get_endpoint(datacenter, network))
    resp.raise_for_status()
    return True
def _make_request(self, method, path, **kwargs):
    """Make request. Generally not called directly.

    :param method: HTTP Method
    :param path: resource Path
    :param dict \\*\\*kwargs: extra request arguments
    """
    headers = {
        'Content-Type': 'application/json',
        'User-Agent': consts.USER_AGENT,
    }
    headers.update(kwargs.get('headers', {}))
    kwargs['headers'] = headers
    kwargs['auth'] = self.auth

    url = '/'.join((self.endpoint, 'v1', self.account_id, path))
    resp = requests.request(method, url, **kwargs)
    try:
        resp.raise_for_status()
    except requests.HTTPError as ex:
        content = json.loads(ex.response.content)
        raise exceptions.SoftLayerAPIError(ex.response.status_code,
                                           content['message'])
    return resp
def authenticate(self, username, api_key, auth_token=None):
    """Authenticate this connection using the given credentials.

    :param username: SoftLayer username
    :param api_key: SoftLayer API Key
    :param auth_token: (optional) Starting auth token
    """
    auth_endpoint = '/'.join((self.endpoint, 'v1', self.account_id, 'auth'))
    auth = QueueAuth(auth_endpoint, username, api_key,
                     auth_token=auth_token)
    auth.auth()
    self.auth = auth
def pop_messages(self, queue_name, count=1):
    """Pop messages from a queue.

    :param queue_name: Queue Name
    :param count: (optional) number of messages to retrieve
    """
    resp = self._make_request('get', 'queues/%s/messages' % queue_name,
                              params={'batch': count})
    return resp.json()
def pop_message(self, queue_name):
    """Pop a single message from a queue.

    If no messages are returned, this returns None.

    :param queue_name: Queue Name
    """
    messages = self.pop_messages(queue_name, count=1)
    if messages['item_count'] > 0:
        return messages['items'][0]
    else:
        return None
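A hedged polling sketch (the connection object and queue name are hypothetical, and the payload key is assumed rather than confirmed by the source):

message = conn.pop_message('myqueue')   # conn: an authenticated message-queue client
if message is None:
    print('queue is empty')
else:
    print(message['body'])              # assumes items carry their payload under 'body'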
def _format_object_mask(objectmask, service):
    """Format new and old style object masks into proper headers.

    :param objectmask: a string- or dict-based object mask
    :param service: a SoftLayer API service name
    """
    if isinstance(objectmask, dict):
        mheader = '%sObjectMask' % service
    else:
        mheader = 'SoftLayer_ObjectMask'
        objectmask = objectmask.strip()
        if (not objectmask.startswith('mask')
                and not objectmask.startswith('[')):
            objectmask = "mask[%s]" % objectmask
    return {mheader: {'mask': objectmask}}
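Tracing the two branches shows how the header is keyed: a plain string gets wrapped in mask[...] under a generic header, while a dict mask is keyed by the service name:

print(_format_object_mask('id, hostname', 'SoftLayer_Account'))
# -> {'SoftLayer_ObjectMask': {'mask': 'mask[id, hostname]'}}
print(_format_object_mask({'id': ''}, 'SoftLayer_Account'))
# -> {'SoftLayer_AccountObjectMask': {'mask': {'id': ''}}}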
def cli(env, service, method, parameters, _id, mask, limit, offset):
    """Call arbitrary API endpoints with the given SERVICE and METHOD.

    \b
    Examples:
    slcli call-api Account getObject
    slcli call-api Account getVirtualGuests --limit=10 --mask=id,hostname
    slcli call-api Virtual_Guest getObject --id=12345
    slcli call-api Metric_Tracking_Object getBandwidthData --id=1234 \\
        "2015-01-01 00:00:00" "2015-01-01 12:00:00" public
    """
    result = env.client.call(service, method, *parameters,
                             id=_id,
                             mask=mask,
                             limit=limit,
                             offset=offset)
    env.fout(formatting.iter_to_table(result))
def cli(ctx, env):
    """Enters a shell for slcli."""
    # Set up the environment
    env = copy.deepcopy(env)
    env.load_modules_from_python(routes.ALL_ROUTES)
    env.aliases.update(routes.ALL_ALIASES)
    env.vars['global_args'] = ctx.parent.params
    env.vars['is_shell'] = True
    env.vars['last_exit_code'] = 0

    # Set up prompt_toolkit settings
    app_path = click.get_app_dir('softlayer')
    if not os.path.exists(app_path):
        os.makedirs(app_path)
    history = p_history.FileHistory(os.path.join(app_path, 'history'))
    complete = completer.ShellCompleter()

    while True:
        try:
            line = p_shortcuts.get_input(
                u"(%s)> " % env.vars['last_exit_code'],
                completer=complete,
                history=history,
            )
            try:
                args = shlex.split(line)
            except ValueError as ex:
                print("Invalid Command: %s" % ex)
                continue

            if not args:
                continue

            # Reset client so that the client gets refreshed
            env.client = None
            core.main(args=list(get_env_args(env)) + args,
                      obj=env,
                      prog_name="",
                      reraise_exceptions=True)
        except SystemExit as ex:
            env.vars['last_exit_code'] = ex.code
        except KeyboardInterrupt:
            env.vars['last_exit_code'] = 1
        except EOFError:
            return
        except ShellExit:
            return
        except Exception:
            env.vars['last_exit_code'] = 1
            traceback.print_exc(file=sys.stderr)
def cli(env, format='table', config=None, debug=0, verbose=0, proxy=None,
        really=False, fixtures=False, **kwargs):
    """Main click CLI entry-point."""
    # Set logging level
    if debug is not None:
        verbose = int(debug)
    if verbose:
        logger = logging.getLogger()
        logger.addHandler(logging.StreamHandler())
        logger.setLevel(DEBUG_LOGGING_MAP.get(verbose, logging.DEBUG))

    # Populate environment with client and set it as the context object
    env.skip_confirmations = really
    env.config_file = config
    env.format = format
    if env.client is None:
        # Environment can be passed in explicitly. This is used for testing
        if fixtures:
            client = SoftLayer.BaseClient(
                transport=SoftLayer.FixtureTransport(),
                auth=None,
            )
        else:
            # Create SL Client
            client = SoftLayer.create_client_from_env(
                proxy=proxy,
                config_file=config,
            )
        env.client = client
    env.vars['timings'] = SoftLayer.TimingTransport(env.client.transport)
    env.client.transport = env.vars['timings']
def output_result(env, timings=False, *args, **kwargs):
    """Outputs the results returned by the CLI and also outputs timings."""
    if timings and env.vars.get('timings'):
        timing_table = formatting.Table(['service', 'method', 'time'])

        calls = env.vars['timings'].get_last_calls()
        for call, _, duration in calls:
            timing_table.add_row([call.service, call.method, duration])

        env.err(env.fmt(timing_table))
def main(reraise_exceptions=False, **kwargs):
    """Main program. Catches several common errors and displays them nicely."""
    exit_status = 0
    try:
        cli.main(**kwargs)
    except SoftLayer.SoftLayerAPIError as ex:
        if 'invalid api token' in ex.faultString.lower():
            print("Authentication Failed: To update your credentials,"
                  " use 'slcli config setup'")
            exit_status = 1
        else:
            print(str(ex))
            exit_status = 1
    except SoftLayer.SoftLayerError as ex:
        print(str(ex))
        exit_status = 1
    except exceptions.CLIAbort as ex:
        print(str(ex.message))
        exit_status = ex.code
    except Exception:
        if reraise_exceptions:
            raise

        import traceback
        print("An unexpected error has occurred:")
        print(str(traceback.format_exc()))
        print("Feel free to report this error as it is likely a bug:")
        print("  https://github.com/softlayer/softlayer-python/issues")
        exit_status = 1

    sys.exit(exit_status)
def generate_order_template(self, quote_id, extra, quantity=1):
    """Generate a complete order template.

    :param int quote_id: ID of target quote
    :param list extra: List of dictionaries that have extra details about
                       the order such as hostname or domain names for
                       virtual servers or hardware nodes
    :param int quantity: Number of ~things~ to order
    """
    container = self.get_order_container(quote_id)
    container['quantity'] = quantity

    # NOTE(kmcdonald): This will only work with virtualGuests and hardware.
    #                  There has to be a better way, since this is based on
    #                  an existing quote that supposedly knows about this
    #                  detail
    if container['packageId'] == 46:
        product_type = 'virtualGuests'
    else:
        product_type = 'hardware'

    if len(extra) != quantity:
        raise ValueError("You must specify extra for each server in the "
                         "quote")

    container[product_type] = []
    for extra_details in extra:
        container[product_type].append(extra_details)

    container['presetId'] = None
    return container
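A minimal usage sketch for the method above, assuming it is called on a SoftLayer OrderingManager instance (the manager class this method belongs to in the SoftLayer library); the quote ID and hostnames are hypothetical:

# hedged usage sketch: quote_id 12345 and the hostnames are made up
import SoftLayer

client = SoftLayer.create_client_from_env()
ordering = SoftLayer.OrderingManager(client)

extras = [
    {'hostname': 'web01', 'domain': 'example.com'},
    {'hostname': 'web02', 'domain': 'example.com'},
]
# one 'extra' dict per server, so quantity must match len(extras)
template = ordering.generate_order_template(12345, extras, quantity=2)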
Python
def verify_quote(self, quote_id, extra, quantity=1):
    """Verifies that a quote order is valid.

    :param int quote_id: ID for the target quote
    :param list extra: list of dictionaries with extra details for each
                       server in the order (e.g. hostname, domain)
    :param int quantity: quantity to override default
    """
    container = self.generate_order_template(quote_id, extra,
                                             quantity=quantity)
    return self.client['Product_Order'].verifyOrder(container)
Python
def order_quote(self, quote_id, extra, quantity=1):
    """Places an order using a quote.

    :param int quote_id: ID for the target quote
    :param list extra: list of dictionaries with extra details for each
                       server in the order (e.g. hostname, domain)
    :param int quantity: quantity to override default
    """
    container = self.generate_order_template(quote_id, extra,
                                             quantity=quantity)
    return self.client['Product_Order'].placeOrder(container)
Python
def row(self, data): """Return a formatted row for the given data.""" for column in self.column_funcs: if callable(column): yield column(data) else: yield utils.lookup(data, *column)
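To illustrate the dispatch in row(): each entry in column_funcs is either a callable applied to the row's data or a key path handed to utils.lookup. A self-contained sketch of the same pattern, with a hypothetical data record and a minimal stand-in for utils.lookup:

# standalone sketch of the callable-vs-lookup-path dispatch used in row()
data = {'id': 7, 'primaryIpAddress': '10.0.0.5', 'datacenter': {'name': 'dal05'}}

def lookup(dct, *keys):
    # minimal stand-in for utils.lookup: walk a nested key path
    for key in keys:
        dct = dct.get(key) if isinstance(dct, dict) else None
    return dct

# each column is either a callable or a key path, as in the method above
column_funcs = [('id',), ('datacenter', 'name'), lambda d: d['primaryIpAddress']]
row = [col(data) if callable(col) else lookup(data, *col) for col in column_funcs]
# row == [7, 'dal05', '10.0.0.5']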
Python
def _click_autocomplete(root, text):
    """Completer generator for click applications."""
    try:
        parts = shlex.split(text)
    except ValueError:
        return []

    location, incomplete = _click_resolve_command(root, parts)

    if not text.endswith(' ') and not incomplete and text:
        return []

    options = []
    if incomplete and not incomplete[0:2].isalnum():
        for param in location.params:
            if not isinstance(param, click.Option):
                continue
            options.extend(param.opts)
            options.extend(param.secondary_opts)
    elif isinstance(location, (click.MultiCommand, click.core.Group)):
        options.extend(location.list_commands(click.Context(location)))

    # collect options that start with the incomplete section
    completions = []
    for option in options:
        if option.startswith(incomplete):
            completions.append(
                completion.Completion(option, -len(incomplete)))

    return completions
Python
def _click_resolve_command(root, parts):
    """Return the click command and the leftover text given some argument parts."""
    location = root
    incomplete = ''
    for part in parts:
        incomplete = part

        if not part[0:2].isalnum():
            continue

        try:
            next_location = location.get_command(click.Context(location),
                                                 part)
            if next_location is not None:
                location = next_location
                incomplete = ''
        except AttributeError:
            break
    return location, incomplete
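A runnable sketch of _click_resolve_command on a small, hypothetical click group; 'vs' and the partial token 'li' stand in for real subcommands:

# hedged sketch: build a tiny click group and resolve a partial command line
import click

@click.group()
def root():
    """Hypothetical top-level CLI group."""

@root.group(name='vs')
def vs():
    """Hypothetical 'vs' subgroup."""

@vs.command(name='list')
def vs_list():
    """Hypothetical leaf command."""

location, incomplete = _click_resolve_command(root, ['vs', 'li'])
# location is the 'vs' group and incomplete == 'li'; the completer then
# matches 'li' against location.list_commands(...) and offers 'list'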
Python
def _find_item_prices(self, size, categorycode=''):
    """Retrieves the Item Price IDs."""
    item_prices = self.client['Product_Package'].getItems(
        id=0,
        mask='id,capacity,prices[id]',
        filter={
            'items': {
                'capacity': {'operation': int(size)},
                'categories': {
                    'categoryCode': {'operation': categorycode}
                }}})
    for item_price in item_prices:
        for price in item_price['prices']:
            return price['id']
    raise exceptions.SoftLayerError(
        "Could not find a valid price for the given size")
Python
def _get_location_id(self, location): """Returns location id of datacenter for ProductOrder::placeOrder().""" loc_svc = self.client['Location_Datacenter'] datacenters = loc_svc.getDatacenters(mask='mask[longName,id,name]') for datacenter in datacenters: if datacenter['name'] == location: location = datacenter['id'] return location raise ValueError('Invalid datacenter name specified.')
Python
def create_iscsi(self, size=None, location=None): """Places an order for iSCSI volume. :param integer size: size of iSCSI volume to create :param string location: datacenter to use to create volume in """ item_price = self._find_item_prices(int(size), categorycode='iscsi') iscsi_order = self._build_order(item_price, location) return self.product_order.placeOrder(iscsi_order)
Python
def cancel_iscsi(self, volume_id, reason='unNeeded', immediate=False): """Cancels the given iSCSI volume. :param integer volume_id: the volume ID """ iscsi = self.get_iscsi( volume_id, mask='mask[id,capacityGb,username,password,billingItem[id]]') billingitemid = iscsi['billingItem']['id'] self.client['Billing_Item'].cancelItem( immediate, True, reason, id=billingitemid)
Python
def create_snapshot(self, volume_id, notes='No longer needed'): """Orders a snapshot for given volume. :param integer volume_id: the volume ID """ self.iscsi_svc.createSnapshot(notes, id=volume_id)
Python
def create_snapshot_space(self, volume_id, capacity):
    """Orders snapshot space for the given volume.

    :param integer volume_id: the volume ID
    :param integer capacity: capacity in ~GB
    """
    item_price = self._find_item_prices(
        int(capacity), categorycode='iscsi_snapshot_space')

    result = self.get_iscsi(
        volume_id,
        mask='mask[id,capacityGb,serviceResource[datacenter]]')
    snapshotspaceorder = {
        # NOTE: implicit string concatenation keeps the complexType free of
        # the stray whitespace a backslash line-continuation would embed
        'complexType': 'SoftLayer_Container_Product_Order_'
                       'Network_Storage_Iscsi_SnapshotSpace',
        'location': result['serviceResource']['datacenter']['id'],
        'packageId': 0,
        'prices': [{'id': item_price}],
        'quantity': 1,
        'volumeId': volume_id}
    self.product_order.placeOrder(snapshotspaceorder)
Python
def restore_from_snapshot(self, volume_id, snapshot_id):
    """Restore the volume to the snapshot's contents.

    :param integer volume_id: the volume ID
    :param integer snapshot_id: the snapshot ID
    """
    self.iscsi_svc.restoreFromSnapshot(snapshot_id, id=volume_id)
Python
def _get_network(self, kind, router=True, vlans=True, vlan_ids=True): """Wrapper for getting details about networks. :param string kind: network kind. Typically 'public' or 'private' :param boolean router: flag to include router information :param boolean vlans: flag to include vlan information :param boolean vlan_ids: flag to include vlan_ids """ network = {} macs = self.get('%s_mac' % kind) network['mac_addresses'] = macs if len(macs) == 0: return network if router: network['router'] = self.get('router', macs[0]) if vlans: network['vlans'] = self.get('vlans', macs[0]) if vlan_ids: network['vlan_ids'] = self.get('vlan_ids', macs[0]) return network
Python
def public_network(self, **kwargs): """Returns details about the public network. :param boolean router: True to return router details :param boolean vlans: True to return vlan details :param boolean vlan_ids: True to return vlan_ids """ return self._get_network('frontend', **kwargs)
Python
def private_network(self, **kwargs): """Returns details about the private network. :param boolean router: True to return router details :param boolean vlans: True to return vlan details :param boolean vlan_ids: True to return vlan_ids """ return self._get_network('backend', **kwargs)
Python
def tracedmodule_to_onnx(
    traced_module,
    output="out.onnx",
    *,
    graph_name="graph",
    opset=8,
    outspec=None,
    input_data_type: str = None,
    input_scales: Union[float, List[float]] = None,
    input_zero_points: Union[int, List[int]] = None,
    require_quantize=False,
    param_fake_quant=False,
    quantize_file_path="quant_params.json",
):
    """
    Convert a TracedModule model to ONNX, and save the ONNX model to file `output`.

    :param traced_module: a traced module or the file path of a traced module.
    :param output: the filename used for the saved model.
    :type output: str
    :param graph_name: the name of the ONNX graph.
    :type graph_name: str
    :param opset: opset version of ONNX model.
    :type opset: int
    """
    if isinstance(traced_module, str):
        traced_module = mge.load(traced_module)
    assert isinstance(
        traced_module, TracedModule
    ), "Input should be a traced module or a path of traced module."

    _update_inputs_qparams(
        traced_module, input_data_type, input_scales, input_zero_points
    )
    assert not require_quantize, "ONNX conversion does not support quantized models."
    tm_resolver = TM_FrontEnd(traced_module, outspec=outspec)
    irgraph = tm_resolver.resolve()

    transformer_options = [
        TransformerRule.REMOVE_RESHAPE_REALTED_OP,
        TransformerRule.REMOVE_UNRELATED_IROP,
        TransformerRule.EXPAND_CONVRELU,
    ]
    transformer = IRTransform(transformer_options)
    transformed_irgraph = transformer.transform(irgraph)

    quantizer = IRQuantizer(
        require_quantize=require_quantize, param_fake_quant=param_fake_quant
    )

    if tm_resolver.has_qat:
        quantizer.save_quantize_params(transformed_irgraph)

    converter = OnnxConverter(transformed_irgraph, opset, graph_name, quantizer)
    model = converter.convert()

    if tm_resolver.has_qat:
        quantizer.dump_quant_param(path=quantize_file_path)

    assert isinstance(output, str), "onnx_fpath must be string"
    with open(output, "wb") as fout:
        fout.write(model.SerializeToString())
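A hedged usage sketch for the converter above: trace a small MegEngine module and convert it, assuming a MegEngine version that provides megengine.traced_module; the toy network and file names are made up:

# hedged usage sketch: the toy network and output path are made up
import numpy as np
import megengine as mge
import megengine.module as M
from megengine.traced_module import trace_module

net = M.Sequential(M.Conv2d(3, 8, 3), M.ReLU())
inp = mge.tensor(np.random.random((1, 3, 32, 32)).astype("float32"))
tm = trace_module(net, inp)

tracedmodule_to_onnx(tm, output="net.onnx", opset=11, graph_name="toy_net")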
Python
def _check_dependency(outputs: List[IRTensor]):
    """
    Check whether any output depends on another output.
    """
    output_oprs = {var.owner_opr for var in outputs}
    output_input_oprs = {
        i.owner_opr for opr in _get_dep_opr(outputs) for i in opr.inp_tensors
    }
    if len(output_oprs) != len(outputs) or (output_oprs & output_input_oprs):
        raise ReorderError("Bad order due to dependency between two outputs.")
Python
def _reorder_outputs(context: "CaffeConverter"):
    """
    Try to keep the same output order as the original network. This is not
    always possible, so raise a ReorderError if the output layers cannot be
    ordered correctly.
    """
    output_tensor = context.net.graph_outputs
    _check_dependency(output_tensor)
    blob2index = {}
    ordered_layers = []
    output_layers = [None] * len(output_tensor)
    for i, oup in enumerate(output_tensor):
        blob = context.get_blob_name(oup)
        blob2index[blob] = i

    for l in context.layers:
        is_output = False
        for blob in l.top:
            idx = blob2index.get(blob, None)
            if idx is not None:
                if not is_output:
                    is_output = True
                else:
                    raise ReorderError(
                        "layer {} has more than one network outputs".format(l)
                    )
                if output_layers[idx] is not None:
                    raise ReorderError(
                        "duplicated blob name of layer {} and {}".format(
                            output_layers[idx], l
                        )
                    )
                output_layers[idx] = l
        if not is_output:
            ordered_layers.append(l)

    if output_layers.count(None) > 0:
        raise ReorderError("failure to replace all output vars.")

    ordered_layers += output_layers
    return ordered_layers
Python
def _get_inputs(self, exclude_idx=None): """ Returns the names of inputs of onnx operator. """ if exclude_idx is None: exclude_idx = [] for idx, inp in enumerate(self._opr.inp_tensors): if idx not in exclude_idx: if self._opr.inp_tensors[idx].np_data is not None: inp_tensor = onnx.helper.make_tensor_value_info( inp.name, mge2onnx_dtype_mapping[inp.dtype], inp.shape ) param = onnx.numpy_helper.from_array(inp.np_data, inp.name) self._net_sources.append(inp_tensor) self._parameters.append(param) return [tensor.name for tensor in self._opr.inp_tensors]
Python
def _get_outputs(self): """ Returns the names of outputs of onnx operator. """ return [tensor.name for tensor in self._opr.out_tensors]
Python
def convert(self):
    """
    Convert the owning operator to an onnx operator. Can be overridden by
    subclasses.

    Returns tuple (nodes, net_sources, parameters)
    """
    nodes = [
        onnx.helper.make_node(
            self.__opr_type__,
            self._get_inputs(),
            self._get_outputs(),
            name=self._opr.out_tensors[0].name,
            **self._get_attrs(),
        )
    ]
    return nodes, self._net_sources, self._parameters
Python
def tracedmodule_to_tflite(
    traced_module,
    output="out.tflite",
    *,
    input_data_type: str = None,
    input_scales: Union[float, List[float]] = None,
    input_zero_points: Union[int, List[int]] = None,
    require_quantize=False,
    param_fake_quant=False,
    quantize_file_path="quant_params.json",
    graph_name="graph",
    mtk=False,
    outspec=None,
):
    """
    Convert traced model to TFLite, and save the TFLite model to file `output`.

    :param traced_module: a traced module or the file path of a traced module.
    :param output: the filename used for the saved model.
    :param input_data_type: data type of input
    :param graph_name: the name of the TFLite graph.
    :param mtk: if this TFLite will be run on mtk.
    :type mtk: bool
    """
    if isinstance(traced_module, str):
        traced_module = mge.load(traced_module)
    assert isinstance(
        traced_module, TracedModule
    ), "Input should be a traced module or a path of traced module."
    _update_inputs_qparams(
        traced_module, input_data_type, input_scales, input_zero_points
    )

    tm_resolver = TM_FrontEnd(traced_module, outspec=outspec)
    irgraph = tm_resolver.resolve()

    transformer_options = [
        TransformerRule.REDUCE_AXIS_AS_INPUT,
        TransformerRule.PADDING_FOR_CONV_AND_POOLING,
        TransformerRule.EXPAND_CONVRELU,
        TransformerRule.CONV_ADD_ZERO_BIAS,
        TransformerRule.DECONV_SHAPE_AS_INPUT,
        TransformerRule.DEPTHWISE_CONV_RESHAPE_WEIGHT,
        TransformerRule.RESHAPE_BIAS_TO_1DIM,
        TransformerRule.FUSE_ACTIVATION,
        TransformerRule.SLICE_PARAMS_AS_INPUTS_AND_MAKE_SQUEEZE,
        TransformerRule.RESIZE_PARAMS_AS_INPUT,
        TransformerRule.TRANSPOSE_PATTERN_AS_INPUT,
        TransformerRule.FUSE_CONV_BN,
        TransformerRule.REMOVE_IDENTITY,
        TransformerRule.REPLACE_FLATTEN_TO_RESHAPE,
    ]
    if mtk:
        # MTK devices only support batch_size 1
        set_platform("mtk")
        transformer_options.append(TransformerRule.DECONV_ADD_ZERO_BIAS)

    transformer = IRTransform(transformer_options)
    transformed_irgraph = transformer.transform(irgraph)

    quantizer = IRQuantizer(
        require_quantize=require_quantize, param_fake_quant=param_fake_quant
    )

    if not require_quantize and tm_resolver.has_qat:
        quantizer.save_quantize_params(transformed_irgraph)
        quantizer.dump_quant_param(path=quantize_file_path)

    converter = TFLiteConverter(transformed_irgraph, graph_name, quantizer=quantizer)
    model = converter.convert()

    assert isinstance(output, str), "tflite_fpath must be string"
    with open(output, "wb") as fout:
        fout.write(model)
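The TFLite entry point accepts the same traced module, or a saved .tm path; a brief hedged sketch reusing the hypothetical tm from the ONNX example above:

# hedged sketch: 'tm' is the traced module from the ONNX example;
# the file names are made up
tracedmodule_to_tflite(tm, output="net.tflite", graph_name="toy_net")

# a saved traced module also works, since a str argument is mge.load()-ed
tracedmodule_to_tflite("net.tm", output="net.tflite")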
Python
def tracedmodule_to_caffe(
    traced_module,
    prototxt="out.prototxt",
    caffemodel="out.caffemodel",
    outspec=None,
    use_empty_blobs=False,
    input_data_type: str = None,
    input_scales: Union[float, List[float]] = None,
    input_zero_points: Union[int, List[int]] = None,
    require_quantize=False,
    param_fake_quant=False,
    split_conv_relu=False,
    fuse_bn=False,
    quantize_file_path="quant_params.json",
    convert_backend: BackEnd = BackEnd.CAFFE,
):
    """
    Convert a TracedModule model to Caffe, and save the caffe model to `prototxt` and `caffemodel`.

    :param traced_module: a traced module or the file path of a traced module.
    :param prototxt: the filename used for the saved model definition.
    :type prototxt: str
    :param caffemodel: the filename used for the saved model weights.
    :type caffemodel: str
    :param outspec: specify the end points of the model, expect the full names of nodes.
    :type outspec: list
    """
    if isinstance(traced_module, str):
        traced_module = mge.load(traced_module)
    assert isinstance(
        traced_module, TracedModule
    ), "Input should be a traced module or a path of traced module."
    assert not require_quantize, "Caffe does not support quantized models."
    _update_inputs_qparams(
        traced_module, input_data_type, input_scales, input_zero_points
    )
    tm_resolver = TM_FrontEnd(traced_module, outspec=outspec)
    irgraph = tm_resolver.resolve()

    transformer_options = [
        TransformerRule.REMOVE_DROPOUT,
        TransformerRule.REMOVE_RESHAPE_REALTED_OP,
        TransformerRule.REMOVE_UNRELATED_IROP,
        TransformerRule.ADD_FAKE_HSIGMOID_OUT,
        TransformerRule.EXPAND_CONVRELU,
    ]
    if fuse_bn:
        transformer_options += [
            TransformerRule.FUSE_LINEAR_BN,
            TransformerRule.FUSE_CONV_BN,
        ]
    if convert_backend == BackEnd.NNIE:
        transformer_options.extend(
            [TransformerRule.REMOVE_FLATTEN_BEFORE_LINEAR]
        )
    if split_conv_relu:
        transformer_options += [TransformerRule.REMOVE_RELU]
    transformer = IRTransform(transformer_options)
    transformed_irgraph = transformer.transform(irgraph)

    quantizer = IRQuantizer(
        require_quantize=require_quantize, param_fake_quant=param_fake_quant
    )
    if tm_resolver.has_qat:
        quantizer.save_quantize_params(transformed_irgraph)

    converter = CaffeConverter(
        transformed_irgraph, quantizer, use_empty_blobs, convert_backend
    )
    converter.convert()

    if tm_resolver.has_qat:
        quantizer.dump_quant_param(path=quantize_file_path)

    assert isinstance(prototxt, str) and isinstance(
        caffemodel, str
    ), "'prototxt' and 'caffemodel' must be string"
    converter.dump(prototxt, caffemodel)
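And the Caffe entry point, again with the hypothetical traced module; it emits a prototxt/caffemodel pair rather than a single file:

# hedged sketch: the file names are made up; fuse_bn folds BN into conv/linear
tracedmodule_to_caffe(
    tm,
    prototxt="net.prototxt",
    caffemodel="net.caffemodel",
    fuse_bn=True,
)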
Python
def repo_dir(repo, *path, mkdir=False):
    """Returns the directory at *path under the repo's vcs directory,
    creating it (and any intermediate directories) if mkdir is True;
    returns None if it is absent and mkdir is False."""
    path = repo_path(repo, *path)

    if os.path.exists(path):
        if (os.path.isdir(path)):
            return path
        else:
            raise Exception("Not a directory %s" % path)

    if mkdir:
        os.makedirs(path)
        return path
    else:
        return None
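Repository initialization uses it like this, a sketch assuming a repo object obtained from repo_find:

# hedged sketch: create .vcs/refs/heads on demand, or just resolve a path
repo = repo_find()
heads_dir = repo_dir(repo, "refs", "heads", mkdir=True)   # created if missing
tags_dir = repo_dir(repo, "refs", "tags")                 # None if absent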
Python
def repo_create(path):
    """Create a new repository at path"""

    repo = vcsRepository(path, True)  # create a vcs repo object

    # first check if path provided exists or is an empty directory
    if os.path.exists(repo.worktree):
        if not os.path.isdir(repo.worktree):
            raise Exception("%s is not a directory" % path)
    else:
        os.makedirs(repo.worktree)

    assert(repo_dir(repo, "branches", mkdir=True))
    assert(repo_dir(repo, "objects", mkdir=True))
    assert(repo_dir(repo, "refs", "tags", mkdir=True))
    assert(repo_dir(repo, "refs", "heads", mkdir=True))

    # .vcs/description
    with open(repo_file(repo, "description"), "w") as f:
        f.write("Unnamed repository; edit this description to name the repository\n")

    # .vcs/HEAD
    with open(repo_file(repo, "HEAD"), "w") as f:
        f.write("ref: refs/heads/master\n")

    # write default content to vcs config file
    with open(repo_file(repo, "config"), "w") as f:
        config = repo_default_config()
        config.write(f)

    with open(repo_file(repo, "userInfo"), "w") as f:
        config = repo_userInfo_config()
        config.write(f)

    return repo
Python
def repo_default_config():
    """Defines the config file structure and returns the configparser object"""
    # the config file is in Microsoft INI format
    ret = configparser.ConfigParser()

    ret.add_section("core")
    ret.set("core", "repositoryformatversion", "0")
    ret.set("core", "filemode", "false")
    ret.set("core", "bare", "false")

    return ret
Python
def repo_userInfo_config():
    """Defines the config file structure and returns the configparser object for the userInfo file"""
    # the config file is in Microsoft INI format
    ret = configparser.ConfigParser()

    ret.add_section("info")
    ret.set("info", "name", "")
    ret.set("info", "email", "")

    return ret
Python
def repo_find(path=".", required=True):
    """Returns the repository by looking for a .vcs directory in the current
    directory or, recursively, in its parents. This prevents creation of a
    redundant .vcs directory when a parent directory already has one."""
    path = os.path.realpath(path)

    if os.path.isdir(os.path.join(path,".vcs")):
        return vcsRepository(path)

    parent = os.path.realpath(os.path.join(path, ".."))

    if parent == path:
        # checking the condition when we reach root "/"
        # "/../" == "/"
        if required:
            raise Exception("No .vcs directory")
        else:
            return None

    # recursive call
    return repo_find(parent, required)
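A short sketch of the recursion: called from any subdirectory of the worktree, it walks upward until it finds .vcs or hits the filesystem root:

# hedged sketch: run from anywhere inside the worktree
repo = repo_find(".")            # raises if no .vcs is found on the way up
print(repo.worktree)             # the directory containing .vcs

maybe = repo_find("/tmp", required=False)   # returns None instead of raising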
Python
def serialize(self):
    """ This function must be implemented by subclasses. Since objects store
    different kinds of data, each object type needs its own class and
    serialization logic.

    Read the object's content from self.data, a byte string, and do whatever
    it takes to convert it into a meaningful representation."""
    raise Exception("Unimplemented!!")
Python
def keyValueMessageParser(original, start=0, dct=None):
    """Recursive function which parses a commit or a tag message and extracts key value pairs and the message"""
    if not dct:
        dct = collections.OrderedDict()

    # original is a byte string of the commit or tag message
    spaceIndex = original.find(b' ', start)
    newlineIndex = original.find(b'\n', start)

    # if a newline arrives before a space, the line must be an empty line,
    # which means the remainder of the data is the message
    # this case handles the situation when a blank line is reached; everything after it is the message
    if (spaceIndex < 0 or newlineIndex < spaceIndex):
        assert(newlineIndex == start)
        dct[b''] = original[start+1:]
        return dct

    # handling the case before the blank line is reached, thus a key value pair must be parsed
    key = original[start:spaceIndex]
    end = start
    while True:
        end = original.find(b'\n', end+1)
        if original[end+1] != ord(' '):
            break

    value = original[spaceIndex+1:end]
    # bytes are immutable, so the result of replace() must be reassigned
    value = value.replace(b'\n ', b'\n')

    if key in dct:
        if type(dct[key]) == list:
            dct[key].append(value)
        else:
            dct[key] = [dct[key], value]
    else:
        dct[key] = [value]

    # recursive call to extract the next key value pair or the message
    return keyValueMessageParser(original, end+1, dct)
Python
def keyValueMessageSerialize(keyValueDict): """Function which forms the original commit message from the keyValue dictionary formed by keyvalueParser()""" res = b'' for keys in keyValueDict.keys(): if (keys == b''): continue val = keyValueDict[keys] if type(val) != list: val = [val] # adding the key value pairs and recreating the original format for elements in val: res += keys + b' ' + elements.replace(b'\n', b'\n ') + b'\n' # adding the blank line and the message after res += b'\n' + keyValueDict[b''] return res
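A round-trip sketch of the two functions above on a hand-built commit message; the hashes below are made up:

# hedged sketch: the tree/parent hashes are made up
raw = (b"tree 29ff16c9c14e2652b22f8b78bb08a5a07930c147\n"
       b"parent 206941306e8a8af65b66eaaaea388a7ae24d49a0\n"
       b"\n"
       b"Initial commit")

parsed = keyValueMessageParser(raw)
# parsed[b'tree'] == [b'29ff...'], parsed[b''] == b'Initial commit'

assert keyValueMessageSerialize(parsed) == raw   # lossless round trip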
Python
def object_read(repo, sha):
    """Read object object_id from vcs repository repo. Return a vcs object whose exact type depends on the object"""

    path = repo_file(repo, "objects", sha[0:2], sha[2:])

    with open(path, "rb") as f:
        raw = zlib.decompress(f.read())

        # computing the starting position of the whitespace in header of the object file
        x = raw.find(b' ')
        fmt = raw[0:x]     # the type of object in byte type

        # read and validate object size
        y = raw.find(b'\x00', x)
        size = int(raw[x:y].decode("ascii"))
        if size != len(raw) - y - 1:
            raise Exception("Malformed object {0}: bad length".format(sha))

        # picking proper vcs object class
        if fmt == b'commit' : c = vcsCommit
        elif fmt == b'tree' : c = vcsTree
        elif fmt == b'tag' : c = vcsTag
        elif fmt == b'blob' : c = vcsBlob
        else:
            raise Exception("Unknown type {0} for object {1}".format(fmt.decode("ascii"), sha))

        # return object of the class picked above
        return c(repo, raw[y+1:])
Python
def object_write(obj, actually_write=True): """Creates the vcs object of input data and writes it to a file in compressed form if actually_write is True""" # Serialize object data if obj.fmt == b'commit': data = obj.serialize(obj.commitData) # get the content in byte string format else: data = obj.serialize() if type(data) == str: data = data.encode() # add header result = obj.fmt + b' ' + str(len(data)).encode() + b'\x00' + data # compute hash sha = hashlib.sha1(result).hexdigest() if actually_write: path = repo_file(obj.repo, "objects", sha[0:2], sha[2:], mkdir=actually_write) with open(path, "wb") as f: # compress the data and write f.write(zlib.compress(result)) return sha
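A dry-run sketch of the function above: compute a blob's hash without touching disk by passing actually_write=False, assuming a repo from repo_find and the vcsBlob class defined elsewhere in this codebase:

# hedged sketch: hash a blob without writing it to .vcs/objects
repo = repo_find()
blob = vcsBlob(repo, b"hello vcs\n")
sha = object_write(blob, actually_write=False)
print(sha)   # 40-character sha-1 of b'blob 10\x00hello vcs\n'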
Python
def object_find(repo, name, fmt=None, follow=True):
    """ A name resolution function: a vcs object can be referred to in various ways, such as full hash, short hash, tag, etc."""
    # unimplemented for now (placeholder function) --> will be implemented later
    if name == "HEAD":
        return ref_resolve(repo, name)
    else:
        return name
Python
def tree_parse_one(raw, start=0):
    """ Function to parse a single record in the tree object"""
    # finding the space terminator of the mode
    x = raw.find(b' ', start)
    # checking if mode provided is correct or not
    assert(x-start == 5 or x-start == 6)

    # read the mode
    mode = raw[start:x]

    # finding the NULL terminator of the path
    y = raw.find(b'\x00', x)
    # and read the path
    path = raw[x+1:y]

    # Read the SHA and convert it to a zero-padded 40-character hex string
    # (plain hex() would drop leading zeros and prefix "0x")
    sha = format(int.from_bytes(raw[y+1:y+21], "big"), "040x")
    return y+21, vcsTreeLeaf(mode, path, sha)
Python
def parse_tree(raw): """ Function to parse the whole tree""" pos = 0 max_len = len(raw) res = list() while pos < max_len: pos, data = tree_parse_one(raw, pos) res.append(data) return res
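A sketch of the on-disk record format the two parsers above consume: mode, space, path, NUL byte, then a raw 20-byte SHA-1; the sha bytes here are made up:

# hedged sketch: build one raw tree record by hand and parse it back
sha_bytes = bytes.fromhex("a9" * 20)          # made-up 20-byte sha-1
raw = b"100644 hello.txt\x00" + sha_bytes

leaves = parse_tree(raw)
leaf = leaves[0]
# leaf.mode == b'100644', leaf.path == b'hello.txt', leaf.sha == 'a9' * 20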
Python
def tree_serialize(obj): """Function to serialize a tree object""" res = b'' for i in obj.items: res += i.mode res += b' ' res += i.path res += b'\x00' sha = int(i.sha, 16) res += sha.to_bytes(20, byteorder="big") return res
Python
def object_hash(fd, fmt, repo=None):
    """ Function to read the content of an open file, create the appropriate object,
    write the object to the vcs directory and return the hash of the file"""
    data = fd.read()

    # choosing constructor on the basis of the object type found in header
    if fmt == b'commit' : obj = vcsCommit(repo, data)
    elif fmt == b'tree' : obj = vcsTree(repo, data)
    elif fmt == b'tag' : obj = vcsTag(repo, data)
    elif fmt == b'blob' : obj = vcsBlob(repo, data)
    else:
        raise Exception('Unknown type %s!' % fmt)

    # only write the object out when a repository was supplied
    return object_write(obj, actually_write=repo is not None)
Python
def logGraph(repo, sha, seen):
    """ Function to print the log of commits by traversing the graph"""
    # seen is a set which stores all the commits which are already visited,
    # thus preventing any circular loop situation in graph traversal.
    if sha in seen:
        return
    seen.add(sha)

    commit = object_read(repo, sha)
    # assertion to check if the object deserialized is a commit object
    assert(commit.fmt == b'commit')

    if not b'parent' in commit.commitData.keys():
        # the first commit
        return

    parents = commit.commitData[b'parent']

    if type(parents) != list:
        parents = [parents]

    for p in parents:
        # as data is kept in objects in byte string format
        p = p.decode('ascii')
        print("c_{0} -> c_{1}".format(sha, p))
        logGraph(repo, p, seen)
Python
def commitLog(repo, sha, seen): """Function to pretty print the log of commits in the terminal""" if sha in seen.keys(): return seen[sha] = object_read(repo, sha) # assertion to check, if the object deserialized is a commit object assert(seen[sha].fmt == b'commit') print("\n") print("commit id: {0}{1}".format(sha, " (HEAD)" if (len(seen.keys()) == 1) else "")) print("commit message: {0}".format(seen[sha].commitData[b''].decode('ascii'))) if not b'parent' in seen[sha].commitData.keys(): return parents = seen[sha].commitData[b'parent'] if type(parents) != list: parents = [parents] for p in parents: p = p.decode('ascii') commitLog(repo, p, seen)
Python
def tree_checkout(repo, tree, path):
    """Recursively instantiates a tree during checkout into an empty directory"""
    for item in tree.items:
        obj = object_read(repo, item.sha)
        rootPath = os.path.realpath(os.path.join(repo.vcsdir, '../'))
        relativePath = item.path.decode().replace(rootPath, '')
        destRootPath = path
        dest = destRootPath + relativePath.encode()

        if obj.fmt == b'tree':
            os.mkdir(dest)
            # recursively call if the object is a tree
            tree_checkout(repo, obj, destRootPath)
        elif obj.fmt == b'blob':
            with open(dest, 'wb') as f:
                f.write(obj.blobdata)
Python
def createTree(path=None, actually_write=True, verbose=False):
    """Creates a tree object of the whole repo"""
    blackList = ['libvcs.py', 'vcs', '__pycache__', '.vcs']

    repo = repo_find()
    if (path == None):
        path = repo.worktree

    content = os.listdir(path)
    treeContent = list()

    for files in content:
        dest = os.path.join(path, files)
        if os.path.isfile(dest):
            if files not in blackList:
                with open(dest, "rb") as f:
                    data = f.read()
                blobObj = vcsBlob(repo, data)
                sha = object_write(blobObj, actually_write)
                mode = str(os.stat(dest).st_mode).encode()
                leafObj = vcsTreeLeaf(mode, dest.encode(), sha)
                treeContent.append(leafObj)

                if verbose:
                    print("Added {0} to current commit".format(dest))
                    print("sha of {0} ----> {1}\n".format(dest, sha))
        else:
            # when the given entry is a directory
            if files not in blackList:
                leafObj = createTree(dest)
                treeContent.append(leafObj)

    treeObj = vcsTree(repo)
    treeObj.items = treeContent
    sha = object_write(treeObj, actually_write)

    if path == repo.worktree:
        return sha
    else:
        mode = str(os.stat(path).st_mode).encode()
        leafObj = vcsTreeLeaf(mode, path.encode(), sha)
        return leafObj
Python
def promptUserInfo():
    """Function to prompt user to set user name and email id if missing"""
    repo = repo_find()
    path = repo_file(repo, "userInfo")

    if not os.path.exists(path):
        raise Exception("{0} doesn't exist".format(path))

    # this exception covers the case where a directory of the same name was created in .vcs but the file doesn't exist
    if not os.path.isfile(path):
        raise Exception("{0} is not a file".format(path))

    with open(path) as f:
        content = f.read()

    parser = configparser.ConfigParser()
    parser.read_string(content)

    if not parser["info"]["name"]:
        print("User name missing")
        print("Set user name using command vcs set --name [username]")
        exit()

    if not parser["info"]["email"]:
        print("User email missing")
        print("Set user email using command vcs set --email [email id]")
        exit()
Python
def ref_resolve(repo, ref): """Recursively finds the sha-1 of objects referenced by a ref""" if not os.path.exists(os.path.realpath(os.path.join(repo.vcsdir, ref, '..'))): raise Exception("Directory missing: {0}".format(os.path.realpath(os.path.join(repo.vcsdir, ref, '..')))) if not os.path.isfile(repo_file(repo, ref)): return "" with open(repo_file(repo, ref), 'r') as fp: data = fp.read()[:-1] # reject the '\n' at the end of the string if data.startswith("ref: "): return ref_resolve(repo, data[5:]) else: return data
Python
def ref_list(repo, path=None):
    """Return a sorted ordered dictionary of references and the sha values
    they represent (nested dicts for ref subdirectories)."""
    if not path:
        path = repo_dir(repo, "refs")
    ret = collections.OrderedDict()
    for f in sorted(os.listdir(path)):
        ref_path = os.path.join(path, f)
        if os.path.isdir(ref_path):
            ret[f] = ref_list(repo, ref_path)
        else:
            ret[f] = ref_resolve(repo, ref_path)
    return ret
Python
def cmd_hash_object(args):
    """Calling function for hash-object command"""
    if args.write:
        repo = vcsRepository(".")
    else:
        repo = None
    with open(args.path, "rb") as f:
        sha = object_hash(f, args.type.encode(), repo)
        print(sha)
Python
def cmd_ls_tree(args):
    """Calling function for ls-tree command"""
    repo = repo_find()
    obj = object_read(repo, object_find(repo, args.object, fmt=b'tree'))
    for item in obj.items:
        print("{0} {1} {2}\t{3}".format(
            item.mode.decode("ascii").zfill(6),  # left-pad the mode to 6 digits
            object_read(repo, item.sha).fmt.decode("ascii"),
            item.sha,
            item.path.decode("ascii")))
Python
def cmd_checkout(args):
    """Calling function for vcs checkout command"""
    repo = repo_find()
    # deserialize the commit data and build the commit object
    obj = object_read(repo, object_find(repo, args.commit))
    # deserialize the tree object referenced by the commit
    if obj.fmt == b'commit':
        obj = object_read(repo, obj.commitData[b'tree'][0].decode("ascii"))
    # the target path must be an empty directory (created if missing)
    if os.path.exists(args.path):
        if not os.path.isdir(args.path):
            raise Exception("{0} is not a directory".format(args.path))
        if os.listdir(args.path):
            raise Exception("Directory is not empty")
    else:
        os.makedirs(args.path)
    tree_checkout(repo, obj, os.path.realpath(args.path).encode())
Python
def cmd_commit(args):
    """Calling function for vcs commit command"""
    if not args.message:
        raise Exception("Commit message is empty")
    # check that the user's name and email are set
    promptUserInfo()
    repo = repo_find()
    dct = collections.OrderedDict()
    treeHash = createTree(verbose=args.verbose).encode()
    headPath = os.path.join(repo.vcsdir, "HEAD")
    if not os.path.exists(headPath):
        raise Exception("{0} doesn't exist".format(headPath))
    if not os.path.isfile(headPath):
        raise Exception("{0} is not a file".format(headPath))
    # the HEAD file is expected to contain a commit hash as an ASCII string
    headCommitHash = ref_resolve(repo, 'HEAD')
    if headCommitHash:
        # check that the commit hash (sha-1) actually exists as an object
        if not os.path.isfile(os.path.join(repo.vcsdir, "objects", headCommitHash[:2], headCommitHash[2:])):
            raise Exception("Commit pointed by HEAD --> {0} doesn't exist".format(headCommitHash))
        # check that the hash in HEAD refers to a commit object
        fmt = getObjectFormat(repo, headCommitHash)
        if fmt != "commit":
            raise Exception("Object pointed by HEAD --> {0} is not a commit".format(headCommitHash))
        # check whether the worktree has changed since the last commit
        obj = object_read(repo, headCommitHash)
        if obj.commitData[b'tree'][0] == treeHash:
            print('Nothing to commit. No change in worktree since last commit ({0})'.format(headCommitHash))
            return
    name, email = getUserInfo()
    # todo: add optional pgp signature component of commit
    dct[b'tree'] = treeHash
    if headCommitHash:
        # the first commit has no parent entry; add one only when HEAD
        # already points at a commit
        dct[b'parent'] = headCommitHash.encode()
    dct[b'author'] = name.encode() + b' ' + email.encode()
    if not args.a:
        dct[b'committer'] = name.encode() + b' ' + email.encode()
    dct[b''] = args.message.encode()
    commitObj = vcsCommit(repo)
    commitObj.commitData = dct
    sha = object_write(commitObj, True)
    # updating the HEAD commit
    update_master(repo, sha)
    print("commit hash: {0}".format(sha))
    print("commit message: {0}\n".format(args.message))
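# For illustration, a minimal sketch of how the ordered dict built above could
# flatten into a commit payload, assuming a kvlm-style serializer in which the
# b'' key carries the message after a blank line. Whether object_write matches
# this exactly is not shown in these snippets; the tree sha is illustrative.
import collections

def serialize_commit(dct):
    out = b""
    for k, v in dct.items():
        if k == b"":
            continue  # the message is emitted last
        out += k + b" " + v + b"\n"
    out += b"\n" + dct[b""] + b"\n"
    return out

dct = collections.OrderedDict()
dct[b"tree"] = b"29ff16c9c14e2652b22f8b78bb08a5a07930c147"
dct[b"author"] = b"alice alice@example.com"
dct[b""] = b"initial commit"
print(serialize_commit(dct).decode())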
Python
def cmd_show_ref(args):
    """Calling function for vcs show-ref command"""
    repo = repo_find()
    refs = ref_list(repo)
    show_ref(repo, refs, prefix="")
Python
def cmd_set(args):
    """Calling function for vcs set command"""
    repo = repo_find()
    path = repo_file(repo, "userInfo")
    with open(path) as f:
        content = f.read()
    parser = configparser.ConfigParser()
    parser.read_string(content)
    if args.name:
        parser["info"]["name"] = args.name
    if args.email:
        parser["info"]["email"] = args.email
    with open(path, "w") as f:
        parser.write(f)
Python
def __insert_new_node(self, parent_node, child_node):
    """insert a new node into the FP-tree

    Args:
        parent_node (FPNode): parent of the node being inserted
        child_node (FPNode): node being inserted
    """
    parent_node.children.append(child_node)
    child_node.parent = parent_node
Python
def __setup_nodelink_structure(self, node):
    """update the header table and node-link chain when a new node is
    inserted into the FP-tree

    Args:
        node (FPNode): newly inserted node
    """
    if node.item_name not in self.header_table:
        # first occurrence of this item: it becomes the head of the chain
        self.header_table[node.item_name] = node
        self.__latest_itemlink[node.item_name] = node
    else:
        # append the node to the end of the item's node-link chain
        self.__latest_itemlink[node.item_name].node_link = node
        self.__latest_itemlink[node.item_name] = node
Python
def buildTreeFromData(self, transactions):
    """build the FP-tree from a set of transactions

    Args:
        transactions (list): list of transactions, each of which is a
        list of items, e.g. [['a', 'b', 'c'], ['c']]
    """
    self.reset()
    item2freq = get_sorted_frequent_item_from_data(
        transactions, self.min_sup)
    if len(item2freq) > 0:
        for transaction in transactions:
            # keep only items that meet the minimum support
            filtered_transaction = list(
                filter(lambda item: item in item2freq, transaction))
            if len(filtered_transaction) > 0:
                # FP-growth requires a fixed frequency ordering of items;
                # item2freq is assumed to encode that ordering as the sort key
                self.__insert_tree(
                    sorted(filtered_transaction,
                           key=lambda item: item2freq[item]),
                    self.root)
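# A hedged usage sketch of the FPTree snippets above. The constructor
# signature FPTree(min_sup=...) is an assumption inferred from the
# self.min_sup reference; the transactions are illustrative.
transactions = [
    ['a', 'b', 'c'],
    ['a', 'b'],
    ['a', 'c'],
    ['c'],
]
tree = FPTree(min_sup=2)           # assumed constructor
tree.buildTreeFromData(transactions)
print(tree.isEmptyTree())          # expected False: 'a', 'b', 'c' all meet min_sup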
Python
def isSinglePathTree(self):
    """check whether the FP-tree consists of a single path

    Returns:
        bool: True if the tree has only one path, False otherwise
    """
    # checking the header table alone is not enough: a tree can branch at
    # the root even when every item occurs only once, so walk down from the
    # root and fail as soon as any node has more than one child
    current_node = self.root
    while current_node.children:
        if len(current_node.children) > 1:
            return False
        current_node = current_node.children[0]
    return True
Python
def __get_conditional_pattern_base_of_item(self, item):
    """get the conditional pattern base of an item in the header table

    Args:
        item (str): item name

    Returns:
        list[tuple[str, int]]: conditional pattern base of the item
    """
    current_header = self.header_table[item]
    conditional_pattern_bases = []
    while current_header is not None:
        current_header_pattern = self.__get_header_pattern(current_header)
        if len(current_header_pattern) > 1:
            conditional_pattern_bases.append(current_header_pattern)
        current_header = current_header.node_link
    return conditional_pattern_bases
Python
def isEmptyTree(self):
    """check whether the tree is empty

    Returns:
        bool: True if the tree is empty, False otherwise
    """
    return len(self.root.children) == 0
Python
def __getItemListFromSinglePathTree(fp_tree):
    """get all items of a single-path tree

    Args:
        fp_tree (FPTree): object of FPTree

    Returns:
        list[tuple[str, int]]: a list of tuples, each of which holds an
        item name and its count
    """
    current_node = fp_tree.root
    items = []
    while len(current_node.children) == 1:
        items.append(
            (current_node.children[0].item_name,
             current_node.children[0].count))
        current_node = current_node.children[0]
    return items
Python
def __grow_single_path_tree(fp_tree, dest, frequent_set):
    """mine a single-path tree

    Args:
        fp_tree (FPTree): object of FPTree; it should have only one path
        dest (list): destination array where frequent itemsets are stored
        frequent_set (list): potential frequent itemset
    """
    tree_nodes = FPTree.__getItemListFromSinglePathTree(fp_tree)
    for i in range(len(tree_nodes)):
        # every combination of nodes along the path is a candidate itemset
        item_combinations = combinations(tree_nodes, i + 1)
        for item_combination in item_combinations:
            frequent_items = frequent_set + list(item_combination)
            # the support of an itemset is the smallest count along the path
            support = min(map(lambda item: item[1], frequent_items))
            frequent_items = (
                [item for item, _ in frequent_items], support)
            dest.append(frequent_items)
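# A small self-contained illustration of the combination step above; the
# (item, count) path f:4 -> c:3 -> a:3 is the classic FP-growth textbook
# example and is illustrative only.
from itertools import combinations

tree_nodes = [("f", 4), ("c", 3), ("a", 3)]
patterns = []
for i in range(len(tree_nodes)):
    for combo in combinations(tree_nodes, i + 1):
        support = min(count for _, count in combo)  # weakest link along the path
        patterns.append(([item for item, _ in combo], support))
print(patterns)
# [(['f'], 4), (['c'], 3), (['a'], 3), (['f', 'c'], 3), (['f', 'a'], 3),
#  (['c', 'a'], 3), (['f', 'c', 'a'], 3)]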
Python
def forward(self, x):
    """
    Forward function: accepts a Tensor of input data and returns a
    Tensor of output data.
    """
    # batch-norm variant, kept for reference:
    # out = self.dropout(self.relu1(self.bn1(self.fc1(x))))
    # out = self.dropout(self.relu2(self.bn2(self.fc2(out))))
    # out = self.dropout(self.relu3(self.bn3(self.fc3(out))))
    # out = self.dropout(self.relu4(self.bn4(self.fc4(out))))
    # out = self.dropout(self.relu5(self.bn5(self.fc5(out))))
    out = self.dropout(self.relu1(self.fc1(x)))
    out = self.dropout(self.relu2(self.fc2(out)))
    out = self.dropout(self.relu3(self.fc3(out)))
    out = self.dropout(self.relu4(self.fc4(out)))
    out = self.dropout(self.relu5(self.fc5(out)))
    out = self.fc_output_activation(self.fc_output(out))
    return out
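# For context, a minimal nn.Module sketch consistent with the forward pass
# above; all layer widths, the dropout rate, and the sigmoid output are
# illustrative assumptions, not the original model's configuration.
import torch
import torch.nn as nn

class MLP(nn.Module):
    def __init__(self, in_dim=16, hidden=64, out_dim=1, p=0.2):
        super().__init__()
        self.fc1, self.relu1 = nn.Linear(in_dim, hidden), nn.ReLU()
        self.fc2, self.relu2 = nn.Linear(hidden, hidden), nn.ReLU()
        self.fc3, self.relu3 = nn.Linear(hidden, hidden), nn.ReLU()
        self.fc4, self.relu4 = nn.Linear(hidden, hidden), nn.ReLU()
        self.fc5, self.relu5 = nn.Linear(hidden, hidden), nn.ReLU()
        self.fc_output = nn.Linear(hidden, out_dim)
        self.fc_output_activation = nn.Sigmoid()
        self.dropout = nn.Dropout(p)

    def forward(self, x):
        out = self.dropout(self.relu1(self.fc1(x)))
        out = self.dropout(self.relu2(self.fc2(out)))
        out = self.dropout(self.relu3(self.fc3(out)))
        out = self.dropout(self.relu4(self.fc4(out)))
        out = self.dropout(self.relu5(self.fc5(out)))
        return self.fc_output_activation(self.fc_output(out))

print(MLP()(torch.randn(4, 16)).shape)  # torch.Size([4, 1])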
Python
def airports_observable(self):
    """Returns rx.Observable that emits airport data"""
    def emit_airports(observer):
        reader = csv.reader(self.airports_file())
        next(reader)  # skip the header
        for row in reader:
            observer.on_next(Airport(*row))
        observer.on_completed()
    return Observable.create(emit_airports)
Python
def flights_observable(self, year):
    """Returns rx.Observable that emits flight data for the specified year"""
    def emit_flights(observer):
        reader = csv.reader(self.flights_file(year))
        next(reader)  # skip the header
        for row in reader:
            observer.on_next(Flight(*row))
        observer.on_completed()
    return Observable.create(emit_flights)
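# A self-contained sketch of the same create-then-skip-header pattern,
# assuming the RxPY 1.x API that Observable.create above suggests; the CSV
# contents are illustrative.
import csv
import io
from rx import Observable

rows = io.StringIO("iata,name\nJFK,John F. Kennedy\nLGA,LaGuardia\n")

def emit(observer):
    reader = csv.reader(rows)
    next(reader)  # skip the header
    for row in reader:
        observer.on_next(tuple(row))
    observer.on_completed()

Observable.create(emit).subscribe(
    on_next=print,
    on_completed=lambda: print("done"))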
Python
def report_airports_for_state():
    """List airports for a specific state"""
    context = ReportContext()
    context.state = prompt.string("State abbreviation: ").upper()
    print("\nIATA\tAirport Name\t\t\t\t\tCity")
    print("-" * 77)
    impl().report_airports_for_state(context)
Python
def report_airports_with_highest_cancellation_rate():
    """List airports with highest cancellation rates"""
    context = ReportContext()
    context.year = prompt.integer("Year: ")
    context.limit = prompt.integer("Limit: ")
    print("IATA\tAirport Name\t\t\t\tCancelled %")
    print("-" * 60)
    impl().report_airports_with_highest_cancellation_rate(context)
Python
def add_flight(self, flight):
    """Aggregate various metrics based on the specified flight"""
    self.totalFlights += 1
    if flight.Origin == self.subject.iata:
        self.totalOrigins += 1
        # cancellations are counted only for the origin airport;
        # BTS cancellation codes: A=carrier, B=weather, C=NAS, D=security
        if flight.Cancelled:
            self.totalCancelled += 1
            if flight.CancellationCode == 'A':
                self.totalCancelledCarrier += 1
            elif flight.CancellationCode == 'B':
                self.totalCancelledWeather += 1
            elif flight.CancellationCode == 'C':
                self.totalCancelledNAS += 1
            elif flight.CancellationCode == 'D':
                self.totalCancelledSecurity += 1
    elif flight.Dest == self.subject.iata:
        self.totalDestinations += 1
        # diversions are counted only for the destination airport
        if flight.Diverted:
            self.totalDiverted += 1
Python
def has_alpha(self, text: str):
    """
    Checks if the comment has at least one letter.

    Args:
    - text: the comment to be checked

    Returns:
    - True if the comment has at least one letter, False otherwise
    """
    return any(char.isalpha() for char in text)
Python
def only_contains_anon(self, text: str):
    """
    Checks if the comment contains only anonymization placeholder words.

    Args:
    - text: the comment to be checked

    Returns:
    - True if the comment contains only anonymization placeholders, False otherwise
    """
    anon_words = ["USER", "HASHTAG", "URL"]
    return all(word in anon_words for word in text.split())
Python
def has_acceptable_length(self, text: str):
    """
    Checks if the comment has an acceptable length.

    Args:
    - text: the comment to be checked

    Returns:
    - True if the comment has an acceptable length, False otherwise
    """
    return len(text) <= self.max_length
Python
def is_empty(self, text: str):
    """
    Checks if the comment is empty.

    Args:
    - text: the comment to be checked

    Returns:
    - True if the comment is empty, False otherwise
    """
    return text in ("", " ", "\n", "\t", None)
Python
def _get_video_id(video_url: str):
    """Get video id from a YouTube video url.

    Args:
    - video_url: YouTube video url (e.g. https://www.youtube.com/watch?v=dQw4w9WgXcQ)

    Returns:
    - video_id: YouTube video id
    """
    return video_url.split("watch?v=")[1].split("&")[0]
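# A quick self-contained check of the parsing logic above; the URL (with an
# extra query parameter appended) is illustrative.
url = "https://www.youtube.com/watch?v=dQw4w9WgXcQ&t=42"
print(url.split("watch?v=")[1].split("&")[0])  # dQw4w9WgXcQ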