language: stringclasses (6 values)
original_string: stringlengths (25 to 887k)
text: stringlengths (25 to 887k)
Python
def residuals(pars: Parameter, data: dict) -> ndarray:
    """Residuals between a spectrum and a linear combination of references.

    Parameters
    ----------
    pars
        Parameter object from ``lmfit`` containing the amplitude coefficients
        for each reference spectrum. At least attribute 'amp1' should exist
        in the object.
    data
        Dictionary with the scan and reference arrays. At least keys 'scan'
        and 'ref1' should exist in the dictionary.

    Returns
    -------
    :
        Array with residuals.

    Important
    ---------
    The number of 'amp' attributes in ``pars`` should match the number of
    'ref' keys in ``data``.

    Example
    -------
    >>> from numpy import allclose
    >>> from lmfit import Parameters
    >>> from araucaria.fit import residuals
    >>> pars = Parameters()
    >>> pars.add('amp1', value=0.4)
    >>> pars.add('amp2', value=0.7)
    >>> data = {'scan': 2, 'ref1': 1.0, 'ref2': 2.0}
    >>> allclose(residuals(pars, data), 0.2)
    True
    >>> data['scan'] = 1.6
    >>> allclose(residuals(pars, data), -0.2)
    True
    """
    eps = 1.0
    return (data['scan'] - sum_references(pars, data))/eps
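The record above depends on a sum_references helper that is not part of the sample. Below is a minimal sketch of that linear combination, assuming (as the docstring states) that each 'ampN' parameter pairs with a 'refN' entry; the helper body is an illustration, not the araucaria implementation.

from lmfit import Parameters

def sum_references(pars, data):
    # weighted sum of reference spectra: amp1*ref1 + amp2*ref2 + ...
    total = 0.0
    i = 1
    while f'amp{i}' in pars:
        total += pars[f'amp{i}'].value * data[f'ref{i}']
        i += 1
    return total

pars = Parameters()
pars.add('amp1', value=0.4)
pars.add('amp2', value=0.7)
data = {'scan': 2.0, 'ref1': 1.0, 'ref2': 2.0}
print(data['scan'] - sum_references(pars, data))  # 0.2, matching the doctest above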
Python
def bar_chart_race(df, filename=None, orientation='h', sort='desc', n_bars=None,
                   label_bars=True, use_index=True, steps_per_period=10,
                   period_length=500, figsize=(6.5, 3.5), cmap='dark24',
                   title=None, bar_label_size=7, tick_label_size=7,
                   period_label_size=16, fig=None, **kwargs):
    '''
    Create an animated bar chart race using matplotlib. Data must be in
    'wide' format where each row represents a single time period and each
    column represents a distinct category. Optionally, the index can label
    the time period.

    Bar height and location change linearly from one time period to the next.

    This is resource intensive - Start with just a few rows of data

    Parameters
    ----------
    df : pandas DataFrame
        Must be 'wide' where each row represents a single period of time.
        Each column contains the values of the bars for that category.
        Optionally, use the index to label each time period.

    filename : `None` or str, default None
        If `None` return animation as HTML5.
        If a string, save animation to that filename location.
        Use .mp4 or .gif extensions

    orientation : 'h' or 'v', default 'h'
        Bar orientation - horizontal or vertical

    sort : 'desc' or 'asc', default 'desc'
        Choose how to sort the bars. Use 'desc' to put largest bars on top
        and 'asc' to place largest bars on bottom.

    n_bars : int, default None
        Choose the maximum number of bars to display on the graph.
        By default, use all bars. New bars entering the race will appear
        from the bottom or top.

    label_bars : bool, default `True`
        Whether to label the bars with their value on their right

    use_index : bool, default `True`
        Whether to use the index as the text in the plot

    steps_per_period : int, default 10
        The number of steps to go from one time period to the next.
        The bar will grow linearly between each period.

    period_length : int, default 500
        Number of milliseconds to animate each period (row).
        Default is 500ms (half of a second)

    figsize : two-item tuple of numbers, default (6.5, 3.5)
        matplotlib figure size in inches. Will be overridden if own figure
        supplied to `fig`

    cmap : str, matplotlib colormap instance, or list of colors, default 'dark24'
        Colors to be used for the bars. Colors will repeat if there are
        more bars than colors.

    title : str, default None
        Title of plot

    bar_label_size : int, float, default 7
        Size in points of numeric labels just outside of the bars

    tick_label_size : int, float, default 7
        Size in points of tick labels

    period_label_size : int, float, default 16
        Size in points of label plotted with the axes that labels the period.

    fig : matplotlib Figure, default None
        For greater control over the aesthetics, supply your own figure
        with a single axes.

    **kwargs : key, value pairs
        Other keyword arguments passed to the matplotlib barh/bar function.

    Returns
    -------
    Either HTML5 video or creates an mp4/gif file of the animation and
    returns `None`

    Notes
    -----
    Default DPI of 144

    It is possible for some bars to be out of order momentarily during a
    transition since both height and location change linearly.

    Examples
    --------
    Use the `load_data` function to get an example dataset to create
    an animation.

    df = bcr.load_data('covid19')
    bcr.bar_chart_race(
        df=df,
        filename='covid19_horiz_desc.mp4',
        orientation='h',
        sort='desc',
        label_bars=True,
        use_index=True,
        steps_per_period=10,
        period_length=500,
        cmap='dark24',
        title='COVID-19 Deaths by Country',
        bar_label_size=7,
        tick_label_size=7,
        period_label_size=16,
        fig=None)
    '''
    bcr = _BarChartRace(df, filename, orientation, sort, n_bars, label_bars,
                        use_index, steps_per_period, period_length, figsize,
                        cmap, title, bar_label_size, tick_label_size,
                        period_label_size, fig, kwargs)
    return bcr.make_animation()
Python
def contains(self, point):
    ''' Determine if point lies within the polygon. '''
    # initially winds is 0
    winds = 0
    # iterate over edges
    for i in xrange(len(self.vertices)-1):
        # add wind if edge crosses point going up and point is to left
        if (self.vertices[i][1] < point[1] < self.vertices[i+1][1] and
                left_of_edge(point, self.vertices[i], self.vertices[i+1])):
            winds += 1
        # end wind if edge crosses point going down and point is to right
        elif (self.vertices[i][1] > point[1] > self.vertices[i+1][1] and
                not left_of_edge(point, self.vertices[i], self.vertices[i+1])):
            winds -= 1
    # point is contained if net winds is not zero
    return winds != 0
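A standalone sketch of the same winding-number test: left_of_edge is assumed to be a cross-product sign check (the helper is not shown in the record, so its body here is an assumption), and the polygon is given as a closed vertex list.

def left_of_edge(p, a, b):
    # positive cross product means p lies to the left of the edge a -> b
    return (b[0] - a[0]) * (p[1] - a[1]) - (b[1] - a[1]) * (p[0] - a[0]) > 0

def winding_contains(vertices, point):
    winds = 0
    for i in range(len(vertices) - 1):
        if vertices[i][1] < point[1] < vertices[i+1][1] and left_of_edge(point, vertices[i], vertices[i+1]):
            winds += 1
        elif vertices[i][1] > point[1] > vertices[i+1][1] and not left_of_edge(point, vertices[i], vertices[i+1]):
            winds -= 1
    return winds != 0

# closed square: first vertex repeated at the end
square = [(0, 0), (4, 0), (4, 4), (0, 4), (0, 0)]
print(winding_contains(square, (2, 2)))   # True
print(winding_contains(square, (5, 2)))   # False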
Python
def from_shapefile(obj, rec):
    """ Factory function that creates an Entity (or derived class) from a
    census.gov shapefile. """
    # by using the LSAD determine if a subclass is defined for this entity
    lsad_mapping = {
        ('01'): State,
        ('C1', 'C2', 'C3', 'C4'): CongressDistrict
    }
    for lsads, cls in lsad_mapping.iteritems():
        if rec['LSAD'] in lsads:
            return cls.from_shapefile(obj, rec)
    # if there is no mapping for the LSAD just construct an Entity
    return Entity('', rec['LSAD_TRANS'], obj.vertices(), obj.extents())
Python
def contains(self, point):
    """ Check if a point lies within any of the entity's polygons """
    if self.in_rect(point):
        for poly in self.polygons:
            if poly.contains(point):
                return True
    return False
Python
def to_kml(self):
    """ Return a KML Placemark representing the entity """
    return """<Placemark><name>%s</name>
    <MultiGeometry>%s</MultiGeometry>
    </Placemark>""" % (self.name,
                       ''.join(poly.to_kml() for poly in self.polygons))
Python
def from_shapefile(obj, rec):
    """ Construct a CongressDistrict from a census.gov shapefile """
    return CongressDistrict(rec['LSAD_TRANS'], obj.vertices(), obj.extents(),
                            FIPS_TO_STATE[rec['STATE']], rec['CD'])
Python
def read_census_shapefile(filename):
    """Read census shapefile and return list of entity-derived objects.

    Given the base name of a census .shp/.dbf file returns a list of all
    Entity-derived objects described by the file.
    """
    try:
        shp = ShapeFile(filename)
    except IOError:
        raise ShapefileError('Could not open %s.shp' % filename)
    try:
        dbf = DBFFile(filename)
    except IOError:
        raise ShapefileError('Could not open %s.dbf' % filename)

    shape_count = shp.info()[0]
    # shape_count should always equal dbf.record_count()
    if shape_count != dbf.record_count():
        raise ShapefileError('SHP/DBF record count mismatch (SHP=%d, DBF=%d)'
                             % (shape_count, dbf.record_count()))

    # generator version
    #for i in xrange(shp.info()[0]):
    #    yield Entity.fromShapefile(shp.read_object(i), dbf.read_record(i))

    # shp.info()[0] is the number of objects
    return [Entity.from_shapefile(shp.read_object(i), dbf.read_record(i))
            for i in xrange(shape_count)]
Python
def _google_geocode(self, address):
    """Convert an address into a latitude/longitude via google maps"""
    url = 'http://maps.google.com/maps/geo?output=csv&q=%s&key=%s' % \
        (urllib.quote(address), self.apikey)
    # returns status,level-of-detail,lat,long
    status, _, lat, lng = urllib.urlopen(url).read().split(',')
    # 200 - OK
    if status == '200':
        return lat, lng
    else:
        raise GeocodingError(status)
Python
def _geocoderus_geocode(self, address):
    """Convert an address into a latitude/longitude via geocoder.us"""
    if not address:
        raise GeocodingError(601)   # empty address
    url = 'http://rpc.geocoder.us/service/csv?address=%s' % \
        urllib.quote(address)
    data = urllib.urlopen(url).readline()   # only get first line for now
    # returns lat,long,street,city,state,zip or #: errmsg
    if data.startswith('2:'):
        raise GeocodingError(602)   # address not found
    try:
        lat, lng, _, _, _, _ = data.split(',')
        return lat, lng
    except ValueError:
        raise GeocodingError(500, data)
Python
def lat_long_to_district(self, lat, lng):
    """ Obtain the district containing a given latitude and longitude."""
    flat, flng = float(lat), -abs(float(lng))
    districts = []
    for cb in self.boundaries:
        if cb.contains((flng, flat)):
            if cb.district == '98':
                cb.district = '00'
            elif cb.district[0] == '0':
                cb.district = cb.district[1]
            districts.append((cb.state, cb.district))
    return lat, lng, districts
Python
def address_to_district(self, address):
    """Given an address returns the congressional district it lies within.

    This function works by geocoding the address and then finding the
    district that the returned lat/long lies within.
    """
    if self.geocoder == self.GEOCODER_GMAPS:
        lat, lng = self._google_geocode(address)
    elif self.geocoder == self.GEOCODER_US:
        lat, lng = self._geocoderus_geocode(address)
    return self.lat_long_to_district(lat, lng)
Python
def singletonSubCovers(causes, M, w, verbose=False, timeout=300):
    """
    Finds all singleton covers for each sub-sequence of an observed sequence.
    Inputs:
        causes: The handle to the causes function that defines the causal relation.
        M: The upper bound on the length of any effect sequence in the causal relation.
        w: The observed sequence to be explained.
        verbose: Boolean flag for whether to print out status updates.
        timeout: The maximum number of seconds to allow singletonSubCovers to run.
    Outputs:
        status: True if the run finished, False if it timed out.
        g: The table of singleton sub-covers. g[j,k] is a set of tuples of the form
           (u, v, d_min, d_max, ts), where
           u is a singleton cover of w[j:k],
           v is u's immediate child sequence in the covering tree,
           d_min is the shortest causal chain length in the covering tree,
           d_max is the longest causal chain length in the covering tree,
           ts is the total number of nodes in the covering tree.
    """
    start = time.process_time()

    # Initialize g
    N = len(w)
    g = [{(j,k): set() for (j,k) in itr.combinations(range(N+1),2)}]
    for j in range(N):
        g[0][j,j+1] = set([(w[j], (), 0, 0, 1)])  # 0-based indexing into w

    for ell in itr.count(1):
        g.append({(j,k): set(g[ell-1][j,k]) for (j,k) in g[ell-1]})  # copy (ell-1) covers
        for m in range(1,M+1):
            for k in itr.combinations(range(N+1),m+1):
                for uvdt in itr.product(*[g[ell-1][k[i-1],k[i]] for i in range(1,m+1)]):
                    if time.process_time()-start > timeout:
                        return False, g[ell]
                    u = tuple(u for (u,_,_,_,_) in uvdt)
                    d_min = min(d for (_,_,d,_,_) in uvdt) + 1
                    d_max = max(d for (_,_,_,d,_) in uvdt) + 1
                    ts = sum(s for (_,_,_,_,s) in uvdt) + 1
                    g[ell][k[0],k[m]] |= set((cu, u, d_min, d_max, ts) for cu in causes(u))
        if verbose:
            print("ell=%d, max |g| = %d"%(ell, max([len(g[ell][jk]) for jk in g[ell]])))
        if g[ell]==g[ell-1]:
            return True, g[ell]
Python
def topLevelCovers(g, N, M, u=(), k=(0,), d_min=(), d_max=(), ts=(), narrow=False):
    """
    A python generator that yields all top-level covers for an observed sequence.
    Inputs:
        g: the table of singleton sub-covers as returned by singletonSubCovers.
        N: the length of the observed sequence.
        M: The upper bound on the length of any effect sequence in the causal relation.
        u, k, d_min, d_max, ts: internal accumulators (not user-level inputs)
        narrow: if False, d_min, d_max, ts are retained, but if the same u covers
                different v, duplicates may be returned
                if True, d_min, d_max, ts are discarded, but no duplicates are returned
    Outputs:
        t: a top-level cover of the form (u, k, d_min, d_max, ts), where
           u[i] is the i^{th} root in the covering forest,
           k[i] is the index of the first observation covered by u[i],
           d_min[i] is the shortest causal chain length in u[i]'s covering tree,
           d_max[i] is the longest causal chain length in u[i]'s covering tree,
           ts[i] is the total number of nodes in u[i]'s covering tree.
    """
    if k[-1]==N:
        yield (u, k, d_min, d_max, ts)
    else:
        for k1 in range(k[-1]+1,N+1):
            if not narrow:
                g1 = g[k[-1],k1]
            else:
                g1 = set([(u1,None,None,None,None) for (u1,_,_,_,_) in g[k[-1],k1]])
            # for (u1,_,d1_min, d1_max, ts1) in g[k[-1],k1]: # includes duplicates if same u with different _,etc.
            for (u1,_,d1_min,d1_max,ts1) in g1:
                if any([(u+(u1,))[-m:] == v
                        for m in range(1,min(len(k),M)+1)
                        for (_,v,_,_,_) in g[k[-m],k1]]):
                    continue
                for t in topLevelCovers(g, N, M, u+(u1,), k+(k1,),
                                        d_min+(d1_min,), d_max+(d1_max,),
                                        ts+(ts1,), narrow=narrow):
                    yield t
Python
def explain(causes, w, M=None, verbose=False, timeout=600, max_tlcovs=13000000, narrow=False):
    """
    Computes all explanations (top-level covers) for an observed sequence.
    Inputs:
        causes: A handle to the causes function.
        w: The observed sequence to be explained.
        M: The upper bound on the length of any effect sequence in the causal relation.
        verbose: Boolean flag for whether to print out status updates.
        timeout: The maximum number of seconds to allow explain to run.
        max_tlcovs: The maximum number of top-level covers to enumerate.
        narrow: passed to topLevelCovers
    Outputs:
        status: String indicating exit status: "Success", "SS covers timed out",
                "TL covers timed out", or "TL covers maxed out".
        tlcovs: A list of top-level covers as generated by topLevelCovers.
        g: The table of singleton sub-covers
    """
    if M is None:
        M = len(w)
    if verbose:
        print("Constructing explanations...")
    tlcovs = []
    start = time.process_time()
    status, g = singletonSubCovers(causes, M, w, verbose=verbose, timeout=timeout)
    if status == False:
        if verbose:
            print("singletonSubCovers timed out :(")
        return "SS covers timed out", tlcovs, g
    for t in topLevelCovers(g, len(w), M, narrow=narrow):
        tlcovs.append(t)
        if time.process_time()-start > timeout:
            if verbose:
                print("topLevelCovers timed out :(")
            return "TL covers timed out", tlcovs, g
        if len(tlcovs) > max_tlcovs:
            if verbose:
                print("topLevelCovers maxed out :(")
            return "TL covers maxed out", tlcovs, g
    if verbose:
        print("Success!")
    return "Success", tlcovs, g
Python
def irredundantTLCovers(tlcovs, timeout=300):
    """
    Prune top-level covers for irredundancy
    Inputs:
        tlcovs: A list of top-level covers as returned by explain.
        timeout: The maximum number of seconds to allow irredundantTLCovers to run.
    Outputs:
        status: True if the run finished, False if it timed out.
        tlcovs_irr: The pruned top level covers.
    """
    tlcovs_irr = []
    start = time.process_time()
    for (u,k,d_min,d_max,ts) in tlcovs:
        u_is_irr = True  # until proven otherwise
        for (other_u,_,_,_,_) in tlcovs:
            if time.process_time()-start > timeout:
                return False, tlcovs_irr
            # skip u1
            if u==other_u:
                continue
            # check if other_u is a sub-sequence of u
            if len(other_u) > len(u):
                is_sub_seq = False
            else:
                is_sub_seq = True  # until proven otherwise
                u_tail = u
                for other_u_i in other_u:
                    if other_u_i in u_tail:
                        u_tail = u_tail[u_tail.index(other_u_i)+1:]
                    else:
                        is_sub_seq = False
                        break
            # other_u is a sub-sequence of u, u is redundant
            if is_sub_seq:
                u_is_irr = False
                break
        if u_is_irr:
            tlcovs_irr.append((u,k,d_min,d_max,ts))
    return True, tlcovs_irr
Python
def read_qr_code(image_file):
    '''
    Reads QR codes from the indicated image file and returns a list of data.

    Parameters:
        image_file <string> The image file to read.

    Returns:
        List<string> The string contained in each detected QR code.
    '''
    # Load image.
    try:
        image = Image.open(image_file)
    except OSError:
        print('File "{f}" doesn\'t appear to be an image... skipping.'.format(f=image_file))
        return []

    # We must convert the image to greyscale. However, any parts of the
    # image that contain an alpha channel are converted to black. Thus,
    # we replace the transparency with white before converting to
    # greyscale.
    if image.mode == 'RGBA':
        white_bg = Image.new('RGBA', image.size, (255, 255, 255))
        image = Image.alpha_composite(white_bg, image)
    image = image.convert('L')  # L = greyscale

    # 2D numpy array of uint8
    pix = numpy.array(image)

    # Parse QR codes in the image
    res = SCANNER.scan(pix)

    # Extract data
    return [r.data for r in res]
Python
def reconstruct_files_from_qr(qr_files, output_directory=''):
    '''
    Reconstructs files from a list of QR codes. QR codes containing multiple
    files may be passed into this function, and each file will be written as
    expected. A file is only constructed if all of its QR codes are present.

    Parameters:
        qr_files List<string> A list of QR file paths.
        output_directory <string> [Optional] The directory to save the
            reconstructed files. This directory is created if it doesn't exist.
    '''
    # Try to create the output directory if it doesn't exist.
    if not confirm_dir(output_directory):
        LOGGER.warn('Failed to create output directory: %s', output_directory)
        return

    LOGGER.debug('Parsing %d QR files', len(qr_files))

    file_data = {}  # FileName -> {name: fileName, data: Array of b64}
    for f in qr_files:
        # Read image and detect QR codes contained within
        qr_json_list = read_qr_code(f)
        LOGGER.debug('Found %d QR codes in file %s', len(qr_json_list), f)

        # For each QR found in the image
        for qr_json in qr_json_list:
            qr_payload = json.loads(qr_json)

            # Extract fields
            chunk = qr_payload['chunkNumber']
            totalChunks = qr_payload['totalChunks']
            name = qr_payload['name']
            data = qr_payload['data']

            LOGGER.debug('qr_file: %s', f)
            LOGGER.debug('\tname: %s', name)
            LOGGER.debug('\tchunk: %d/%d', chunk, totalChunks)

            # Haven't seen this file yet, so initialize a new structure
            # in `file_data`.
            if not name in file_data:
                b64_data = [None] * (totalChunks + 1)
                file_data[name] = {'name': name, 'data': b64_data}

            # Save data into structure
            file_data[name]['data'][chunk] = data

    # For each file we read in...
    for f_id, f_info in file_data.items():
        name = f_info['name']
        data = f_info['data']

        # Verify all chunks are present for the indicated file
        LOGGER.debug('Analyzing data for ID %s, name: %s', f_id, name)
        all_data_present = all(x is not None for x in data)

        if all_data_present:
            # All chunks present? Write back to file.
            LOGGER.debug('All data present for file: %s', name)
            complete_b64 = ''.join(data)
            b64_to_file(complete_b64, os.path.join(output_directory, name))
            print('Successfully decoded file: {f}'.format(f=name))
        else:
            # Compute missing data chunks
            missing_chunks = [i for i, e in enumerate(data) if e is None]
            print('Missing QR codes {mc} for file: {f}'.format(f=name, mc=missing_chunks))
Python
def b64_to_file(b64_data, output_file):
    '''
    Create a file from a base 64 string.

    Parameters:
        b64_data <string> The file encoded as a base 64 string.
        output_file <string> The location to save the decoded file.
    '''
    with open(output_file, 'wb') as f:
        file_data = b64decode(b64_data)
        f.write(file_data)
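A small round-trip sketch of the decode step above, using only the standard library; the function is re-declared here so the snippet stands alone, and the file name is illustrative.

from base64 import b64decode, b64encode

def b64_to_file(b64_data, output_file):
    # decode the base64 payload and write the raw bytes back to disk
    with open(output_file, 'wb') as f:
        f.write(b64decode(b64_data))

original = b'hello, qr world'
encoded = b64encode(original).decode('ascii')
b64_to_file(encoded, 'roundtrip.bin')
with open('roundtrip.bin', 'rb') as f:
    assert f.read() == original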
Python
def write_qr_code(output_file, data):
    '''
    Writes the data to a QR code, which is saved to the indicated file
    as a PNG.

    Parameters:
        output_file <string> The file to save the QR code to, encoded as PNG.
        data <string> The data to encode into the QR code.
    '''
    img = qrcode.make(data)
    img.save(output_file)
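A usage sketch, assuming the qrcode package that the record relies on is installed; the payload string and file name are illustrative.

import qrcode

# encode one JSON chunk payload into a PNG, as convert_file_to_qr does per chunk
img = qrcode.make('{"chunkNumber":0,"totalChunks":0,"name":"demo.txt","data":"aGk="}')
img.save('demo_q0.png')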
Python
def convert_file_to_qr(input_file, output_directory=''):
    '''
    Converts the specified file to a series of QR code images.
    The images are saved in the output directory.

    Parameters:
        input_file <string> The file to convert to QR codes.
        output_directory <string> [Optional] The directory to save the
            generated QR codes to. This directory is created if it doesn't exist.
    '''
    # Try to create the output directory if it doesn't exist.
    if not confirm_dir(output_directory):
        LOGGER.warn('Failed to create output directory: %s', output_directory)
        return

    with open_file(input_file, 'rb') as (f, err):
        # Bad file open handling
        if err:
            print('Unable to read input file {f}. More info below:'.format(f=input_file))
            print('\t{e}'.format(e=err))
            return

        # Read the file as binary and convert to b64
        data = f.read()
        b64_data = b64encode(data).decode('ascii')
        b64_data_len = len(b64_data)

        # Split into chunks.
        # This is required to keep QR codes to a parseable size.
        num_bytes = DATA_PER_CHUNK_BYTES
        num_chunks = math.ceil(len(b64_data) / num_bytes)
        input_file_name = os.path.basename(input_file)

        print('Encoding file {f}...'.format(f=input_file_name))
        LOGGER.debug('b64_data_len: %d', b64_data_len)
        LOGGER.debug('num_chunks: %d', num_chunks)
        LOGGER.debug('input_file_name: %s', input_file_name)

        # Write each chunk into a QR code
        for i in range(0, num_chunks):
            # Start and stop indices of the b64 string for this chunk
            start_index = num_bytes * i
            end_index = num_bytes * (i + 1)
            LOGGER.debug('start_index: %d', start_index)
            LOGGER.debug('end_index: %d', end_index)

            # Construct payload to be placed into the QR code
            payload = {  # len = 38 w/o name and data
                'chunkNumber': i,                         # This chunk of the file
                'totalChunks': num_chunks - 1,            # Total chunks of the file
                'name': input_file_name,                  # File name
                'data': b64_data[start_index:end_index]   # limit is ~650. Go 625 to be safe
            }

            # Dump to JSON with special separators to save space
            payload_json = json.dumps(payload, separators=(',',':'))
            LOGGER.debug('json dumps length {test}'.format(test=len(payload_json)))

            # Write QR code to file
            qr_file_name = '{file}_q{count}.png'.format(file=input_file_name, count=i)
            qr_file = os.path.join(output_directory, qr_file_name)
            LOGGER.debug('qr_file: %s', qr_file)
            write_qr_code(qr_file, payload_json)

    # Status msg
    print('Encoded file {f} in {n} QR codes.'.format(f=input_file_name, n=num_chunks))
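A sketch of the chunking arithmetic used above: base64 inflates the file by roughly 4/3, and the number of QR codes is the ceiling of that length over the per-chunk budget. The 625-byte budget is an assumption mirroring the "Go 625 to be safe" comment in the record.

import math
from base64 import b64encode

DATA_PER_CHUNK_BYTES = 625                   # assumed per-code budget
raw_size = 10_000                            # bytes in the original file
b64_len = len(b64encode(bytes(raw_size)))    # base64 length of raw_size zero bytes
num_chunks = math.ceil(b64_len / DATA_PER_CHUNK_BYTES)
print(b64_len, num_chunks)  # 13336 characters -> 22 QR codes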
Python
def _generate_examples_iter(self, holdout: str) -> Iterator[Example]:
    """Yield all project files, one project at a time"""

    def read_file(path):
        with open(path, "rt", encoding="utf-8", errors="ignore") as f:
            return f.read()

    bucket_to_shuffle: List[Example] = []
    assert self._processed_projects is not None
    for project in self._processed_projects[holdout]:
        examples = (
            Example(language, proj_name, filename, read_file(path))
            for language, proj_name, filename, path in project
        )
        bucket_to_shuffle.extend(
            example
            for example in examples
            if GitProjectExtractor._is_good_example(example.language, example.file_name, example.source_code)
        )
        if len(bucket_to_shuffle) > _BUCKET_SIZE:
            self._rng.shuffle(bucket_to_shuffle)
            yield from bucket_to_shuffle
            bucket_to_shuffle = []
    yield from bucket_to_shuffle
Python
def _process_projects(
    self, projects: List[Tuple[str, List[Tuple[str, str]]]]
) -> Tuple[List[List[Tuple[str, str, str, str]]], List[str], int]:
    """Search for projects, extract real project names from dataset

    :param projects: output of _get_files_projects.
    :return: a Tuple, first item of which is a List, each item of which represents
        a single GitHub project and is itself a List, each item of which represents
        a single file in the project which is written in the specified language and
        is itself a Tuple, first item of which is the path to a file in the project
        structure, the second one is the path to the file in our dataset structure,
        the third one is the language of the file.
        second item is the length of projects list.
    """
    processed_projects = []
    skipped_projects = []
    files_amount = 0
    for project_name, files in projects:
        author, repo, branch, filename = files[0][0].split(os.sep)[-4:]
        paths_dict_path = os.path.join(
            self._path, "repositories", author, repo, branch, "paths.json",
        )
        if os.path.exists(paths_dict_path):
            with open(paths_dict_path, "rt") as f:
                paths_dict = json.load(f)
            names_and_paths = []
            for (file, lang) in files:
                if os.path.basename(file) in paths_dict:
                    names_and_paths.append(
                        (
                            lang,
                            project_name,
                            paths_dict[os.path.basename(file)],
                            file,
                        )
                    )
            processed_projects.append(names_and_paths)
            files_amount += len(names_and_paths)
        else:
            skipped_projects.append(f"{author}/{repo}")
    return processed_projects, skipped_projects, files_amount
Python
def to_torch(self, tokenizer: Tokenizer) -> Data:
    """Convert this graph into torch-geometric graph

    :param tokenizer: tokenizer to convert token parts into ids
    :return:
    """
    node_tokens = [n.token for n in self.nodes]
    encoded = tokenizer.encode_batch(node_tokens)
    token = torch.tensor([enc.ids for enc in encoded], dtype=torch.long)
    node_type = torch.tensor([n.type.value for n in self.__nodes], dtype=torch.long)
    edge_index = torch.tensor(
        list(zip(*[[e.from_node.id, e.to_node.id] for e in self.__edges])), dtype=torch.long
    )
    edge_type = torch.tensor([e.type.value for e in self.__edges], dtype=torch.long)
    # save token to `x` so Data can calculate properties like `num_nodes`
    return Data(x=token, node_type=node_type, edge_index=edge_index, edge_type=edge_type)
Python
def draw(self, height: int = 1000, width: int = 1000, notebook: bool = True) -> Network:
    """Visualize graph using [pyvis](https://pyvis.readthedocs.io/en/latest/) library

    :param graph: graph instance to visualize
    :param height: height of target visualization
    :param width: width of target visualization
    :param notebook: pass True if visualization should be displayed in notebook
    :return: pyvis Network instance
    """
    net = Network(height=height, width=width, directed=True, notebook=notebook)
    net.barnes_hut(gravity=-10000, overlap=1, spring_length=1)

    for node in self.nodes:
        net.add_node(node.id, label=node.token, group=node.type.value,
                     title=f"id: {node.id}\ntoken: {node.token}")
    for edge in self.edges:
        net.add_edge(edge.from_node.id, edge.to_node.id, label=edge.type.name, group=edge.type.value)

    return net
Python
def build_graph(
    source_code, monitoring: Monitoring, type_lattice: TypeLatticeGenerator
) -> Tuple[Optional[List], Optional[List]]:
    """
    Parses the code of a file into a custom abstract syntax tree.
    """
    try:
        visitor = AstGraphGenerator(source_code, type_lattice)
        return visitor.build()
    except FaultyAnnotation as e:
        logger.warning("Faulty Annotation: ", e)
        logger.warning("at file: ", monitoring.file)
    except SyntaxError as e:
        monitoring.found_error(e, traceback.format_exc())
    except Exception as e:
        logger.warning(traceback.format_exc())
        monitoring.found_error(e, traceback.format_exc())
Python
def explore_files(
    root_dir: str,
    duplicates_to_remove: Set[str],
    monitoring: Monitoring,
    type_lattice: TypeLatticeGenerator,
) -> Iterator[Tuple]:
    """
    Walks through the root_dir and processes each file.
    """
    for file_path in iglob(os.path.join(root_dir, "**", "*.py"), recursive=True):
        if file_path in duplicates_to_remove:
            logger.warning("Ignoring duplicate %s" % file_path)
            continue
        logger.warning(file_path)
        if not os.path.isfile(file_path):
            continue
        with open(file_path, encoding="utf-8", errors="ignore") as f:
            monitoring.increment_count()
            monitoring.enter_file(file_path)
            repo = file_path.replace(root_dir, "").split("/")[0]
            if monitoring.current_repo != repo:
                monitoring.enter_repo(repo)
                type_lattice.build_graph()
            graph = build_graph(f.read(), monitoring, type_lattice)
            if graph is None or len(graph["supernodes"]) == 0:
                continue
            graph["filename"] = file_path[len(root_dir):]
            yield graph
Python
def __cook_references(self, references: List[str]) -> Tuple[List[int], Dict[Tuple[str, ...], int]]:
    """Takes a list of reference sentences for a single segment and returns an object
    that encapsulates everything that BLEU needs to know about them.
    """
    max_counts: Dict[Tuple[str, ...], int] = {}
    for ref in references:
        counts = self.__count_ngrams(self.__normalize(ref))
        for (n_gram, count) in counts.items():
            max_counts[n_gram] = max(max_counts.get(n_gram, 0), count)
    return [len(ref) for ref in references], max_counts
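A standalone sketch of the clipped n-gram counting idea behind __cook_references and the __cook_hypothesis record that follows: reference counts are the per-n-gram maximum across references, and hypothesis counts are clipped against them. Tokenization here is a plain split(), purely for illustration.

from collections import Counter

def count_ngrams(words, max_n=4):
    counts = Counter()
    for n in range(1, max_n + 1):
        for i in range(len(words) - n + 1):
            counts[tuple(words[i:i + n])] += 1
    return counts

refs = ["the cat is on the mat", "there is a cat on the mat"]
hyp = "the the the cat on the mat"

# per-n-gram maximum count over all references
max_counts = Counter()
for ref in refs:
    for ng, c in count_ngrams(ref.split()).items():
        max_counts[ng] = max(max_counts[ng], c)

# hypothesis counts clipped by the reference maxima
hyp_counts = count_ngrams(hyp.split())
correct_unigrams = sum(min(max_counts[ng], c) for ng, c in hyp_counts.items() if len(ng) == 1)
print(correct_unigrams)  # 5: 'the' is clipped to 2, plus cat/on/mat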
Python
def __cook_hypothesis(self, hypothesis: str, references: Tuple[List[int], Dict[Tuple[str, ...], int]]):
    """Takes a test sentence and returns an object that encapsulates everything
    that BLEU needs to know about it."""
    (ref_lens, ref_max_counts) = references
    hypothesis_words = self.__normalize(hypothesis)
    result: Dict[str, Any] = {"test_len": len(hypothesis_words)}

    # Calculate effective reference sentence length.
    if self.__eff_ref_len == "shortest":
        result["ref_len"] = min(ref_lens)
    elif self.__eff_ref_len == "average":
        result["ref_len"] = float(sum(ref_lens)) / len(ref_lens)
    elif self.__eff_ref_len == "closest":
        min_diff = None
        for ref_len in ref_lens:
            if min_diff is None or abs(ref_len - len(hypothesis_words)) < min_diff:
                min_diff = abs(ref_len - len(hypothesis_words))
                result["ref_len"] = ref_len
    else:
        raise NotImplementedError(f"Unknown value for effective reference sentence length: {self.__eff_ref_len}")

    result["guess"] = [max(len(hypothesis_words) - k + 1, 0) for k in range(1, self.__n_grams + 1)]
    result["correct"] = [0] * self.__n_grams
    counts = self.__count_ngrams(hypothesis_words)
    for (n_gram, count) in counts.items():
        result["correct"][len(n_gram) - 1] += min(ref_max_counts.get(n_gram, 0), count)

    return result
Python
def _order_by_required_first(self, parameter):
    """Helper to order by whether a parameter is required"""
    if parameter.get("optional", False):
        return 1
    else:
        return 0
Python
def find_previous_id(self, element): """Get the first ID from a previous sibling""" id_element = next(( id_element for id_element in filter(None, ( sibling for parent in reversed(element.find_parents()) for sibling in parent.find_previous_siblings(None, {'id': True}) )) if id_element.name != "input" ), None) if not id_element: return '' return id_element.attrs['id']
Python
def load_documentation(self, path): """Load the documentation from a page""" with open(path, "rb") as f: source = f.read().decode() return source
Python
def parse_commands(self, title): """Parse the commands from the section title""" if ':' in title: commands_str = title.split(':')[0].strip() else: match = self.RE_COMMAND.match(title) if not match: return None commands_str = match.group(1) if '&' in commands_str: commands = [ command.strip() for command in commands_str.split('&') ] elif '..' in commands_str: start_str, end_str = commands_str.replace('G', '').split('..') start, end = int(start_str), int(end_str) commands = [ f"G{number}" for number in range(start, end + 1) ] else: commands = [commands_str] return commands
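A self-contained copy of the colon-prefixed branch above, with a couple of illustrative titles; the RE_COMMAND fallback for titles without a colon is left out because the pattern itself is not shown here:

def parse_commands_from_title(title):
    """Standalone sketch of the '&' list and '..' range handling above."""
    commands_str = title.split(':')[0].strip()
    if '&' in commands_str:
        return [command.strip() for command in commands_str.split('&')]
    if '..' in commands_str:
        start, end = (int(part) for part in commands_str.replace('G', '').split('..'))
        return [f"G{number}" for number in range(start, end + 1)]
    return [commands_str]

assert parse_commands_from_title("G0 & G1: Linear Move") == ["G0", "G1"]
assert parse_commands_from_title("G10..G12: Retract") == ["G10", "G11", "G12"]
assert parse_commands_from_title("G28: Auto Home") == ["G28"]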
Python
def parse_sections(self, lines): """Parse the sections from the page lines""" section_indexes = [ index for index, line in enumerate(lines) if line.startswith(';') ] if not section_indexes: return [ { "title": "", "lines": lines, }, ] return [ { "title": "", "lines": lines[:section_indexes[0]], }, ] + [ { "title": lines[index], "lines": lines[index + 1:section_index], } for index, section_index in zip(section_indexes, section_indexes[1:] + [len(lines)]) ]
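A small self-contained demonstration of the splitting behaviour, assuming the same ';'-prefixed section-title convention:

lines = [
    "intro line",
    "; First section",
    "body a",
    "body b",
    "; Second section",
    "body c",
]
section_indexes = [i for i, line in enumerate(lines) if line.startswith(';')]
sections = (
    [{"title": "", "lines": lines[:section_indexes[0]]}]
    + [{"title": lines[i], "lines": lines[i + 1:j]}
       for i, j in zip(section_indexes, section_indexes[1:] + [len(lines)])]
)
assert [s["title"] for s in sections] == ["", "; First section", "; Second section"]
assert sections[1]["lines"] == ["body a", "body b"]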
Python
def update_documentation( self, directories=None, output_directory=None, chatty=True, ): """Update the documentation and populate the output folder""" if chatty: parser_count = len(DocumentationUpdater.PARSER_REGISTRY.PARSERS) print(f"Updating using {parser_count} parsers") if output_directory is None: output_directory = Path(__file__).parent / "output" codes_list = [] ids_to_update = set() if not self.PARSER_REGISTRY.PARSERS: raise Exception(f"No parsers have been registered") for _id, parser in self.PARSER_REGISTRY.PARSERS.items(): if directories is None: directory = None else: if _id not in directories: continue directory = directories[_id] gcodes = parser().load_and_parse_all_codes(directory) self.attach_id_to_docs(gcodes) codes_list.append(gcodes) ids_to_update.add(parser.ID) if not codes_list: raise Exception("No sources set to be updated") if set(self.PARSER_REGISTRY.PARSERS) - ids_to_update: all_codes = self.load_existing_codes( ids_to_update, output_directory, ) else: all_codes = {} self.merge_codes(all_codes, codes_list) self.sort_codes(all_codes) self.save_codes_to_js(all_codes, output_directory) if chatty: code_count = len(all_codes) definition_count = sum(map(len, all_codes.values())) source_count = len({ definition['source'] for definitions in all_codes.values() for definition in definitions }) print( f"Parsed {code_count} codes, " f"with {definition_count} definitions in total, " f"from {source_count} sources" ) return all_codes
Python
def attach_id_to_docs(self, codes): """Attach a unique ID to each definition""" for code in list(codes): codes[code] = [ dict(value, **{ "id": value.get("id", f"{value['source']}.{code}[{index}]") }) for index, value in enumerate(codes[code]) ]
Python
def load_existing_codes(self, ids_to_update, output_directory): """Load existing codes from a previous run""" path = output_directory / f"all_codes{self.OUTPUT_PREFIXES['.json']}" expected_prefix = self.OUTPUT_PREFIXES[".json"] with open(path) as f: prefix = f.read(len(expected_prefix)) if prefix != expected_prefix: raise Exception( f"Prefix in JS file ('{prefix}') didn't match expected " f"prefix ('{expected_prefix}')") all_codes = json.load(f) sources_to_update = [ self.PARSER_REGISTRY.PARSERS[_id].SOURCE for _id in ids_to_update ] for code, values in list(all_codes.items()): all_codes[code] = [ value for value in values if value["source"] not in sources_to_update ] return all_codes
Python
def load_and_parse_all_codes(self, directory):
    """
    Return a dictionary of code => doc_items

    The doc_item should have these attributes:
    * title: a short name for the command
    * brief: a description of what the command does
    * parameters: a list of parameter descriptions

    A parameter should have these attributes:
    * tag: the name of the parameter (eg 'X', 'Y', etc), that will
      be used when matching a user command
    * label: a label about its value to be displayed to the user
      (eg '[X<pos>]')
    """
    raise NotImplementedError()
Python
def populate_temporary_directory(self, directory): """ Populate a temporary directory with the necessary documentation source. """ raise NotImplementedError()
Python
def _verify(self, data): """Verifies some data against the tag""" if self._algo == "none": return elif self._algo == "sha256": self._verifier_sha256(data) else: raise RuntimeError(f"Unexpected verifier algorithm (got { self._algo })")
Python
def _verifier_sha256(self, data): """Verifies a SHA-256 digest""" # Compute the hash sha256 = hashlib.sha256() sha256.update(data) tag = sha256.hexdigest() # Validate the digest if tag != self._tag: raise RuntimeError(f"Unexpected checksum (expected { self._tag }; got { tag })")
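The same check as a standalone helper, assuming the expected tag is a hexadecimal digest string:

import hashlib

def sha256_matches(data: bytes, expected_hex: str) -> bool:
    """Return True when the SHA-256 hex digest of `data` equals `expected_hex`."""
    return hashlib.sha256(data).hexdigest() == expected_hex

payload = b"hello"
tag = hashlib.sha256(payload).hexdigest()
assert sha256_matches(payload, tag)
assert not sha256_matches(b"tampered", tag)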
Python
def fetch(self): """Downloads, verifies and extracts the tarball; returns a handle to `self` for method chaining""" # Fetch the tarball tarball_data = UriResource(self._uri).fetch() self._tempfile.write(tarball_data) self._tempfile.seek(0) # Extract archive and find sourcedir tarball = tarfile.open(fileobj=self._tempfile) tarball.extractall(path=self._tempdir.name) self._srcdir = self._find_srcdir() return self
Python
def _defParseDatetime(self, time):
    """
    Allow different ways to provide a time and parse it to a datetime.
    """
    if time:
        if isinstance(time, int):
            return datetime.now() + timedelta(minutes=time)
        elif isinstance(time, datetime):
            return time
        elif isinstance(time, (timedelta, )):
            return datetime.now() + time
        elif isinstance(time, str):
            # TODO
            return datetime.now()
        else:
            raise ValueError('Unable to parse %s as datetime' % time)
    else:
        return datetime.now()
Python
def _encodeRequest(self, request, data=None): """ encode the request parameters in the expected way """ if isinstance(request, (list, tuple)): request = "".join(request) if data: resStr = request % data else: resStr = request # log.debug('DATA RAW: %s' % resStr) res = urllib.parse.quote(resStr).replace('%26', '&').replace('%2B', '+').replace('%3D', '=').replace('/', '%2F') # log.debug('DATA ENC: %s' % res) return res
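A quick illustration of what the quote-then-restore step above produces, runnable on its own with the standard library; the sample string is made up:

import urllib.parse

raw = 'results[5][5][function]=ws_info_stop&results[5][5][data]=[{"name":"stop","value":"main station"}]'
encoded = (urllib.parse.quote(raw)
           .replace('%26', '&')
           .replace('%2B', '+')
           .replace('%3D', '=')
           .replace('/', '%2F'))
# '=' and '&' are restored so they keep acting as form separators,
# while spaces, quotes and brackets stay percent-encoded.
print(encoded)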
Python
def _getConnectionParams(self, stationFrom, stationTo, conTime):
    """ builds the parameter structure for a connection call """
    transport = list(self.TRANSPORTMAP.keys())
    res = [
        'results[5][5][function]=ws_find_connections&results[5][5][data]=[',
        '{"name":"results[5][5][is_extended]","value":""},',
        '{"name":"results[5][5][from_opt]","value":"3"},',
        '{"name":"results[5][5][from]","value":"%(from)s"},',
        '{"name":"results[5][5][from_lat]","value":""},',
        '{"name":"results[5][5][from_lng]","value":""},',
        '{"name":"results[5][5][to_opt]","value":"3"},',
        '{"name":"results[5][5][to]","value":"%(to)s"},',
        '{"name":"results[5][5][to_lat]","value":""},',
        '{"name":"results[5][5][to_lng]","value":""},',
        '{"name":"results[5][5][via]","value":""},',
        '{"name":"results[5][5][via_lat]","value":""},',
        '{"name":"results[5][5][via_lng]","value":""},',
        '{"name":"results[5][5][time_mode]","value":"departure"},',
        '{"name":"results[5][5][time]","value":"%(time)s"},',
        '{"name":"results[5][5][date]","value":"%(date)s"},',
    ]
    # stationFrom = stationFrom.encode('utf-8') if isinstance(stationFrom, str) else stationFrom
    # stationTo = stationTo.encode('utf-8') if isinstance(stationTo, str) else stationTo
    for atransport in transport:
        res.append('{"name":"results[5][2][means_of_transport][]","value":"%s"},' % atransport)
    res.append('{"name":"results[5][2][mode]","value":"connection"}]')
    return self._encodeRequest(res, {
        'from': stationFrom.replace(' ', '+'),
        'to': stationTo.replace(' ', '+'),
        'time': conTime.strftime('%H:%M'),
        'date': conTime.strftime('%d.%m.%Y'),
    })
Python
def _getStationParams(self, stop, time):
    """ builds the parameter structure for a station request """
    res = [
        'results[5][5][function]=ws_info_stop&results[5][5][data]=[',
        '{"name":"results[5][5][stop]","value":"%(stop)s"},',
        '{"name":"results[5][5][date]","value":"%(date)s"},',
        '{"name":"results[5][5][time]","value":"%(time)s"},',
        '{"name":"results[5][5][mode]","value":"stop"}]'
    ]
    # stop = stop.encode('utf-8') if isinstance(stop, str) else stop
    data = {
        'stop': stop.replace(' ', '+'),
        'date': time.strftime('%d.%m.%Y'),
        'time': time.strftime('%H:%M'),
    }
    # log.debug('PARMS: %s' % pformat('\n'.join(res) % data))
    return self._encodeRequest(res, data)
Python
def play():
    """Gets players' data and runs the game"""
    print('\nWelcome, strangers!\n',
          '\nPlease, define your grid, "3 3" for example')
    print('\nRemember, you input the width first!')
    # Importing 'Board' class settings
    board = Board()
    # Setting size of grid
    grid_size = list(map(int, input().split()))
    width, height = grid_size[0], grid_size[1]
    board.build_grid(width, height)
    board.print_grid()
    print('You will need to choose a column and a row number to make a move'
          '\nGood luck!')
    # Setting turns' counter
    turn = 0
    # Defining size of a win row
    win_row_size = min(grid_size)
    print('"X" player, introduce yourself')
    player_1 = input()
    print('"O" player, introduce yourself')
    player_2 = input()
    winner = None
    # Setting game flag
    game = True
    while game:
        turn += 1
        board.print_grid()
        print(board.define_player(player_1, player_2, turn) + ','
              '\nYour turn!')
        # Getting player's move coordinates
        x_coordinate, y_coordinate = board.get_coordinates()
        board.update_grid(x_coordinate, y_coordinate, turn)
        if board.check_for_winner(x_coordinate, turn, win_row_size):
            board.print_grid()
            winner = player_2 if turn % 2 == 0 else player_1
            print('GAME OVER')
            print(winner, 'wins')
            game = not game
        elif board.check_for_draw():
            board.print_grid()
            print('GAME OVER')
            print('Friendship wins')
            game = not game
Python
def check_for_draw(self) -> bool: """Checks grid to be totally filled""" for row in self.rows: for elem in row: if elem == '': return False return True
Python
def check_coordinates(self, x: str, y: str) -> bool: """Checks whether the first element is suitable letter and second element is in the grid's borders """ if (x not in self.row_names or not y.isdigit() or int(y) > len(self.rows) or int(y) <= 0): print('Incorrect letter of a column or number of a line') return False return True
Python
def print_grid(self):
    """Converts and prints the grid in "PrettyTable" view"""
    grid = PrettyTable()
    # Getting letters for column names
    grid.field_names = self.row_names
    # Adding the matrix with new data
    grid.add_rows(self.rows)
    # Align the numbering of lines
    grid.align[''] = 'l'
    print(grid)
Python
def vgg16_mura_model(path): """Get a vgg16 model. The model can classify bone X-rays into three categories: wrist, shoulder and elbow. Args: path: A string, if there's no model in the path, it will download the weights automatically. Return: A tf.keras model object. """ model_path = path if os.path.exists(model_path): model = load_model(model_path) else: print("downloading the weights of model to", path, "...") _download_file_from_google_drive( "175QH-aIvlLvxrUGyCEpfQAQ5qiVfE_s5", model_path) print("done.") model = load_model(model_path) return model
Python
def preprocess_image(img_path, target_size=(224, 224)): """Preprocess the image by reshape and normalization. Args: img_path: A string. target_size: A tuple, reshape to this size. Return: An image ndarray. """ img = image.load_img(img_path, target_size=target_size) img = image.img_to_array(img) img /= 255 return img
Python
def show_imgwithheat(img_path, heatmap, alpha=0.4, return_array=False): """Show the image with heatmap. Args: img_path: string. heatmap: image array, get it by calling grad_cam(). alpha: float, transparency of heatmap. return_array: bool, return a superimposed image array or not. Return: None or image array. """ img = cv2.imread(img_path) heatmap = cv2.resize(heatmap, (img.shape[1], img.shape[0])) heatmap = (heatmap*255).astype("uint8") heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET) superimposed_img = heatmap * alpha + img superimposed_img = np.clip(superimposed_img, 0, 255).astype("uint8") superimposed_img = cv2.cvtColor(superimposed_img, cv2.COLOR_BGR2RGB) imgwithheat = Image.fromarray(superimposed_img) display(imgwithheat) if return_array: return superimposed_img
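A sketch of how these three helpers would typically be chained, assuming a grad_cam function (referenced in the docstring above but not shown here) and hypothetical file paths:

model = vgg16_mura_model("MURA_vgg16.h5")        # downloads the weights on first use
img = preprocess_image("elbow_xray.png")         # (224, 224, 3), scaled to [0, 1]
heatmap = grad_cam(model, img)                   # assumed signature; grad_cam is defined elsewhere
show_imgwithheat("elbow_xray.png", heatmap, alpha=0.4)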
Python
def define_bin_edges(self, x1, x2=None, weights=None, cache=True): """Given data, return the edges of the histogram bins.""" if x2 is None: bin_edges = self._define_bin_edges( x1, weights, self.bins, self.binwidth, self.binrange, self.discrete, ) else: bin_edges = [] for i, x in enumerate([x1, x2]): # Resolve out whether bin parameters are shared # or specific to each variable bins = self.bins if bins is None or isinstance(bins, (str, Number)): pass elif isinstance(bins[i], str): bins = bins[i] elif len(bins) == 2: bins = bins[i] binwidth = self.binwidth if binwidth is None: pass elif not isinstance(binwidth, Number): binwidth = binwidth[i] binrange = self.binrange if binrange is None: pass elif not isinstance(binrange[0], Number): binrange = binrange[i] discrete = self.discrete if not isinstance(discrete, bool): discrete = discrete[i] # Define the bins for this variable bin_edges.append(self._define_bin_edges( x, weights, bins, binwidth, binrange, discrete, )) bin_edges = tuple(bin_edges) if cache: self.bin_edges = bin_edges return bin_edges
Python
def strip_output(nb, keep_output=False, keep_count=False, extra_keys=''): """ Strip the outputs, execution count/prompt number and miscellaneous metadata from a notebook object, unless specified to keep either the outputs or counts. `extra_keys` could be 'metadata.foo cell.metadata.bar metadata.baz' """ if keep_output is None and 'keep_output' in nb.metadata: keep_output = bool(nb.metadata['keep_output']) if hasattr(extra_keys, 'decode'): extra_keys = extra_keys.decode() extra_keys = extra_keys.split() keys = {'metadata': [], 'cell': {'metadata': []}} for key in extra_keys: if key.startswith('metadata.'): keys['metadata'].append(key[len('metadata.'):]) elif key.startswith('cell.metadata.'): keys['cell']['metadata'].append(key[len('cell.metadata.'):]) else: sys.stderr.write('ignoring extra key `%s`' % key) nb.metadata.pop('signature', None) nb.metadata.pop('widgets', None) for field in keys['metadata']: pop_recursive(nb.metadata, field) for cell in _cells(nb): keep_output_this_cell = determine_keep_output(cell, keep_output) # Remove the outputs, unless directed otherwise if 'outputs' in cell: # Default behavior strips outputs. With all outputs stripped, # there are no counts to keep and keep_count is ignored. if not keep_output_this_cell: cell['outputs'] = [] # If keep_output_this_cell, but not keep_count, strip the counts # from the output. if keep_output_this_cell and not keep_count: for output in cell['outputs']: if 'execution_count' in output: output['execution_count'] = None # If keep_output_this_cell and keep_count, do nothing. # Remove the prompt_number/execution_count, unless directed otherwise if 'prompt_number' in cell and not keep_count: cell['prompt_number'] = None if 'execution_count' in cell and not keep_count: cell['execution_count'] = None # Always remove this metadata for output_style in ['collapsed', 'scrolled']: if output_style in cell.metadata: cell.metadata[output_style] = False if 'metadata' in cell: for field in ['collapsed', 'scrolled', 'ExecuteTime']: cell.metadata.pop(field, None) for (extra, fields) in keys['cell'].items(): if extra in cell: for field in fields: pop_recursive(getattr(cell, extra), field) return nb
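A typical way to drive this function, assuming nbformat is installed and strip_output is importable from the surrounding module; the notebook path and extra keys are illustrative:

import nbformat

nb = nbformat.read("analysis.ipynb", as_version=nbformat.NO_CONVERT)
nb = strip_output(nb, keep_output=False, keep_count=False,
                  extra_keys="metadata.celltoolbar cell.metadata.heading_collapsed")
nbformat.write(nb, "analysis.ipynb")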
Python
def add_legend_data(self, ax): """Add labeled artists to represent the different plot semantics.""" verbosity = self.legend if verbosity not in ["brief", "full"]: err = "`legend` must be 'brief', 'full', or False" raise ValueError(err) legend_kwargs = {} keys = [] title_kws = dict(color="w", s=0, linewidth=0, marker="", dashes="") def update(var_name, val_name, **kws): key = var_name, val_name if key in legend_kwargs: legend_kwargs[key].update(**kws) else: keys.append(key) legend_kwargs[key] = dict(**kws) # -- Add a legend for hue semantics if verbosity == "brief" and self._hue_map.map_type == "numeric": if isinstance(self._hue_map.norm, mpl.colors.LogNorm): locator = mpl.ticker.LogLocator(numticks=3) else: locator = mpl.ticker.MaxNLocator(nbins=3) limits = min(self._hue_map.levels), max(self._hue_map.levels) hue_levels, hue_formatted_levels = locator_to_legend_entries( locator, limits, self.plot_data["hue"].dtype ) elif self._hue_map.levels is None: hue_levels = hue_formatted_levels = [] else: hue_levels = hue_formatted_levels = self._hue_map.levels # Add the hue semantic subtitle if "hue" in self.variables and self.variables["hue"] is not None: update((self.variables["hue"], "title"), self.variables["hue"], **title_kws) # Add the hue semantic labels for level, formatted_level in zip(hue_levels, hue_formatted_levels): if level is not None: color = self._hue_map(level) update(self.variables["hue"], formatted_level, color=color) # -- Add a legend for size semantics if verbosity == "brief" and self._size_map.map_type == "numeric": # Define how ticks will interpolate between the min/max data values if isinstance(self._size_map.norm, mpl.colors.LogNorm): locator = mpl.ticker.LogLocator(numticks=3) else: locator = mpl.ticker.MaxNLocator(nbins=3) # Define the min/max data values limits = min(self._size_map.levels), max(self._size_map.levels) size_levels, size_formatted_levels = locator_to_legend_entries( locator, limits, self.plot_data["size"].dtype ) elif self._size_map.levels is None: size_levels = size_formatted_levels = [] else: size_levels = size_formatted_levels = self._size_map.levels # Add the size semantic subtitle if "size" in self.variables and self.variables["size"] is not None: update((self.variables["size"], "title"), self.variables["size"], **title_kws) # Add the size semantic labels for level, formatted_level in zip(size_levels, size_formatted_levels): if level is not None: size = self._size_map(level) update( self.variables["size"], formatted_level, linewidth=size, s=size, ) # -- Add a legend for style semantics # Add the style semantic title if "style" in self.variables and self.variables["style"] is not None: update((self.variables["style"], "title"), self.variables["style"], **title_kws) # Add the style semantic labels if self._style_map.levels is not None: for level in self._style_map.levels: if level is not None: attrs = self._style_map(level) update( self.variables["style"], level, marker=attrs.get("marker", ""), dashes=attrs.get("dashes", ""), ) func = getattr(ax, self._legend_func) legend_data = {} legend_order = [] for key in keys: _, label = key kws = legend_kwargs[key] kws.setdefault("color", ".2") use_kws = {} for attr in self._legend_attributes + ["visible"]: if attr in kws: use_kws[attr] = kws[attr] artist = func([], [], label=label, **use_kws) if self._legend_func == "plot": artist = artist[0] legend_data[key] = artist legend_order.append(key) self.legend_data = legend_data self.legend_order = legend_order
Python
def plot(self, ax, kws): """Draw the plot onto an axes, passing matplotlib kwargs.""" # Draw a test plot, using the passed in kwargs. The goal here is to # honor both (a) the current state of the plot cycler and (b) the # specified kwargs on all the lines we will draw, overriding when # relevant with the data semantics. Note that we won't cycle # internally; in other words, if ``hue`` is not used, all elements will # have the same color, but they will have the color that you would have # gotten from the corresponding matplotlib function, and calling the # function will advance the axes property cycle. scout, = ax.plot([], [], **kws) orig_color = kws.pop("color", scout.get_color()) orig_marker = kws.pop("marker", scout.get_marker()) orig_linewidth = kws.pop("linewidth", kws.pop("lw", scout.get_linewidth())) # Note that scout.get_linestyle() is` not correct as of mpl 3.2 orig_linestyle = kws.pop("linestyle", kws.pop("ls", None)) kws.setdefault("markeredgewidth", kws.pop("mew", .75)) kws.setdefault("markeredgecolor", kws.pop("mec", "w")) scout.remove() # Set default error kwargs err_kws = self.err_kws.copy() if self.err_style == "band": err_kws.setdefault("alpha", .2) elif self.err_style == "bars": pass elif self.err_style is not None: err = "`err_style` must be 'band' or 'bars', not {}" raise ValueError(err.format(self.err_style)) # Set the default artist keywords kws.update(dict( color=orig_color, marker=orig_marker, linewidth=orig_linewidth, linestyle=orig_linestyle, )) # Loop over the semantic subsets and add to the plot grouping_semantics = "hue", "size", "style" for sub_vars, sub_data in self._semantic_subsets(grouping_semantics): if self.sort: sub_data = sub_data.sort_values(["units", "x", "y"]) x = sub_data["x"] y = sub_data["y"] u = sub_data["units"] if self.estimator is not None: if "units" in self.variables: err = "estimator must be None when specifying units" raise ValueError(err) x, y, y_ci = self.aggregate(y, x, u) else: y_ci = None if "hue" in sub_vars: kws["color"] = self._hue_map(sub_vars["hue"]) if "size" in sub_vars: kws["linewidth"] = self._size_map(sub_vars["size"]) if "style" in sub_vars: attributes = self._style_map(sub_vars["style"]) if "dashes" in attributes: kws["dashes"] = attributes["dashes"] if "marker" in attributes: kws["marker"] = attributes["marker"] line, = ax.plot([], [], **kws) line_color = line.get_color() line_alpha = line.get_alpha() line_capstyle = line.get_solid_capstyle() line.remove() # --- Draw the main line x, y = np.asarray(x), np.asarray(y) if "units" in self.variables: for u_i in u.unique(): rows = np.asarray(u == u_i) ax.plot(x[rows], y[rows], **kws) else: line, = ax.plot(x, y, **kws) # --- Draw the confidence intervals if y_ci is not None: low, high = np.asarray(y_ci["low"]), np.asarray(y_ci["high"]) if self.err_style == "band": ax.fill_between(x, low, high, color=line_color, **err_kws) elif self.err_style == "bars": y_err = ci_to_errsize((low, high), y) ebars = ax.errorbar(x, y, y_err, linestyle="", color=line_color, alpha=line_alpha, **err_kws) # Set the capstyle properly on the error bars for obj in ebars.get_children(): try: obj.set_capstyle(line_capstyle) except AttributeError: # Does not exist on mpl < 2.2 pass # Finalize the axes details self.label_axes(ax) if self.legend: self.add_legend_data(ax) handles, _ = ax.get_legend_handles_labels() if handles: ax.legend()
Python
def run():
    """ Loads a gamefix module by its game id """
    log.info('Running game ' + game_id())
    if not game_id():
        return
    game = Game(game_id())
    localpath = os.path.expanduser('~/.local/share/steamfixes/localfixes')
    if os.path.isfile(os.path.join(localpath, game_id() + '.py')):
        sys.path.append(os.path.expanduser('~/.local/share/steamfixes'))
        try:
            game_module = import_module('localfixes.' + game_id())
            game_module.main(game)
        except ImportError:
            pass
    else:
        try:
            game_module = import_module('steamfixes.gamefixes.' + game_id())
            game_module.main(game)
            print('loaded module')
        except ImportError:
            pass
    game.run()
Python
def changeMovement(*args):
    """Function which modifies the linear and angular velocity of the robot"""
    movement = args[0]
    global linear_x
    global angular_z
    if (movement == "stop"):
        linear_x = 0
        angular_z = 0
    elif (movement == "forward"):
        if (linear_x >= 1):
            print("Cannot move forward any further.")
            return
        linear_x = linear_x + 0.2
    elif (movement == "backward"):
        if (linear_x <= -1):
            print("Cannot move backward any further.")
            return
        linear_x = linear_x - 0.2
    elif (movement == "left"):
        if (angular_z >= 1):
            print("Cannot turn left any further.")
            return
        angular_z = angular_z + 0.2
    elif (movement == "right"):
        if (angular_z <= -1):
            print("Cannot turn right any further.")
            return
        angular_z = angular_z - 0.2
    else:
        print("Instruction has not been understood.")

    linear = Vector3()
    linear.x = linear_x
    linear.y = 0
    linear.z = 0

    angular = Vector3()
    angular.x = 0
    angular.y = 0
    angular.z = angular_z

    instruction = Twist(linear, angular)
    pub.publish(instruction)
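This handler relies on a module-level publisher and two velocity globals; a minimal sketch of the assumed surrounding setup (the node and topic names are assumptions, not taken from the source):

import rospy
from geometry_msgs.msg import Twist, Vector3

rospy.init_node('keyboard_teleop')                        # assumed node name
pub = rospy.Publisher('/cmd_vel', Twist, queue_size=10)   # assumed topic name
linear_x = 0.0
angular_z = 0.0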
Python
def cli( dataset_name, area, outdir=None, end_date=None, frequency=None, driver_path=None, config_path=None, username=None, password=None, driver_flags=None, driver_prefs=None): """ Entry point for the pull_fb cli. """ pull_fb(dataset_name, area, outdir, end_date, frequency, driver_path, config_path, username, password, driver_flags, driver_prefs)
Python
def split_cons_vowel(word):
    '''Return a tuple consisting of the consonant and vowel parts
    '''
    # TODO Fix when left side is "gi"
    for idx, char in enumerate(word):
        if char in FULL_VOWELS:
            return (word[:idx], word[idx:])
    raise InvalidWord("Vietnamese word must contain at least 1 vowel")
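Expected behaviour, shown with a standalone copy that uses a reduced vowel set and a plain ValueError in place of the package's FULL_VOWELS constant and InvalidWord exception (both are assumptions here):

VOWELS = set("aăâeêioôơuưyáàéèíìóòúùýếềốồớờấầắằứừ")   # reduced set, for illustration only

def split_cons_vowel_demo(word):
    for idx, char in enumerate(word):
        if char in VOWELS:
            return (word[:idx], word[idx:])
    raise ValueError("Vietnamese word must contain at least 1 vowel")

assert split_cons_vowel_demo("toán") == ("t", "oán")
assert split_cons_vowel_demo("nghiêng") == ("ngh", "iêng")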
Python
def fetch_housing_data(housing_url = HOUSING_URL, housing_path = HOUSING_PATH,): ''' @brief fetch housing.tgz & extract to HOUSING_PATH @param housing_url the url of data @param housing_path the path to save data ''' #check dir/file if not os.path.isdir(housing_path): os.makedirs(housing_path) tgz_path = os.path.join(housing_path,'housing.tgz') #download urllib.request.urlretrieve(housing_url,tgz_path) housing_tgz = tarfile.open(tgz_path) housing_tgz.extractall(path = housing_path) housing_tgz.close()
Python
def transform(self, X):
    '''
    @brief select features from X by attribute names
    @param X all of the features
    '''
    return X[self.attribute_names].values
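This transform usually lives in a small selector transformer; a sketch of the full class as it is commonly written for scikit-learn pipelines (the class name and fit signature are assumed, not shown above):

from sklearn.base import BaseEstimator, TransformerMixin

class DataFrameSelector(BaseEstimator, TransformerMixin):
    """Select a subset of DataFrame columns and return them as a NumPy array."""
    def __init__(self, attribute_names):
        self.attribute_names = attribute_names

    def fit(self, X, y=None):
        return self

    def transform(self, X):
        return X[self.attribute_names].values

# Typical use: Pipeline([("selector", DataFrameSelector(num_attribs)), ("scaler", StandardScaler())])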
Python
def main(argv):
    """
    Classify the image given in argv by the KNN algorithm.
    If no image is given, run predictions over the whole test set.
    """
    train_set = load_train_data(TRAIN_SET_FILE)
    if 1 == len(argv):
        # No image argument given: generate predictions for the test set
        print('do predictions\r\n')
        test_set = load_test_data(TEST_SET_FILE)
        predictions = []
        for test_vector in test_set['images']:
            # Accumulated distance for each class label
            dists = np.array([0] * 10)
            # Indices, values and labels of the k nearest training images
            ks = {'indexs': [], 'values': [], 'labels': []}
            # Difference vectors between every training image and the test image
            vectors = np.array(train_set['images']) - np.array(test_vector)
            L2s = np.linalg.norm(vectors, ord=2, axis=1)
            for i in range(K):
                ks['indexs'].append(L2s.argmin())
                ks['values'].append(L2s[ks['indexs'][i]])
                ks['labels'].append(train_set['labels'][ks['indexs'][i]])
                # Mask this neighbour so the next argmin finds a new one
                # (np.delete returns a copy, so it cannot be used in place here)
                L2s[ks['indexs'][i]] = np.inf
                dists[ks['labels'][i]] += ks['values'][i]
            predictions.append(dists.argmax())
        with open(r'./predictions.csv', 'w') as f:
            f.write('ImageId,Label\n')
            for i, v in enumerate(predictions):
                f.write(str(i + 1) + ',' + str(v) + '\n')
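For reference, the k smallest distances can also be picked in one call with numpy's argpartition, which avoids repeatedly mutating the distance array; a standalone sketch with toy data:

import numpy as np

def k_nearest(train_images, train_labels, test_vector, k):
    """Return the labels and distances of the k nearest training images."""
    dists = np.linalg.norm(np.asarray(train_images) - np.asarray(test_vector),
                           ord=2, axis=1)
    nearest = np.argpartition(dists, k)[:k]     # indices of the k smallest distances
    return np.asarray(train_labels)[nearest], dists[nearest]

train_images = np.eye(4)                        # four toy 4-pixel "images"
train_labels = np.array([0, 1, 2, 3])
labels, distances = k_nearest(train_images, train_labels,
                              test_vector=np.array([1.0, 0, 0, 0]), k=2)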
Python
def config(settings):
    """
    Template settings for India
    - designed to be used in a Cascade with an application template
    """
    #T = current.T
    # Pre-Populate
    settings.base.prepopulate.append("locations/IN")
    # Restrict to specific country/countries
    settings.gis.countries.append("IN")
    # Disable the Postcode selector in the LocationSelector
    settings.gis.postcode_selector = False

    # L10n (Localization) settings
    settings.L10n.languages["hi"] = "Hindi"
    # Default Language (put this in custom template if-required)
    #settings.L10n.default_language = "hi"
    # Default timezone for users
    settings.L10n.utc_offset = "+0530"
    # Default Country Code for telephone numbers
    settings.L10n.default_country_code = 91

    settings.fin.currencies["INR"] = "Indian Rupees"
    settings.fin.currency_default = "INR"
Python
def req_need_commit(r, **attr): """ Custom method to Commit to a Need by creating an Activity Group """ # Create Activity Group (Response) with values from Need need_id = r.id db = current.db s3db = current.s3db ntable = s3db.req_need ntable_id = ntable.id netable = s3db.event_event_need left = [netable.on(netable.need_id == ntable_id), ] need = db(ntable_id == need_id).select(ntable.name, ntable.location_id, netable.event_id, left = left, limitby = (0, 1) ).first() nttable = s3db.req_need_tag query = (nttable.need_id == need_id) & \ (nttable.tag.belongs(("address", "contact"))) & \ (nttable.deleted == False) tags = db(query).select(nttable.tag, nttable.value, ) contact = address = None for tag in tags: if tag.tag == "address": address = tag.value elif tag.tag == "contact": contact = tag.value nrtable = s3db.req_need_response need_response_id = nrtable.insert(need_id = need_id, name = need["req_need.name"], location_id = need["req_need.location_id"], contact = contact, address = address, ) organisation_id = current.auth.user.organisation_id if organisation_id: s3db.req_need_response_organisation.insert(need_response_id = need_response_id, organisation_id = organisation_id, role = 1, ) event_id = need["event_event_need.event_id"] if event_id: aetable = s3db.event_event_need_response aetable.insert(need_response_id = need_response_id, event_id = event_id, ) nltable = s3db.req_need_line query = (nltable.need_id == need_id) & \ (nltable.deleted == False) lines = db(query).select(nltable.id, nltable.coarse_location_id, nltable.location_id, nltable.sector_id, nltable.parameter_id, nltable.value, nltable.value_uncommitted, nltable.item_category_id, nltable.item_id, nltable.item_pack_id, nltable.quantity, nltable.quantity_uncommitted, nltable.status, ) if lines: linsert = s3db.req_need_response_line.insert for line in lines: value_uncommitted = line.value_uncommitted if value_uncommitted is None: # No commitments yet so commit to all value = line.value else: # Only commit to the remainder value = value_uncommitted quantity_uncommitted = line.quantity_uncommitted if quantity_uncommitted is None: # No commitments yet so commit to all quantity = line.quantity else: # Only commit to the remainder quantity = quantity_uncommitted need_line_id = line.id linsert(need_response_id = need_response_id, need_line_id = need_line_id, coarse_location_id = line.coarse_location_id, location_id = line.location_id, sector_id = line.sector_id, parameter_id = line.parameter_id, value = value, item_category_id = line.item_category_id, item_id = line.item_id, item_pack_id = line.item_pack_id, quantity = quantity, ) # Update Need Line status req_need_line_status_update(need_line_id) # Redirect to Update from gluon import redirect redirect(URL(c= "req", f="need_response", args = [need_response_id, "update"], ))
Python
def req_need_line_commit(r, **attr): """ Custom method to Commit to a Need Line by creating an Activity """ # Create Activity with values from Need Line need_line_id = r.id db = current.db s3db = current.s3db nltable = s3db.req_need_line query = (nltable.id == need_line_id) line = db(query).select(nltable.id, nltable.need_id, nltable.coarse_location_id, nltable.location_id, nltable.sector_id, nltable.parameter_id, nltable.value, nltable.value_uncommitted, nltable.item_category_id, nltable.item_id, nltable.item_pack_id, nltable.quantity, nltable.quantity_uncommitted, nltable.status, limitby = (0, 1) ).first() need_id = line.need_id ntable = s3db.req_need ntable_id = ntable.id netable = s3db.event_event_need left = [netable.on(netable.need_id == ntable_id), ] need = db(ntable_id == need_id).select(ntable.name, ntable.location_id, netable.event_id, left = left, limitby = (0, 1) ).first() nttable = s3db.req_need_tag query = (nttable.need_id == need_id) & \ (nttable.tag.belongs(("address", "contact"))) & \ (nttable.deleted == False) tags = db(query).select(nttable.tag, nttable.value, ) contact = address = None for tag in tags: if tag.tag == "address": address = tag.value elif tag.tag == "contact": contact = tag.value nrtable = s3db.req_need_response need_response_id = nrtable.insert(need_id = need_id, name = need["req_need.name"], location_id = need["req_need.location_id"], contact = contact, address = address, ) organisation_id = current.auth.user.organisation_id if organisation_id: s3db.req_need_response_organisation.insert(need_response_id = need_response_id, organisation_id = organisation_id, role = 1, ) event_id = need["event_event_need.event_id"] if event_id: aetable = s3db.event_event_need_response aetable.insert(need_response_id = need_response_id, event_id = event_id, ) value_uncommitted = line.value_uncommitted if value_uncommitted is None: # No commitments yet so commit to all value = line.value else: # Only commit to the remainder value = value_uncommitted quantity_uncommitted = line.quantity_uncommitted if quantity_uncommitted is None: # No commitments yet so commit to all quantity = line.quantity else: # Only commit to the remainder quantity = quantity_uncommitted s3db.req_need_response_line.insert(need_response_id = need_response_id, need_line_id = need_line_id, coarse_location_id = line.coarse_location_id, location_id = line.location_id, sector_id = line.sector_id, parameter_id = line.parameter_id, value = value, item_category_id = line.item_category_id, item_id = line.item_id, item_pack_id = line.item_pack_id, quantity = quantity, ) # Update Need Line status req_need_line_status_update(need_line_id) # Redirect to Update from gluon import redirect redirect(URL(c= "req", f="need_response", args = [need_response_id, "update"], ))
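Both commit methods above share the same "commit to the remainder" rule for value and quantity. A minimal sketch of that rule as a stand-alone helper (remaining_commitment is a hypothetical name, not part of the codebase):

def remaining_commitment(requested, uncommitted):
    """Amount to commit for a need line."""
    if uncommitted is None:
        # No commitments yet so commit to all
        return requested
    # Only commit to the remainder
    return uncommitted

assert remaining_commitment(100, None) == 100   # first commitment covers the full request
assert remaining_commitment(100, 30) == 30      # later commitments cover what is left
assert remaining_commitment(100, 0) == 0        # nothing left to commit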
Python
def req_need_line_status_update(need_line_id): """ Update the Need Line's fulfilment Status """ db = current.db s3db = current.s3db # Read the Line details nltable = s3db.req_need_line iptable = s3db.supply_item_pack query = (nltable.id == need_line_id) left = iptable.on(nltable.item_pack_id == iptable.id) need_line = db(query).select(nltable.parameter_id, nltable.value, nltable.item_id, nltable.quantity, iptable.quantity, left = left, limitby = (0, 1) ).first() need_pack_qty = need_line["supply_item_pack.quantity"] need_line = need_line["req_need_line"] need_parameter_id = need_line.parameter_id need_value = need_line.value or 0 need_value_committed = 0 need_value_reached = 0 need_quantity = need_line.quantity if need_quantity: need_quantity = need_quantity * need_pack_qty else: need_quantity = 0 need_item_id = need_line.item_id need_quantity_committed = 0 need_quantity_delivered = 0 # Lookup which Status means 'Cancelled' stable = s3db.project_status status = db(stable.name == "Cancelled").select(stable.id, limitby = (0, 1) ).first() try: CANCELLED = status.id except AttributeError: # Prepop not done? Name changed? current.log.debug("'Cancelled' Status not found") CANCELLED = 999999 # Read the details of all Response Lines linked to this Need Line rltable = s3db.req_need_response_line iptable = s3db.supply_item_pack query = (rltable.need_line_id == need_line_id) & \ (rltable.deleted == False) left = iptable.on(rltable.item_pack_id == iptable.id) response_lines = db(query).select(rltable.id, rltable.parameter_id, rltable.value, rltable.value_reached, rltable.item_id, iptable.quantity, rltable.quantity, rltable.quantity_delivered, rltable.status_id, left = left, ) for line in response_lines: pack_qty = line["supply_item_pack.quantity"] line = line["req_need_response_line"] if line.status_id == CANCELLED: continue if line.parameter_id == need_parameter_id: value = line.value if value: need_value_committed += value value_reached = line.value_reached if value_reached: need_value_reached += value_reached if line.item_id == need_item_id: quantity = line.quantity if quantity: need_quantity_committed += quantity * pack_qty quantity_delivered = line.quantity_delivered if quantity_delivered: need_quantity_delivered += quantity_delivered * pack_qty # Calculate Need values & Update value_uncommitted = max(need_value - need_value_committed, 0) quantity_uncommitted = max(need_quantity - need_quantity_committed, 0) if (need_quantity_delivered >= need_quantity) and (need_value_reached >= need_value): status = 3 elif (quantity_uncommitted <= 0) and (value_uncommitted <= 0): status = 2 elif (need_quantity_committed > 0) or (need_value_committed > 0): status = 1 else: status = 0 db(nltable.id == need_line_id).update(value_committed = need_value_committed, value_uncommitted = value_uncommitted, value_reached = need_value_reached, quantity_committed = need_quantity_committed, quantity_uncommitted = quantity_uncommitted, quantity_delivered = need_quantity_delivered, status = status, )
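The status written at the end of req_need_line_status_update follows a small set of rules over the aggregated totals. A condensed, self-contained sketch of those rules (need_line_status is a hypothetical helper; quantities are assumed to be already converted to base units via the pack quantity):

def need_line_status(value, value_committed, value_reached,
                     quantity, quantity_committed, quantity_delivered):
    """0 = uncommitted, 1 = partially committed, 2 = fully committed, 3 = fulfilled."""
    value_uncommitted = max(value - value_committed, 0)
    quantity_uncommitted = max(quantity - quantity_committed, 0)
    if quantity_delivered >= quantity and value_reached >= value:
        return 3
    if quantity_uncommitted <= 0 and value_uncommitted <= 0:
        return 2
    if quantity_committed > 0 or value_committed > 0:
        return 1
    return 0

assert need_line_status(10, 0, 0, 100, 0, 0) == 0           # nothing committed yet
assert need_line_status(10, 4, 0, 100, 50, 0) == 1          # partial commitment
assert need_line_status(10, 10, 0, 100, 100, 0) == 2        # fully committed, not yet delivered
assert need_line_status(10, 10, 10, 100, 100, 100) == 3     # delivered and reached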
Python
def homepage_stats_update(): """ Scheduler task to update the data files for the charts on the homepage """ from controllers import HomepageStatistics HomepageStatistics.update_data()
Python
def req_need_response_postprocess(form): """ Set the Realm Ensure that the Need Lines (if-any) have the correct Status """ db = current.db s3db = current.s3db need_response_id = form.vars.id # Lookup Organisation nrotable = s3db.req_need_response_organisation query = (nrotable.need_response_id == need_response_id) & \ (nrotable.role == 1) org_link = db(query).select(nrotable.organisation_id, limitby = (0, 1), ).first() if not org_link: return organisation_id = org_link.organisation_id # Lookup Realm otable = s3db.org_organisation org = db(otable.id == organisation_id).select(otable.pe_id, limitby = (0, 1), ).first() realm_entity = org.pe_id # Set Realm nrtable = s3db.req_need_response db(nrtable.id == need_response_id).update(realm_entity = realm_entity) rltable = s3db.req_need_response_line db(rltable.need_response_id == need_response_id).update(realm_entity = realm_entity) # Lookup the Need Lines query = (rltable.need_response_id == need_response_id) & \ (rltable.deleted == False) response_lines = db(query).select(rltable.need_line_id) for line in response_lines: need_line_id = line.need_line_id if need_line_id: req_need_line_status_update(need_line_id)
Python
def req_need_response_line_ondelete(row): """ Ensure that the Need Line (if-any) has the correct Status """ import json db = current.db s3db = current.s3db response_line_id = row.get("id") # Lookup the Need Line rltable = s3db.req_need_response_line record = db(rltable.id == response_line_id).select(rltable.deleted_fk, limitby = (0, 1) ).first() if not record: return deleted_fk = json.loads(record.deleted_fk) need_line_id = deleted_fk.get("need_line_id") if not need_line_id: return # Check that the Need Line hasn't been deleted nltable = s3db.req_need_line need_line = db(nltable.id == need_line_id).select(nltable.deleted, limitby = (0, 1) ).first() if need_line and not need_line.deleted: req_need_line_status_update(need_line_id)
Python
def update_check(settings): """ Check whether the dependencies are sufficient to run Eden @ToDo: Load deployment_settings so that we can configure the update_check - need to rework so that 000_config.py is parsed 1st @param settings: the deployment_settings """ # Get Web2py environment into our globals. #globals().update(**environment) request = current.request # Fatal errors errors = [] # Non-fatal warnings warnings = [] # ------------------------------------------------------------------------- # Check Python libraries # Get mandatory global dependencies app_path = request.folder gr_path = os.path.join(app_path, "requirements.txt") or_path = os.path.join(app_path, "optional_requirements.txt") global_dep = parse_requirements({}, gr_path) optional_dep = parse_requirements({}, or_path) templates = settings.get_template() if not isinstance(templates, (tuple, list)): templates = (templates,) template_dep = {} template_optional_dep = {} for template in templates: tr_path = os.path.join(app_path, "modules", "templates", template, "requirements.txt") tor_path = os.path.join(app_path, "modules", "templates", template, "optional_requirements.txt") parse_requirements(template_dep, tr_path) parse_requirements(template_optional_dep, tor_path) # Remove optional dependencies which are already accounted for in template dependencies unique = set(optional_dep.keys()).difference(set(template_dep.keys())) for dependency in optional_dep.keys(): if dependency not in unique: del optional_dep[dependency] # Override optional dependency messages from template unique = set(optional_dep.keys()).difference(set(template_optional_dep.keys())) for dependency in optional_dep.keys(): if dependency not in unique: del optional_dep[dependency] errors, warnings = s3_check_python_lib(global_dep, template_dep, template_optional_dep, optional_dep) # @ToDo: Move these to Template # for now this is done in s3db.climate_first_run() if settings.has_module("climate"): if settings.get_database_type() != "postgres": errors.append("Climate unresolved dependency: PostgreSQL required") try: import rpy2 except ImportError: errors.append("Climate unresolved dependency: RPy2 required") try: from Scientific.IO import NetCDF except ImportError: warnings.append("Climate unresolved dependency: NetCDF required if you want to import readings") try: from scipy import stats except ImportError: warnings.append("Climate unresolved dependency: SciPy required if you want to generate graphs on the map") # ------------------------------------------------------------------------- # Check Web2Py version # # Currently, the minimum usable Web2py is determined by whether the # Scheduler is available web2py_minimum_version = "Version 2.4.7-stable+timestamp.2013.05.27.11.49.44" # Offset of datetime in return value of parse_version. 
datetime_index = 4 web2py_version_ok = True try: from gluon.fileutils import parse_version except ImportError: web2py_version_ok = False if web2py_version_ok: try: web2py_minimum_parsed = parse_version(web2py_minimum_version) web2py_minimum_datetime = web2py_minimum_parsed[datetime_index] version_info = open("VERSION", "r") web2py_installed_version = version_info.read().split()[-1].strip() version_info.close() if isinstance(web2py_installed_version, str): # Post 2.4.2, global_settings.web2py_version is unparsed web2py_installed_parsed = parse_version(web2py_installed_version) web2py_installed_datetime = web2py_installed_parsed[datetime_index] else: # 2.4.2 & earlier style web2py_installed_datetime = web2py_installed_version[datetime_index] web2py_version_ok = web2py_installed_datetime >= web2py_minimum_datetime except: # Will get AttributeError if Web2py's parse_version is too old for # its current version format, which changed in 2.3.2. web2py_version_ok = False if not web2py_version_ok: warnings.append( "The installed version of Web2py is too old to support the current version of Sahana Eden." "\nPlease upgrade Web2py to at least version: %s" % \ web2py_minimum_version) # ------------------------------------------------------------------------- # Create required directories if needed databases_dir = os.path.join(app_path, "databases") try: os.stat(databases_dir) except OSError: # not found, create it os.mkdir(databases_dir) # ------------------------------------------------------------------------- # Copy in Templates # - 000_config.py (machine-specific settings) # - rest are run in-place # template_folder = os.path.join(app_path, "modules", "templates") template_files = { # source : destination "000_config.py" : os.path.join("models", "000_config.py"), } copied_from_template = [] for t in template_files: src_path = os.path.join(template_folder, t) dst_path = os.path.join(app_path, template_files[t]) try: os.stat(dst_path) except OSError: # Not found, copy from template if t == "000_config.py": input = open(src_path) output = open(dst_path, "w") for line in input: if "akeytochange" in line: # Generate a random hmac_key to secure the passwords in case # the database is compromised import uuid hmac_key = uuid.uuid4() line = 'settings.auth.hmac_key = "%s"' % hmac_key output.write(line) output.close() input.close() else: import shutil shutil.copy(src_path, dst_path) copied_from_template.append(template_files[t]) # @ToDo: WebSetup # http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/WebSetup #if not os.path.exists("%s/applications/websetup" % os.getcwd()): # # @ToDo: Check Permissions # # Copy files into this folder (@ToDo: Pythonise) # cp -r private/websetup "%s/applications" % os.getcwd() # Launch WebSetup #redirect(URL(a="websetup", c="default", f="index", # vars=dict(appname=request.application, # firstTime="True"))) else: # Found the file in the destination # Check if it has been edited import re edited_pattern = r"FINISHED_EDITING_\w*\s*=\s*(True|False)" edited_matcher = re.compile(edited_pattern).match has_edited = False with open(dst_path) as f: for line in f: edited_result = edited_matcher(line) if edited_result: has_edited = True edited = edited_result.group(1) break if has_edited and (edited != "True"): errors.append("Please edit %s before starting the system." % t) # Check if it's up to date (i.e. 
a critical update requirement) version_pattern = r"VERSION =\s*([0-9]+)" version_matcher = re.compile(version_pattern).match has_version = False with open(dst_path) as f: for line in f: version_result = version_matcher(line) if version_result: has_version = True version = version_result.group(1) break if not has_version: error = "Your %s is using settings from the old templates system. Please switch to the new templates system: http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/Templates" % t errors.append(error) elif int(version) != VERSION: error = "Your %s is using settings from template version %s. Please update with new settings from template version %s before starting the system." % \ (t, version, VERSION) errors.append(error) if copied_from_template: errors.append( "The following files were copied from templates and should be edited: %s" % ", ".join(copied_from_template)) return {"error_messages": errors, "warning_messages": warnings}
Python
def defaults(): """ Safe defaults for names in case the module is disabled """ dummy = S3ReusableField("dummy_id", "integer", readable = False, writable = False, ) return {"dvr_case_id": lambda name="case_id", **attr: \ dummy(name, **attr), "dvr_case_status_id": lambda name="status_id", **attr: \ dummy(name, **attr), }
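The same "safe defaults" idiom recurs in the other defaults() snippets below: callers always receive a constructor for each field name, but the constructor yields a hidden dummy field when the module is disabled. A self-contained sketch of the idiom with a plain dict standing in for S3ReusableField (_dummy_field and module_defaults are hypothetical names):

def _dummy_field(name, **attr):
    # hidden placeholder field, analogous to the S3ReusableField dummy above
    field = {"name": name, "readable": False, "writable": False}
    field.update(attr)
    return field

def module_defaults():
    return {"dvr_case_id": lambda name="case_id", **attr: _dummy_field(name, **attr),
            "dvr_case_status_id": lambda name="status_id", **attr: _dummy_field(name, **attr),
            }

fields = module_defaults()
case_id = fields["dvr_case_id"]()                       # default field name
status_id = fields["dvr_case_status_id"](label="Status")  # extra attributes pass through
assert case_id["name"] == "case_id"
assert status_id["name"] == "status_id" and status_id["label"] == "Status"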
Python
def case_onaccept(form, create=False): """ Case onaccept routine: - auto-create active appointments - count household size for new cases @param form: the FORM @param create: perform additional actions for new cases """ db = current.db s3db = current.s3db # Read form data form_vars = form.vars if "id" in form_vars: record_id = form_vars.id elif hasattr(form, "record_id"): record_id = form.record_id else: return # Get the case ctable = s3db.dvr_case stable = s3db.dvr_case_status left = stable.on(stable.id == ctable.status_id) query = (ctable.id == record_id) row = db(query).select(ctable.id, ctable.person_id, ctable.closed_on, stable.is_closed, left = left, limitby = (0, 1), ).first() if not row: return # Update closed_on date case = row.dvr_case if row.dvr_case_status.is_closed: if not case.closed_on: case.update_record(closed_on = current.request.utcnow.date()) elif case.closed_on: case.update_record(closed_on = None) # Get the person ID person_id = case.person_id atable = s3db.dvr_case_appointment ttable = s3db.dvr_case_appointment_type left = atable.on((atable.type_id == ttable.id) & (atable.person_id == person_id) & (atable.deleted != True)) query = (atable.id == None) & \ (ttable.active == True) & \ (ttable.deleted != True) rows = db(query).select(ttable.id, left=left) for row in rows: atable.insert(case_id = record_id, person_id = person_id, type_id = row.id, ) if create and \ current.deployment_settings.get_dvr_household_size() == "auto": # Count household size for newly created cases, in order # to catch pre-existing case group memberships gtable = s3db.pr_group mtable = s3db.pr_group_membership query = ((mtable.person_id == person_id) & \ (mtable.deleted != True) & \ (gtable.id == mtable.group_id) & \ (gtable.group_type == 7)) rows = db(query).select(gtable.id) for row in rows: dvr_case_household_size(row.id)
Python
def defaults(): """ Safe defaults for names in case the module is disabled """ dummy = S3ReusableField("dummy_id", "integer", readable = False, writable = False, ) return {"dvr_case_flag_id": lambda name="flag_id", **attr: \ dummy(name, **attr), }
Python
def defaults(): """ Safe defaults for names in case the module is disabled """ dummy = S3ReusableField("dummy_id", "integer", readable = False, writable = False, ) return {"dvr_need_id": lambda name="need_id", **attr: \ dummy(name, **attr), }
Python
def defaults(): """ Safe defaults for names in case the module is disabled """ dummy = S3ReusableField("dummy_id", "integer", readable = False, writable = False, ) return {"dvr_referral_type_id": lambda name="referral_type_id", **attr: \ dummy(name, **attr), }
Python
def response_status_onaccept(form): """ Onaccept routine for response statuses: - only one status can be the default @param form: the FORM """ form_vars = form.vars try: record_id = form_vars.id except AttributeError: record_id = None if not record_id: return table = current.s3db.dvr_response_status db = current.db # If this status is the default, then set is_default-flag # for all other statuses to False: if form_vars.get("is_default"): db(table.id != record_id).update(is_default = False) # If this status is the default closure, then enforce is_closed, # and set is_default_closure for all other statuses to False if form_vars.get("is_default_closure"): db(table.id == record_id).update(is_closed = True) db(table.id != record_id).update(is_default_closure = False)
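The onaccept enforces that at most one status carries each default flag by clearing the flag on every other row. A small in-memory sketch of that invariant (the sample data and set_default are hypothetical):

statuses = [{"id": 1, "is_default": True},
            {"id": 2, "is_default": False},
            {"id": 3, "is_default": False}]

def set_default(rows, record_id):
    # emulates db(table.id != record_id).update(is_default=False) after the
    # accepted record was flagged as the default
    for row in rows:
        row["is_default"] = (row["id"] == record_id)

set_default(statuses, 3)
assert [row["is_default"] for row in statuses] == [False, False, True]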
Python
def response_theme_ondelete_cascade(row): """ Explicit deletion cascade for response theme list:references (which are not caught by standard cascade), action depending on "ondelete" setting of response_theme_ids: - RESTRICT => block deletion cascade - otherwise => clean up the list:reference @param row: the dvr_response_theme Row to be deleted """ db = current.db theme_id = row.id # Table with list:reference dvr_response_theme atable = current.s3db.dvr_response_action reference = atable.response_theme_ids # Referencing rows query = (reference.contains(theme_id)) & \ (atable.deleted == False) if reference.ondelete == "RESTRICT": referenced_by = db(query).select(atable.id, limitby=(0, 1)).first() if referenced_by: # Raise to stop deletion cascade raise RuntimeError("Attempt to delete a theme that is referenced by another record") else: referenced_by = db(query).select(atable.id, reference) for row in referenced_by: # Clean up reference list theme_ids = row[reference] row.update_record(response_theme_ids = \ [tid for tid in theme_ids if tid != theme_id])
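Because list:reference columns are not covered by the standard cascade, the handler either blocks the delete (RESTRICT) or strips the id from every referencing list. A plain-Python sketch of the two branches (cascade_theme_delete and the sample lists are hypothetical):

def cascade_theme_delete(theme_id, referencing_lists, ondelete="CASCADE"):
    if ondelete == "RESTRICT":
        if any(theme_id in ids for ids in referencing_lists):
            # block the deletion cascade
            raise RuntimeError("Attempt to delete a theme that is referenced by another record")
        return referencing_lists
    # otherwise strip the id from every referencing list
    return [[tid for tid in ids if tid != theme_id] for ids in referencing_lists]

assert cascade_theme_delete(2, [[1, 2, 3], [2], [4]]) == [[1, 3], [], [4]]
try:
    cascade_theme_delete(2, [[1, 2, 3]], ondelete="RESTRICT")
except RuntimeError:
    pass  # deletion blocked, as in the RESTRICT branch above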
Python
def defaults(): """ Safe defaults for names in case the module is disabled """ dummy = S3ReusableField("dummy_id", "integer", readable = False, writable = False, ) return {"dvr_activity_id": lambda name="activity_id", **attr: \ dummy(name, **attr), "dvr_case_activity_id": lambda name="case_activity_id", **attr: \ dummy(name, **attr), }
Python
def case_activity_status_onaccept(form): """ Onaccept routine for case activity statuses: - only one status can be the default @param form: the FORM """ form_vars = form.vars try: record_id = form_vars.id except AttributeError: record_id = None if not record_id: return # If this status is the default, then set is_default-flag # for all other statuses to False: if "is_default" in form_vars and form_vars.is_default: table = current.s3db.dvr_case_activity_status db = current.db db(table.id != record_id).update(is_default = False)
Python
def case_activity_onvalidation(form): """ Validate case activity form: - end date must be after start date """ T = current.T form_vars = form.vars try: start = form_vars.start_date end = form_vars.end_date except AttributeError: return if start and end and end < start: form.errors["end_date"] = T("End date must be after start date")
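For reference, the rule with plain datetime.date values (illustrative only): strictly earlier end dates are rejected, while same-day activities pass.

from datetime import date

start, end = date(2019, 5, 1), date(2019, 4, 30)
assert end < start              # this combination would set form.errors["end_date"]

start, end = date(2019, 5, 1), date(2019, 5, 1)
assert not (end < start)        # same-day activities pass the check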
Python
def case_activity_close_responses(case_activity_id): """ Close all open response actions in a case activity @param case_activity_id: the case activity record ID """ db = current.db s3db = current.s3db rtable = s3db.dvr_response_action stable = s3db.dvr_response_status # Get all response actions for this case activity # that have an open-status (or no status at all): left = stable.on((stable.id == rtable.status_id) & \ (stable.deleted == False)) query = (rtable.case_activity_id == case_activity_id) & \ (rtable.deleted == False) & \ ((stable.is_closed == False) | (stable.id == None)) rows = db(query).select(rtable.id, left=left) if rows: # Get the default closure status, # (usually something like "obsolete") query = (stable.is_default_closure == True) & \ (stable.deleted == False) closure_status = db(query).select(stable.id, limitby = (0, 1), ).first() # Update all open response actions for this # case activity to the default closure status: if closure_status: response_ids = set(row.id for row in rows) query = rtable.id.belongs(response_ids) db(query).update(status_id = closure_status.id)
Python
def case_activity_onaccept(cls, form):
    """
        Onaccept-callback for case activities:
            - set end date when marked as completed
            - close any open response actions when marked as completed

        @param form: the FORM
    """

    db = current.db
    s3db = current.s3db

    settings = current.deployment_settings

    # Read form data
    form_vars = form.vars
    if "id" in form_vars:
        record_id = form_vars.id
    elif hasattr(form, "record_id"):
        record_id = form.record_id
    else:
        return

    # Get current status and end_date of the record
    atable = s3db.dvr_case_activity
    query = (atable.id == record_id)

    activity = None
    is_closed = False

    if settings.get_dvr_case_activity_use_status():
        # Use status_id
        stable = s3db.dvr_case_activity_status
        left = stable.on(atable.status_id == stable.id)
        row = db(query).select(atable.id,
                               atable.end_date,
                               stable.is_closed,
                               left = left,
                               limitby = (0, 1),
                               ).first()
        if row:
            activity = row.dvr_case_activity
            is_closed = row.dvr_case_activity_status.is_closed
    else:
        # Use completed-flag
        row = db(query).select(atable.id,
                               atable.end_date,
                               atable.completed,
                               limitby = (0, 1),
                               ).first()
        if row:
            activity = row
            is_closed = row.completed

    if not activity:
        return

    if is_closed:

        # Cancel follow-ups for closed activities
        data = {"followup": False,
                "followup_date": None,
                }

        # Set end-date if not already set
        if not activity.end_date:
            data["end_date"] = current.request.utcnow.date()

        activity.update_record(**data)

        # Close any open response actions in this activity:
        if settings.get_dvr_manage_response_actions():
            cls.case_activity_close_responses(activity.id)

    elif activity.end_date:

        # Remove end-date if present
        activity.update_record(end_date = None)
Python
def case_effort_onaccept(form): """ Onaccept-callback for dvr_case_effort: - inherit person_id from case_activity, unless specified in form or default @param form: the FORM """ # Read form data formvars = form.vars # Get the record ID if "id" in formvars: record_id = formvars.id elif hasattr(form, "record_id"): record_id = form.record_id else: record_id = None if not record_id: return s3db = current.s3db etable = s3db.dvr_case_effort field = etable.person_id if "person_id" not in formvars and not field.default: # Inherit person_id from the case activity atable = s3db.dvr_case_activity query = (etable.id == record_id) & \ (atable.id == etable.case_activity_id) row = current.db(query).select(etable.id, etable.person_id, atable.person_id, limitby = (0, 1), ).first() if row: effort = row.dvr_case_effort activity = row.dvr_case_activity if not effort.person_id: effort.update_record(person_id = activity.person_id)
Python
def defaults(): """ Safe defaults for names in case the module is disabled """ dummy = S3ReusableField("dummy_id", "integer", readable = False, writable = False, ) return {"dvr_appointment_status_opts": {}, "dvr_appointment_type_id": lambda name="type_id", **attr: \ dummy(name, **attr), }
Python
def case_appointment_onvalidation(form): """ Validate appointment form - Future appointments can not be set to completed - Undated appointments can not be set to completed @param form: the FORM """ formvars = form.vars date = formvars.get("date") status = formvars.get("status") if str(status) == "4": if date is None: form.errors["date"] = current.T("Date is required when marking the appointment as completed") elif date > current.request.utcnow.date(): form.errors["status"] = current.T("Appointments with future dates can not be marked as completed")
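A compact restatement of the validation with plain dates (appointment_errors is a hypothetical helper; "today" stands in for request.utcnow.date()):

from datetime import date, timedelta

def appointment_errors(appt_date, status, today):
    errors = []
    if str(status) == "4":                  # "4" = completed
        if appt_date is None:
            errors.append("date")           # completed appointments need a date
        elif appt_date > today:
            errors.append("status")         # completed appointments cannot lie in the future
    return errors

today = date(2019, 6, 1)
assert appointment_errors(None, 4, today) == ["date"]
assert appointment_errors(today + timedelta(days=1), "4", today) == ["status"]
assert appointment_errors(today, "4", today) == []
assert appointment_errors(None, 1, today) == []   # other statuses are not restricted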
Python
def case_appointment_onaccept(form):
    """
        Actions after creating/updating appointments
            - Update last_seen_on in the corresponding case(s)
            - Update the case status if configured to do so

        @param form: the FORM
    """

    # Read form data
    formvars = form.vars
    if "id" in formvars:
        record_id = formvars.id
    elif hasattr(form, "record_id"):
        record_id = form.record_id
    else:
        record_id = None
    if not record_id:
        return

    db = current.db
    s3db = current.s3db

    settings = current.deployment_settings
    table = s3db.dvr_case_appointment

    person_id = formvars.get("person_id")
    case_id = formvars.get("case_id")

    if not person_id or not case_id:
        row = db(table.id == record_id).select(table.case_id,
                                               table.person_id,
                                               limitby = (0, 1),
                                               ).first()
        if row:
            person_id = row.person_id
            case_id = row.case_id

    if settings.get_dvr_appointments_update_last_seen_on() and person_id:
        # Update last_seen_on
        dvr_update_last_seen(person_id)

    # Update the case status if appointment is completed
    # NB appointment status "completed" must be set by this form
    if settings.get_dvr_appointments_update_case_status() and \
       s3_str(formvars.get("status")) == "4":

        # Get the case status to be set when appointment is completed
        ttable = s3db.dvr_case_appointment_type
        query = (table.id == record_id) & \
                (table.deleted != True) & \
                (ttable.id == table.type_id) & \
                (ttable.status_id != None)
        row = db(query).select(table.date,
                               ttable.status_id,
                               limitby = (0, 1),
                               ).first()
        if row:
            # Check whether there is a later appointment that
            # would have set a different case status (we don't
            # want to override this when closing appointments
            # retrospectively):
            date = row.dvr_case_appointment.date
            if not date:
                # Assume today if no date given
                date = current.request.utcnow.date()
            status_id = row.dvr_case_appointment_type.status_id
            query = (table.person_id == person_id)
            if case_id:
                query &= (table.case_id == case_id)
            query &= (table.date != None) & \
                     (table.status == 4) & \
                     (table.date > date) & \
                     (table.deleted != True) & \
                     (ttable.id == table.type_id) & \
                     (ttable.status_id != None) & \
                     (ttable.status_id != status_id)
            later = db(query).select(table.id, limitby = (0, 1)).first()
            if later:
                status_id = None
        else:
            status_id = None

        if status_id:
            # Update the corresponding case(s)
            # NB appointments without case_id update all cases for the person
            ctable = s3db.dvr_case
            stable = s3db.dvr_case_status
            query = (ctable.person_id == person_id) & \
                    (ctable.archived != True) & \
                    (ctable.deleted != True) & \
                    (stable.id == ctable.status_id) & \
                    (stable.is_closed != True)
            if case_id:
                query &= (ctable.id == case_id)
            cases = db(query).select(ctable.id,
                                     ctable.person_id,
                                     ctable.archived,
                                     )
            has_permission = current.auth.s3_has_permission
            for case in cases:
                if has_permission("update", ctable, record_id=case.id):
                    # Customise case resource
                    r = S3Request("dvr", "case",
                                  current.request,
                                  args = [],
                                  get_vars = {},
                                  )
                    r.customise_resource("dvr_case")
                    # Update case status + run onaccept
                    case.update_record(status_id = status_id)
                    s3db.onaccept(ctable, case, method="update")
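The trickiest part above is deciding whether a completed appointment may still set the case status, given possibly later completed appointments of other types. A simplified sketch of that decision (status_for_completed_appointment is a hypothetical helper working on plain values rather than database rows):

from datetime import date

def status_for_completed_appointment(appt_date, status_id, later_completed):
    """later_completed: (date, case status id) pairs of other completed appointments."""
    for other_date, other_status_id in later_completed:
        if other_date and other_date > appt_date and \
           other_status_id is not None and other_status_id != status_id:
            return None     # a later appointment already determines the case status
    return status_id

assert status_for_completed_appointment(date(2019, 6, 1), 5, []) == 5
assert status_for_completed_appointment(date(2019, 6, 1), 5, [(date(2019, 6, 10), 7)]) is None
assert status_for_completed_appointment(date(2019, 6, 1), 5, [(date(2019, 5, 20), 7)]) == 5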
def case_appointment_ondelete(row): """ Actions after deleting appointments - Update last_seen_on in the corresponding case(s) @param row: the deleted Row """ if current.deployment_settings.get_dvr_appointments_update_last_seen_on(): # Get the deleted keys table = current.s3db.dvr_case_appointment row = current.db(table.id == row.id).select(table.deleted_fk, limitby = (0, 1), ).first() if row and row.deleted_fk: # Get the person ID try: deleted_fk = json.loads(row.deleted_fk) except (ValueError, TypeError): person_id = None else: person_id = deleted_fk.get("person_id") # Update last_seen_on if person_id: dvr_update_last_seen(person_id)
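# Hedged usage sketch (not part of the original module): the three appointment
# callbacks above are meant to be attached to the dvr_case_appointment table as
# CRUD hooks. The exact wiring inside the model class may differ; the helper
# name below is hypothetical and only illustrates the hook points.
def example_configure_case_appointment_hooks():
    """ Example only: attach the appointment callbacks as CRUD hooks """
    current.s3db.configure("dvr_case_appointment",
                           onvalidation = case_appointment_onvalidation,
                           onaccept = case_appointment_onaccept,
                           ondelete = case_appointment_ondelete,
                           )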
def defaults(): """ Safe defaults for names in case the module is disabled """ dummy = S3ReusableField("dummy_id", "integer", readable = False, writable = False, ) return {"dvr_beneficiary_type_id": lambda name="beneficiary_type_id", **attr: \ dummy(name, **attr), }
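# Hedged sketch (not part of the original module): defaults() above is what lets
# other models keep referencing dvr_beneficiary_type_id even when the DVR module
# is disabled - the name then resolves to the dummy field. The helper name below
# is hypothetical.
def example_use_beneficiary_type_id():
    """ Example only: the reusable field resolves whether or not DVR is enabled """
    beneficiary_type_id = current.s3db.dvr_beneficiary_type_id
    return beneficiary_type_id()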
def allowance_onvalidation(form): """ Validate allowance form - Status paid requires paid_on date @param form: the FORM """ formvars = form.vars date = formvars.get("paid_on") status = formvars.get("status") if str(status) == "2" and not date: form.errors["paid_on"] = current.T("Date of payment required")
def allowance_onaccept(form): """ Actions after creating/updating allowance information - update last_seen_on """ if current.deployment_settings.get_dvr_payments_update_last_seen_on(): # Read form data form_vars = form.vars if "id" in form_vars: record_id = form_vars.id elif hasattr(form, "record_id"): record_id = form.record_id else: record_id = None if not record_id: return if current.response.s3.bulk and "status" not in form_vars: # Import without status change won't affect last_seen_on, # so we can skip this check for better performance return # Get the person ID table = current.s3db.dvr_allowance row = current.db(table.id == record_id).select(table.person_id, limitby = (0, 1), ).first() # Update last_seen_on if row: dvr_update_last_seen(row.person_id)
def allowance_ondelete(row): """ Actions after deleting allowance information - Update last_seen_on in the corresponding case(s) @param row: the deleted Row """ if current.deployment_settings.get_dvr_payments_update_last_seen_on(): # Get the deleted keys table = current.s3db.dvr_allowance row = current.db(table.id == row.id).select(table.deleted_fk, limitby = (0, 1), ).first() if row and row.deleted_fk: # Get the person ID try: deleted_fk = json.loads(row.deleted_fk) except (ValueError, TypeError): person_id = None else: person_id = deleted_fk.get("person_id") # Update last_seen_on if person_id: dvr_update_last_seen(person_id)
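# Hedged sketch (not part of the original module): a minimal demonstration of the
# allowance_onvalidation() rule above, using web2py's Storage as a stand-in for
# the FORM object (assumes the usual web2py environment where current.T is set).
# The helper name is hypothetical.
def example_allowance_paid_requires_date():
    """ Example only: status "paid" (2) without paid_on is rejected """
    from gluon.storage import Storage
    form = Storage(vars = Storage(status = "2", paid_on = None),
                   errors = Storage(),
                   )
    allowance_onvalidation(form)
    return "paid_on" in form.errors   # True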
def case_event_type_onaccept(form): """ Onaccept routine for case event types: - only one type can be the default @param form: the FORM """ form_vars = form.vars try: record_id = form_vars.id except AttributeError: record_id = None if not record_id: return # If this type is the default, then set is_default-flag # for all other types to False: if "is_default" in form_vars and form_vars.is_default: table = current.s3db.dvr_case_event_type db = current.db db(table.id != record_id).update(is_default = False)
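# Hedged sketch (not part of the original module): because
# case_event_type_onaccept() above keeps at most one event type flagged as
# default, the current default can be looked up with a single query.
# The helper name is hypothetical.
def example_get_default_event_type():
    """ Example only: look up the default case event type """
    table = current.s3db.dvr_case_event_type
    query = (table.is_default == True) & \
            (table.deleted != True)
    row = current.db(query).select(table.id, limitby=(0, 1)).first()
    return row.id if row else None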
def case_event_create_onaccept(form): """ Actions after creation of a case event: - update last_seen_on in the corresponding cases - close appointments if configured to do so @param form: the FORM """ formvars = form.vars try: record_id = formvars.id except AttributeError: record_id = None if not record_id: return db = current.db s3db = current.s3db close_appointments = current.deployment_settings \ .get_dvr_case_events_close_appointments() case_id = formvars.get("case_id") person_id = formvars.get("person_id") type_id = formvars.get("type_id") if not person_id or not type_id or \ close_appointments and not case_id: # Reload the record table = s3db.dvr_case_event row = db(table.id == record_id).select(table.case_id, table.person_id, table.type_id, limitby = (0, 1), ).first() if not row: return case_id = row.case_id person_id = row.person_id type_id = row.type_id if not person_id: return # Get the event type ttable = s3db.dvr_case_event_type query = (ttable.id == type_id) & \ (ttable.deleted == False) event_type = db(query).select(ttable.presence_required, ttable.appointment_type_id, limitby = (0, 1), ).first() if not event_type: return # Update last_seen (if event type requires personal presence) if event_type.presence_required: dvr_update_last_seen(person_id) # Close appointments appointment_type_id = event_type.appointment_type_id if close_appointments and appointment_type_id: today = current.request.utcnow.date() atable = s3db.dvr_case_appointment query = (atable.type_id == appointment_type_id) & \ (atable.person_id == person_id) & \ ((atable.date == None) | (atable.date <= today)) & \ (atable.deleted == False) if case_id: query &= (atable.case_id == case_id) | \ (atable.case_id == None) rows = db(query).select(atable.id, atable.date, atable.status, orderby = ~atable.date, ) data = {"date": today, "status": 4} if not rows: # No appointment of this type yet # => create a new closed appointment data["type_id"] = appointment_type_id data["person_id"] = person_id data["case_id"] = case_id aresource = s3db.resource("dvr_case_appointment") try: record_id = aresource.insert(**data) except S3PermissionError: current.log.error("Event Registration: %s" % sys.exc_info()[1]) else: update = None # Find key dates undated = open_today = closed_today = previous = None for row in rows: if row.date is None: if not undated: # An appointment without date undated = row elif row.date == today: if row.status != 4: # An open or cancelled appointment today open_today = row else: # A closed appointment today closed_today = row elif previous is None: # The last appointment before today previous = row if open_today: # If we have an open appointment for today, update it update = open_today elif closed_today: # If we already have a closed appointment for today, # do nothing update = None elif previous: if previous.status not in (1, 2, 3): # Last appointment before today is closed # => create a new one unless there is an undated one if undated: update = undated else: # Last appointment before today is still open # => update it update = previous else: update = undated if update: # Update the appointment permitted = current.auth.s3_has_permission("update", atable, record_id=update.id, ) if permitted: # Customise appointment resource r = S3Request("dvr", "case_appointment", current.request, args = [], get_vars = {}, ) r.customise_resource("dvr_case_appointment") # Update appointment success = update.update_record(**data) if success: data["id"] = update.id s3db.onaccept(atable, data, method="update") else: 
current.log.error("Event Registration: could not update appointment %s" % update.id) else: current.log.error("Event registration: not permitted to update appointment %s" % update.id)
def case_event_ondelete(row): """ Actions after deleting a case event: - update last_seen_on in the corresponding cases @param row: the deleted Row """ # Get the deleted keys table = current.s3db.dvr_case_event row = current.db(table.id == row.id).select(table.deleted_fk, limitby = (0, 1), ).first() if row and row.deleted_fk: # Get the person ID try: deleted_fk = json.loads(row.deleted_fk) except (ValueError, TypeError): person_id = None else: person_id = deleted_fk.get("person_id") # Update last_seen_on if person_id: dvr_update_last_seen(person_id)
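# Hedged usage sketch (not part of the original module): case_event_create_onaccept()
# only makes sense on record creation, so it would be attached as create_onaccept
# rather than the generic onaccept. The helper name below is hypothetical.
def example_configure_case_event_hooks():
    """ Example only: attach the case event callbacks as CRUD hooks """
    current.s3db.configure("dvr_case_event",
                           create_onaccept = case_event_create_onaccept,
                           ondelete = case_event_ondelete,
                           )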
def dvr_case_status_filter_opts(closed=None): """ Get filter options for case status, ordered by workflow position @return: OrderedDict of options @note: set sort=False for filter widget to retain this order """ table = current.s3db.dvr_case_status query = (table.deleted != True) if closed is not None: if closed: query &= (table.is_closed == True) else: query &= ((table.is_closed == False) | (table.is_closed == None)) rows = current.db(query).select(table.id, table.name, orderby = "workflow_position", ) if not rows: return {} T = current.T return OrderedDict((row.id, T(row.name)) for row in rows)
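# Hedged usage sketch (not part of the original module): as the docstring above
# notes, sort=False keeps the workflow order in the filter widget. A typical use
# in a filter form could look like this; the import path and field selector
# depend on the Eden version and the filtered resource.
def example_case_status_filter():
    """ Example only: case status filter widget in workflow order """
    from s3 import S3OptionsFilter
    return S3OptionsFilter("status_id",
                           options = lambda: dvr_case_status_filter_opts(closed=False),
                           cols = 3,
                           sort = False,
                           )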
def dvr_case_activity_default_status(): """ Helper to get/set the default status for case activities @return: the default status_id """ s3db = current.s3db rtable = s3db.dvr_case_activity field = rtable.status_id default = field.default if not default: # Look up the default status stable = s3db.dvr_case_activity_status query = (stable.is_default == True) & \ (stable.deleted != True) row = current.db(query).select(stable.id, limitby=(0, 1)).first() if row: # Set as field default in case activity table default = field.default = row.id return default
def dvr_response_default_status(): """ Helper to get/set the default status for response records @return: the default status_id """ s3db = current.s3db rtable = s3db.dvr_response_action field = rtable.status_id default = field.default if not default: # Look up the default status stable = s3db.dvr_response_status query = (stable.is_default == True) & \ (stable.deleted != True) row = current.db(query).select(stable.id, limitby=(0, 1)).first() if row: # Set as field default in responses table default = field.default = row.id return default
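# Hedged sketch (not part of the original module): both default-status helpers
# above cache the looked-up id on the respective status_id field, so repeated
# calls within a request are cheap. The helper name below is hypothetical.
def example_response_status_default_is_cached():
    """ Example only: the default is cached on the field after the first lookup """
    field = current.s3db.dvr_response_action.status_id
    first = dvr_response_default_status()    # queries dvr_response_status
    second = dvr_response_default_status()   # served from field.default
    return first == second == field.default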
def dvr_case_household_size(group_id): """ Update the household_size for all cases in the given case group, taking into account that the same person could belong to multiple case groups. To be called onaccept of pr_group_membership if automatic household size is enabled @param group_id: the group_id of the case group (group_type == 7) """ db = current.db s3db = current.s3db ptable = s3db.pr_person gtable = s3db.pr_group mtable = s3db.pr_group_membership # Get all persons related to this group_id, make sure this is a case group join = [mtable.on((mtable.group_id == gtable.id) & (mtable.deleted != True)), ptable.on(ptable.id == mtable.person_id) ] query = (gtable.id == group_id) & \ (gtable.group_type == 7) & \ (gtable.deleted != True) rows = db(query).select(ptable.id, join=join) person_ids = set([row.id for row in rows]) if person_ids: # Get case group members for each of these person_ids ctable = s3db.dvr_case rtable = ctable.with_alias("member_cases") otable = mtable.with_alias("case_members") join = ctable.on(ctable.person_id == mtable.person_id) left = [otable.on((otable.group_id == mtable.group_id) & (otable.deleted != True)), rtable.on(rtable.person_id == otable.person_id), ] query = (mtable.person_id.belongs(person_ids)) & \ (mtable.deleted != True) & \ (rtable.id != None) rows = db(query).select(ctable.id, otable.person_id, join = join, left = left, ) # Count heads CASE = str(ctable.id) MEMBER = str(otable.person_id) groups = {} for row in rows: member_id = row[MEMBER] case_id = row[CASE] if case_id not in groups: groups[case_id] = set([member_id]) else: groups[case_id].add(member_id) # Update the related cases for case_id, members in groups.items(): number_of_members = len(members) db(ctable.id == case_id).update(household_size = number_of_members)
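# Hedged sketch (not part of the original module): the head-count step in
# dvr_case_household_size() above reduces to collecting the distinct members per
# case before writing household_size. This standalone snippet mirrors that
# grouping with plain dicts/sets; the sample (case_id, member_id) pairs are
# made up for illustration.
def example_count_household_members():
    """ Example only: distinct members per case """
    rows = [(1, 100), (1, 101), (1, 101), (2, 100), (2, 102), (2, 103)]
    groups = {}
    for case_id, member_id in rows:
        groups.setdefault(case_id, set()).add(member_id)
    return {case_id: len(members) for case_id, members in groups.items()}
    # -> {1: 2, 2: 3}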