(Dataset schema, reconstructed from the viewer header: repository_name is a string of 5-67 chars; func_path_in_repository 4-234; func_name 0-314; whole_func_string 52-3.87M; language has 6 classes; func_code_string 52-3.87M; func_documentation_string 1-47.2k; func_code_url 85-339.)
repository_name | func_path_in_repository | func_name | whole_func_string | language | func_code_string | func_documentation_string | func_code_url
---|---|---|---|---|---|---|---|
SiLab-Bonn/basil | basil/HL/GPAC.py | GPAC.set_current | def set_current(self, channel, value, unit='A'):
'''Setting current of current source
'''
dac_offset = self._ch_cal[channel]['DAC']['offset']
dac_gain = self._ch_cal[channel]['DAC']['gain']
if unit == 'raw':
pass # value is already a raw DAC code
elif unit == 'A':
value = int((-value * 1000000 - dac_offset) / dac_gain) # fix sign of output
elif unit == 'mA':
value = int((-value * 1000 - dac_offset) / dac_gain) # fix sign of output
elif unit == 'uA':
value = int((-value - dac_offset) / dac_gain) # fix sign of output
else:
raise TypeError("Invalid unit type.")
self._set_dac_value(channel=channel, value=value) | python | def set_current(self, channel, value, unit='A'):
'''Setting current of current source
'''
dac_offset = self._ch_cal[channel]['DAC']['offset']
dac_gain = self._ch_cal[channel]['DAC']['gain']
if unit == 'raw':
pass # value is already a raw DAC code
elif unit == 'A':
value = int((-value * 1000000 - dac_offset) / dac_gain) # fix sign of output
elif unit == 'mA':
value = int((-value * 1000 - dac_offset) / dac_gain) # fix sign of output
elif unit == 'uA':
value = int((-value - dac_offset) / dac_gain) # fix sign of output
else:
raise TypeError("Invalid unit type.")
self._set_dac_value(channel=channel, value=value) | Setting current of current source | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/GPAC.py#L858-L874 |
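A minimal usage sketch for set_current above; the channel label 'ISRC0' and the already-initialized GPAC handle gpac are illustrative assumptions, not taken from the source:

# gpac = dut['GPAC']                                 # hypothetical handle to the card
gpac.set_current('ISRC0', value=-100, unit='uA')     # drive -100 uA
gpac.set_current('ISRC0', value=-0.0001, unit='A')   # the same current given in A
# the method negates the value internally ("fix sign of output") and applies
# the per-channel DAC offset/gain calibration before writing the DAC code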
SiLab-Bonn/basil | basil/HL/MIO_PLL.py | MIO_PLL._calculateParameters | def _calculateParameters(self, fout):
q_d_f = 0
'''
fout = fref * (p_total / q_total) * (1 / div)
p_total = 2 * ((p_counter + 4) + p_0) [16..1023]
q_total = q_counter + 2 [2..129]
div = [2,(3),4..127]
constraints:
f_ref * p_total / q_total = [100..400] MHz
f_ref / q_total > 0.25 MHz
'''
for self.q_counter in range(128):
self.q_total = self.q_counter + 2
if (self.fref / self.q_total) < 0.25: # PLL constraint
break
for self.div in range(2, 128):
q_d_f = self.q_total * self.div * fout
if isinstance(q_d_f, six.integer_types) and q_d_f > (15 * self.fref): # = f0 * p
if int(q_d_f) % int(self.fref) == 0: # p, q, and d found
self.p_total = q_d_f / self.fref
while self.p_total <= 16: # counter constraint
self.p_total = self.p_total * 2
self.div = self.div * 2
if self.div > 127:
break
if self.p_total > 1023:
break
if ((self.fref * self.p_total / self.q_total) < 100 or (self.fref * self.p_total / self.q_total) > 400): # PLL constraint
break
if int(self.p_total) % 2 == 0:
self.p_0 = 0
else:
self.p_0 = 1
self.p_counter = ((int(self.p_total) - self.p_0) / 2) - 4 # set p counter value
if self.div == 2:
self.clk1SRC = 0x02
self.div1N = 4
elif self.div == 3:
self.clk1SRC = 0x03
self.div1N = 6
else:
self.clk1SRC = 0x01
self.div1N = self.div
if self.p_total <= 44:
self.chg_pump = 0
elif self.p_total <= 479:
self.chg_pump = 1
elif self.p_total <= 639:
self.chg_pump = 2
elif self.p_total <= 799:
self.chg_pump = 3
elif self.p_total <= 1023:
self.chg_pump = 4
ftest = self.fref * self.p_total / self.q_total * 1 / self.div
fvco = self.fref * self.p_total / self.q_total
logger.info('PLL frequency set to ' + str(ftest) + ' MHz' + ' (VCO @ ' + str(fvco) + ' MHz)')
return True
logger.error('MIO_PLL: Could not find PLL parameters')
return False | python | def _calculateParameters(self, fout):
q_d_f = 0
'''
fout = fref * (p_total / q_total) * (1 / div)
p_total = 2 * ((p_counter + 4) + p_0) [16..1023]
q_total = q_counter + 2 [2..129]
div = [2,(3),4..127]
constraints:
f_ref * p_total / q_total = [100..400] MHz
f_ref / q_total > 0.25 MHz
'''
for self.q_counter in range(128):
self.q_total = self.q_counter + 2
if (self.fref / self.q_total) < 0.25: # PLL constraint
break
for self.div in range(2, 128):
q_d_f = self.q_total * self.div * fout
if isinstance(q_d_f, six.integer_types) and q_d_f > (15 * self.fref): # = f0 * p
if int(q_d_f) % int(self.fref) == 0: # p, q, and d found
self.p_total = q_d_f / self.fref
while self.p_total <= 16: # counter constraint
self.p_total = self.p_total * 2
self.div = self.div * 2
if self.div > 127:
break
if self.p_total > 1023:
break
if ((self.fref * self.p_total / self.q_total) < 100 or (self.fref * self.p_total / self.q_total) > 400): # PLL constraint
break
if int(self.p_total) % 2 == 0:
self.p_0 = 0
else:
self.p_0 = 1
self.p_counter = ((int(self.p_total) - self.p_0) / 2) - 4 # set p counter value
if self.div == 2:
self.clk1SRC = 0x02
self.div1N = 4
elif self.div == 3:
self.clk1SRC = 0x03
self.div1N = 6
else:
self.clk1SRC = 0x01
self.div1N = self.div
if self.p_total <= 44:
self.chg_pump = 0
elif self.p_total <= 479:
self.chg_pump = 1
elif self.p_total <= 639:
self.chg_pump = 2
elif self.p_total <= 799:
self.chg_pump = 3
elif self.p_total <= 1023:
self.chg_pump = 4
ftest = self.fref * self.p_total / self.q_total * 1 / self.div
fvco = self.fref * self.p_total / self.q_total
logger.info('PLL frequency set to ' + str(ftest) + ' MHz' + ' (VCO @ ' + str(fvco) + ' MHz)')
return True
logger.error('MIO_PLL: Could not find PLL parameters')
return False | fout = fref * (p_total / q_total) * (1 / div)
p_total = 2 * ((p_counter + 4) + p_0) [16..1023]
q_total = q_counter + 2 [2..129]
div = [2,(3),4..127]
constraints:
f_ref * p_total / q_total = [100..400] MHz
f_ref / q_total > 0.25 MHz | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/MIO_PLL.py#L111-L180 |
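As a sanity check on the relation in the docstring above, a standalone sketch that recomputes fout from one admissible parameter set (the reference frequency and parameter values are assumed example numbers, not taken from the source):

fref = 48.0                       # MHz, assumed reference clock
p_total, q_total, div = 20, 4, 6  # assumed solution within the stated ranges
fvco = fref * p_total / q_total   # 240.0 MHz, inside the [100..400] MHz window
fout = fvco / div                 # 40.0 MHz at the output divider
assert 100 <= fvco <= 400 and fref / q_total > 0.25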
SiLab-Bonn/basil | basil/TL/Visa.py | Visa.init | def init(self):
'''
Initialize the device.
All other init parameters are passed through to visa.ResourceManager().open_resource().
'''
super(Visa, self).init()
backend = self._init.get('backend', '') # Empty string means std. backend (NI VISA)
rm = visa.ResourceManager(backend)
try:
logger.info('BASIL VISA TL with %s backend found the following devices: %s', backend, ", ".join(rm.list_resources()))
except NotImplementedError: # some backends do not always implement the list_resources function
logger.info('BASIL VISA TL with %s backend', backend)
self._resource = rm.open_resource(**{key: value for key, value in self._init.items() if key not in ("backend",)}) | python | def init(self):
'''
Initialize the device.
All other init parameters are passed through to visa.ResourceManager().open_resource().
'''
super(Visa, self).init()
backend = self._init.get('backend', '') # Empty string means std. backend (NI VISA)
rm = visa.ResourceManager(backend)
try:
logger.info('BASIL VISA TL with %s backend found the following devices: %s', backend, ", ".join(rm.list_resources()))
except NotImplementedError: # some backends do not always implement the list_resources function
logger.info('BASIL VISA TL with %s backend', backend)
self._resource = rm.open_resource(**{key: value for key, value in self._init.items() if key not in ("backend",)}) | Initialize the device.
All other init parameters are passed through to visa.ResourceManager().open_resource(). | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/TL/Visa.py#L26-L38 |
SiLab-Bonn/basil | basil/HL/bram_fifo.py | bram_fifo.get_data | def get_data(self):
''' Reading data in BRAM.
Returns
-------
array : numpy.ndarray
Array of unsigned integers (32 bit).
'''
fifo_int_size_1 = self.FIFO_INT_SIZE # read the size register twice, since the FIFO fill level
fifo_int_size_2 = self.FIFO_INT_SIZE # may change between reads; use the smaller value below
if fifo_int_size_1 > fifo_int_size_2:
fifo_int_size = fifo_int_size_2 # use smaller chunk
logger.warning("Reading wrong FIFO size. Expected: %d <= %d" % (fifo_int_size_1, fifo_int_size_2))
else:
fifo_int_size = fifo_int_size_1 # use smaller chunk
return np.frombuffer(self._intf.read(self._conf['base_data_addr'], size=4 * fifo_int_size), dtype=np.dtype('<u4')) | python | def get_data(self):
''' Reading data in BRAM.
Returns
-------
array : numpy.ndarray
Array of unsigned integers (32 bit).
'''
fifo_int_size_1 = self.FIFO_INT_SIZE # read the size register twice, since the FIFO fill level
fifo_int_size_2 = self.FIFO_INT_SIZE # may change between reads; use the smaller value below
if fifo_int_size_1 > fifo_int_size_2:
fifo_int_size = fifo_int_size_2 # use smaller chunk
logger.warning("Reading wrong FIFO size. Expected: %d <= %d" % (fifo_int_size_1, fifo_int_size_2))
else:
fifo_int_size = fifo_int_size_1 # use smaller chunk
return np.frombuffer(self._intf.read(self._conf['base_data_addr'], size=4 * fifo_int_size), dtype=np.dtype('<u4')) | Reading data in BRAM.
Returns
-------
array : numpy.ndarray
Array of unsigned integers (32 bit). | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/bram_fifo.py#L60-L75 |
SiLab-Bonn/basil | basil/utils/sim/SiLibUsbBusDriver.py | SiLibUsbBusDriver.read_external | def read_external(self, address):
"""Copied from silusb.sv testbench interface"""
self.bus.RD_B <= 1
self.bus.ADD <= self._x
self.bus.BUS_DATA <= self._high_impedence
for _ in range(5):
yield RisingEdge(self.clock)
yield RisingEdge(self.clock)
self.bus.ADD <= address + 0x4000
yield RisingEdge(self.clock)
self.bus.RD_B <= 0
yield RisingEdge(self.clock)
self.bus.RD_B <= 0
yield ReadOnly()
result = self.bus.BUS_DATA.value.integer
yield RisingEdge(self.clock)
self.bus.RD_B <= 1
yield RisingEdge(self.clock)
self.bus.RD_B <= 1
self.bus.ADD <= self._x
yield RisingEdge(self.clock)
for _ in range(5):
yield RisingEdge(self.clock)
raise ReturnValue(result) | python | def read_external(self, address):
"""Copied from silusb.sv testbench interface"""
self.bus.RD_B <= 1
self.bus.ADD <= self._x
self.bus.BUS_DATA <= self._high_impedence
for _ in range(5):
yield RisingEdge(self.clock)
yield RisingEdge(self.clock)
self.bus.ADD <= address + 0x4000
yield RisingEdge(self.clock)
self.bus.RD_B <= 0
yield RisingEdge(self.clock)
self.bus.RD_B <= 0
yield ReadOnly()
result = self.bus.BUS_DATA.value.integer
yield RisingEdge(self.clock)
self.bus.RD_B <= 1
yield RisingEdge(self.clock)
self.bus.RD_B <= 1
self.bus.ADD <= self._x
yield RisingEdge(self.clock)
for _ in range(5):
yield RisingEdge(self.clock)
raise ReturnValue(result) | Copied from silusb.sv testbench interface | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/utils/sim/SiLibUsbBusDriver.py#L101-L130 |
SiLab-Bonn/basil | basil/utils/sim/SiLibUsbBusDriver.py | SiLibUsbBusDriver.write_external | def write_external(self, address, value):
"""Copied from silusb.sv testbench interface"""
self.bus.WR_B <= 1
self.bus.ADD <= self._x
for _ in range(5):
yield RisingEdge(self.clock)
yield RisingEdge(self.clock)
self.bus.ADD <= address + 0x4000
self.bus.BUS_DATA <= int(value)
yield Timer(1) # This is a hack for iverilog
self.bus.ADD <= address + 0x4000
self.bus.BUS_DATA <= int(value)
yield RisingEdge(self.clock)
self.bus.WR_B <= 0
yield Timer(1) # This is a hack for iverilog
self.bus.BUS_DATA <= int(value)
self.bus.WR_B <= 0
yield RisingEdge(self.clock)
self.bus.WR_B <= 0
yield Timer(1) # This is a hack for iverilog
self.bus.BUS_DATA <= int(value)
self.bus.WR_B <= 0
yield RisingEdge(self.clock)
self.bus.WR_B <= 1
self.bus.BUS_DATA <= self._high_impedence
yield Timer(1) # This is a hack for iverilog
self.bus.WR_B <= 1
self.bus.BUS_DATA <= self._high_impedence
yield RisingEdge(self.clock)
self.bus.WR_B <= 1
self.bus.ADD <= self._x
for _ in range(5):
yield RisingEdge(self.clock) | python | def write_external(self, address, value):
"""Copied from silusb.sv testbench interface"""
self.bus.WR_B <= 1
self.bus.ADD <= self._x
for _ in range(5):
yield RisingEdge(self.clock)
yield RisingEdge(self.clock)
self.bus.ADD <= address + 0x4000
self.bus.BUS_DATA <= int(value)
yield Timer(1) # This is a hack for iverilog
self.bus.ADD <= address + 0x4000
self.bus.BUS_DATA <= int(value)
yield RisingEdge(self.clock)
self.bus.WR_B <= 0
yield Timer(1) # This is a hack for iverilog
self.bus.BUS_DATA <= int(value)
self.bus.WR_B <= 0
yield RisingEdge(self.clock)
self.bus.WR_B <= 0
yield Timer(1) # This is a hack for iverilog
self.bus.BUS_DATA <= int(value)
self.bus.WR_B <= 0
yield RisingEdge(self.clock)
self.bus.WR_B <= 1
self.bus.BUS_DATA <= self._high_impedence
yield Timer(1) # This is a hack for iverilog
self.bus.WR_B <= 1
self.bus.BUS_DATA <= self._high_impedence
yield RisingEdge(self.clock)
self.bus.WR_B <= 1
self.bus.ADD <= self._x
for _ in range(5):
yield RisingEdge(self.clock) | Copied from silusb.sv testbench interface | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/utils/sim/SiLibUsbBusDriver.py#L133-L168 |
SiLab-Bonn/basil | basil/HL/FEI4AdapterCard.py | AdcMax1239._setup_adc | def _setup_adc(self, flags):
'''Initialize ADC
'''
self._intf.write(self._base_addr + self.MAX_1239_ADD, array('B', pack('B', flags))) | python | def _setup_adc(self, flags):
'''Initialize ADC
'''
self._intf.write(self._base_addr + self.MAX_1239_ADD, array('B', pack('B', flags))) | Initialize ADC | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/FEI4AdapterCard.py#L45-L48 |
SiLab-Bonn/basil | basil/HL/FEI4AdapterCard.py | AdcMax1239._get_adc_value | def _get_adc_value(self, channel, average=None):
'''Read ADC
'''
conf = self.SCAN_OFF | self.SINGLE_ENDED | ((0x1e) & (channel << 1))
self._intf.write(self._base_addr + self.MAX_1239_ADD, array('B', pack('B', conf)))
def read_data():
ret = self._intf.read(self._base_addr + self.MAX_1239_ADD | 1, size=2)
ret.reverse()
ret[1] = ret[1] & 0x0f # 12-bit ADC
return unpack_from('H', ret)[0]
if average:
raw = 0
for _ in range(average):
raw += read_data()
raw /= average
else:
raw = read_data()
return raw | python | def _get_adc_value(self, channel, average=None):
'''Read ADC
'''
conf = self.SCAN_OFF | self.SINGLE_ENDED | ((0x1e) & (channel << 1))
self._intf.write(self._base_addr + self.MAX_1239_ADD, array('B', pack('B', conf)))
def read_data():
ret = self._intf.read(self._base_addr + self.MAX_1239_ADD | 1, size=2)
ret.reverse()
ret[1] = ret[1] & 0x0f # 12-bit ADC
return unpack_from('H', ret)[0]
if average:
raw = 0
for _ in range(average):
raw += read_data()
raw /= average
else:
raw = read_data()
return raw | Read ADC | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/FEI4AdapterCard.py#L50-L70 |
SiLab-Bonn/basil | basil/HL/FEI4AdapterCard.py | Eeprom24Lc128._read_eeprom | def _read_eeprom(self, address, size):
'''Read EEPROM
'''
self._intf.write(self._base_addr + self.CAL_EEPROM_ADD, array('B', pack('>H', address & 0x3FFF))) # 14-bit address, 16384 bytes
n_pages, n_bytes = divmod(size, self.CAL_EEPROM_PAGE_SIZE)
data = array('B')
for _ in range(n_pages):
data.extend(self._intf.read(self._base_addr + self.CAL_EEPROM_ADD | 1, size=self.CAL_EEPROM_PAGE_SIZE))
if n_bytes > 0:
data.extend(self._intf.read(self._base_addr + self.CAL_EEPROM_ADD | 1, size=n_bytes))
return data | python | def _read_eeprom(self, address, size):
'''Read EEPROM
'''
self._intf.write(self._base_addr + self.CAL_EEPROM_ADD, array('B', pack('>H', address & 0x3FFF))) # 14-bit address, 16384 bytes
n_pages, n_bytes = divmod(size, self.CAL_EEPROM_PAGE_SIZE)
data = array('B')
for _ in range(n_pages):
data.extend(self._intf.read(self._base_addr + self.CAL_EEPROM_ADD | 1, size=self.CAL_EEPROM_PAGE_SIZE))
if n_bytes > 0:
data.extend(self._intf.read(self._base_addr + self.CAL_EEPROM_ADD | 1, size=n_bytes))
return data | Read EEPROM | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/FEI4AdapterCard.py#L102-L115 |
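The page split in _read_eeprom above is plain divmod arithmetic; a small self-contained illustration (the page size value is an assumed example, not the device constant):

CAL_EEPROM_PAGE_SIZE = 32            # bytes per chunked read (assumed example)
size = 70
n_pages, n_bytes = divmod(size, CAL_EEPROM_PAGE_SIZE)
assert (n_pages, n_bytes) == (2, 6)  # two full pages plus a 6-byte tail read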
SiLab-Bonn/basil | basil/HL/FEI4AdapterCard.py | Fei4Dcs.set_default | def set_default(self, channels=None):
'''Setting default voltage
'''
if not channels:
channels = self._ch_cal.keys()
for channel in channels:
self.set_voltage(channel, self._ch_cal[channel]['default'], unit='V') | python | def set_default(self, channels=None):
'''Setting default voltage
'''
if not channels:
channels = self._ch_cal.keys()
for channel in channels:
self.set_voltage(channel, self._ch_cal[channel]['default'], unit='V') | Setting default voltage | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/FEI4AdapterCard.py#L140-L146 |
SiLab-Bonn/basil | basil/HL/FEI4AdapterCard.py | Fei4Dcs.get_voltage | def get_voltage(self, channel, unit='V'):
'''Reading voltage
'''
kwargs = self._ch_map[channel]['ADCV']
voltage_raw = self._get_adc_value(**kwargs)
voltage = (voltage_raw - self._ch_cal[channel]['ADCV']['offset']) / self._ch_cal[channel]['ADCV']['gain']
if unit == 'raw':
return voltage_raw
elif unit == 'V':
return voltage
elif unit == 'mV':
return voltage * 1000
else:
raise TypeError("Invalid unit type.") | python | def get_voltage(self, channel, unit='V'):
'''Reading voltage
'''
kwargs = self._ch_map[channel]['ADCV']
voltage_raw = self._get_adc_value(**kwargs)
voltage = (voltage_raw - self._ch_cal[channel]['ADCV']['offset']) / self._ch_cal[channel]['ADCV']['gain']
if unit == 'raw':
return voltage_raw
elif unit == 'V':
return voltage
elif unit == 'mV':
return voltage * 1000
else:
raise TypeError("Invalid unit type.") | Reading voltage | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/FEI4AdapterCard.py#L165-L180 |
SiLab-Bonn/basil | basil/HL/FEI4AdapterCard.py | Fei4Dcs.get_current | def get_current(self, channel, unit='A'):
'''Reading current
'''
kwargs = self._ch_map[channel]['ADCI']
current_raw = self._get_adc_value(**kwargs)
voltage = self.get_voltage(channel)
current_raw_iq = current_raw - (self._ch_cal[channel]['ADCI']['iq_offset'] + self._ch_cal[channel]['ADCI']['iq_gain'] * voltage) # quiescent current (IQ) compensation
current = (current_raw_iq - self._ch_cal[channel]['ADCI']['offset']) / self._ch_cal[channel]['ADCI']['gain']
if unit == 'raw':
return current_raw
elif unit == 'raw_iq':
return current_raw_iq
elif unit == 'A':
return current
elif unit == 'mA':
return current * 1000
elif unit == 'uA':
return current * 1000000
else:
raise TypeError("Invalid unit type.") | python | def get_current(self, channel, unit='A'):
'''Reading current
'''
kwargs = self._ch_map[channel]['ADCI']
current_raw = self._get_adc_value(**kwargs)
voltage = self.get_voltage(channel)
current_raw_iq = current_raw - (self._ch_cal[channel]['ADCI']['iq_offset'] + self._ch_cal[channel]['ADCI']['iq_gain'] * voltage) # quiescent current (IQ) compensation
current = (current_raw_iq - self._ch_cal[channel]['ADCI']['offset']) / self._ch_cal[channel]['ADCI']['gain']
if unit == 'raw':
return current_raw
elif unit == 'raw_iq':
return current_raw_iq
elif unit == 'A':
return current
elif unit == 'mA':
return current * 1000
elif unit == 'uA':
return current * 1000000
else:
raise TypeError("Invalid unit type.") | Reading current | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/FEI4AdapterCard.py#L182-L203 |
SiLab-Bonn/basil | basil/HL/FEI4AdapterCard.py | FEI4AdapterCard.read_eeprom_calibration | def read_eeprom_calibration(self, temperature=False): # use default values for temperature, EEPROM values are usually not calibrated and random
'''Reading EEPROM calibration for power regulators and temperature
'''
header = self.get_format()
if header == self.HEADER_V1:
data = self._read_eeprom(self.CAL_DATA_ADDR, size=calcsize(self.CAL_DATA_V1_FORMAT))
for idx, channel in enumerate(self._ch_cal.iterkeys()):
ch_data = data[idx * calcsize(self.CAL_DATA_CH_V1_FORMAT):(idx + 1) * calcsize(self.CAL_DATA_CH_V1_FORMAT)]
values = unpack_from(self.CAL_DATA_CH_V1_FORMAT, ch_data)
self._ch_cal[channel]['name'] = "".join([c for c in values[0] if (c in string.printable)]) # values[0].strip()
self._ch_cal[channel]['default'] = values[1]
self._ch_cal[channel]['ADCI']['gain'] = values[2]
self._ch_cal[channel]['ADCI']['offset'] = values[3]
self._ch_cal[channel]['ADCI']['iq_gain'] = values[4]
self._ch_cal[channel]['ADCI']['iq_offset'] = values[5]
self._ch_cal[channel]['ADCV']['gain'] = values[6]
self._ch_cal[channel]['ADCV']['offset'] = values[7]
self._ch_cal[channel]['DACV']['gain'] = values[8]
self._ch_cal[channel]['DACV']['offset'] = values[9]
const_data = data[-calcsize(self.CAL_DATA_CONST_V1_FORMAT):]
values = unpack_from(self.CAL_DATA_CONST_V1_FORMAT, const_data)
if temperature:
for channel in self._ch_cal.keys():
self._ch_cal[channel]['VNTC']['B_NTC'] = values[0]
self._ch_cal[channel]['VNTC']['R1'] = values[1]
self._ch_cal[channel]['VNTC']['R2'] = values[2]
self._ch_cal[channel]['VNTC']['R4'] = values[3]
self._ch_cal[channel]['VNTC']['R_NTC_25'] = values[4]
self._ch_cal[channel]['VNTC']['VREF'] = values[5]
else:
raise ValueError('EEPROM data format not supported (header: %s)' % header) | python | def read_eeprom_calibration(self, temperature=False): # use default values for temperature, EEPROM values are usually not calibrated and random
'''Reading EEPROM calibration for power regulators and temperature
'''
header = self.get_format()
if header == self.HEADER_V1:
data = self._read_eeprom(self.CAL_DATA_ADDR, size=calcsize(self.CAL_DATA_V1_FORMAT))
for idx, channel in enumerate(self._ch_cal.iterkeys()):
ch_data = data[idx * calcsize(self.CAL_DATA_CH_V1_FORMAT):(idx + 1) * calcsize(self.CAL_DATA_CH_V1_FORMAT)]
values = unpack_from(self.CAL_DATA_CH_V1_FORMAT, ch_data)
self._ch_cal[channel]['name'] = "".join([c for c in values[0] if (c in string.printable)]) # values[0].strip()
self._ch_cal[channel]['default'] = values[1]
self._ch_cal[channel]['ADCI']['gain'] = values[2]
self._ch_cal[channel]['ADCI']['offset'] = values[3]
self._ch_cal[channel]['ADCI']['iq_gain'] = values[4]
self._ch_cal[channel]['ADCI']['iq_offset'] = values[5]
self._ch_cal[channel]['ADCV']['gain'] = values[6]
self._ch_cal[channel]['ADCV']['offset'] = values[7]
self._ch_cal[channel]['DACV']['gain'] = values[8]
self._ch_cal[channel]['DACV']['offset'] = values[9]
const_data = data[-calcsize(self.CAL_DATA_CONST_V1_FORMAT):]
values = unpack_from(self.CAL_DATA_CONST_V1_FORMAT, const_data)
if temperature:
for channel in self._ch_cal.keys():
self._ch_cal[channel]['VNTC']['B_NTC'] = values[0]
self._ch_cal[channel]['VNTC']['R1'] = values[1]
self._ch_cal[channel]['VNTC']['R2'] = values[2]
self._ch_cal[channel]['VNTC']['R4'] = values[3]
self._ch_cal[channel]['VNTC']['R_NTC_25'] = values[4]
self._ch_cal[channel]['VNTC']['VREF'] = values[5]
else:
raise ValueError('EEPROM data format not supported (header: %s)' % header) | Reading EEPROM calibration for power regulators and temperature | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/FEI4AdapterCard.py#L336-L366 |
SiLab-Bonn/basil | basil/HL/FEI4AdapterCard.py | FEI4AdapterCard.get_temperature | def get_temperature(self, channel, sensor='VNTC'):
'''Reading temperature
'''
# NTC type SEMITEC 103KT1608 http://www.semitec.co.jp/english/products/pdf/KT_Thermistor.pdf
#
# R_NTC = R_25 * exp(B_NTC * (1/T - 1/T_25))
#
# R_NTC measured NTC resistance
# R_NTC_25 resistance @ 25C
# B_NTC temperature coefficient
# Temperature current temperature (Kelvin)
# T_25 298.15 K (25C)
#
# B_NTC NTC 'b' coefficient, NTC Semitec 103KT1608-1P
# R_NTC_25 NTC 25C resistance, NTC Semitec 103KT1608-1P
# R1 resistor value for NTC voltage divider
# R2 value of R2 in the reference voltage divider
# R4 value of R4 in the reference voltage divider
# VREF supply voltage of the resistor bridge
#
kwargs = self._ch_map[channel][sensor]
temp_raw = self._get_adc_value(**kwargs)
v_adc = ((temp_raw - self._ch_cal.items()[0][1]['ADCV']['offset']) / self._ch_cal.items()[0][1]['ADCV']['gain']) # voltage, VDDA1
k = self._ch_cal[channel][sensor]['R4'] / (self._ch_cal[channel][sensor]['R2'] + self._ch_cal[channel][sensor]['R4']) # reference voltage divider
r_ntc = self._ch_cal[channel][sensor]['R1'] * (k - v_adc / self._ch_cal[channel][sensor]['VREF']) / (1 - k + v_adc / self._ch_cal[channel][sensor]['VREF']) # NTC resistance
return (self._ch_cal[channel][sensor]['B_NTC'] / (log(r_ntc) - log(self._ch_cal[channel][sensor]['R_NTC_25']) + self._ch_cal[channel][sensor]['B_NTC'] / self.T_KELVIN_25)) - self.T_KELVIN_0 | python | def get_temperature(self, channel, sensor='VNTC'):
'''Reading temperature
'''
# NTC type SEMITEC 103KT1608 http://www.semitec.co.jp/english/products/pdf/KT_Thermistor.pdf
#
# R_NTC = R_25 * exp(B_NTC * (1/T - 1/T_25))
#
# R_NTC measured NTC resistance
# R_NTC_25 resistance @ 25C
# B_NTC temperature coefficient
# Temperature current temperature (Kelvin)
# T_25 298.15 K (25C)
#
# B_NTC NTC 'b' coefficient, NTC Semitec 103KT1608-1P
# R_NTC_25 NTC 25C resistance, NTC Semitec 103KT1608-1P
# R1 resistor value for NTC voltage divider
# R2 value of R2 in the reference voltage divider
# R4 value of R4 in the reference voltage divider
# VREF supply voltage of the resistor bridge
#
kwargs = self._ch_map[channel][sensor]
temp_raw = self._get_adc_value(**kwargs)
v_adc = ((temp_raw - self._ch_cal.items()[0][1]['ADCV']['offset']) / self._ch_cal.items()[0][1]['ADCV']['gain']) # voltage, VDDA1
k = self._ch_cal[channel][sensor]['R4'] / (self._ch_cal[channel][sensor]['R2'] + self._ch_cal[channel][sensor]['R4']) # reference voltage divider
r_ntc = self._ch_cal[channel][sensor]['R1'] * (k - v_adc / self._ch_cal[channel][sensor]['VREF']) / (1 - k + v_adc / self._ch_cal[channel][sensor]['VREF']) # NTC resistance
return (self._ch_cal[channel][sensor]['B_NTC'] / (log(r_ntc) - log(self._ch_cal[channel][sensor]['R_NTC_25']) + self._ch_cal[channel][sensor]['B_NTC'] / self.T_KELVIN_25)) - self.T_KELVIN_0 | Reading temperature | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/FEI4AdapterCard.py#L368-L396 |
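The comment block above fully specifies the NTC math; the same B-parameter equation as a self-contained sketch (the constants are typical Semitec 103KT1608 values and are assumed here, not read from the EEPROM):

from math import log

def ntc_temperature_c(r_ntc, b_ntc=3435.0, r_ntc_25=10000.0):
    # R_NTC = R_NTC_25 * exp(B_NTC * (1/T - 1/T_25)), solved for T, in deg C
    t_25 = 298.15  # 25 C in kelvin
    t_0 = 273.15   # 0 C in kelvin
    return b_ntc / (log(r_ntc) - log(r_ntc_25) + b_ntc / t_25) - t_0

print(ntc_temperature_c(10000.0))  # 25.0 C by construction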
SiLab-Bonn/basil | basil/utils/DataManipulation.py | convert_data_array | def convert_data_array(arr, filter_func=None, converter_func=None):
'''Filter and convert any given data array of any dtype.
Parameters
----------
arr : numpy.array
Data array of any dtype.
filter_func : function
Function that takes array and returns true or false for each item in array.
converter_func : function
Function that takes array and returns an array or tuple of arrays.
Returns
-------
array of specified dimension (converter_func) and content (filter_func)
'''
# if filter_func != None:
# if not hasattr(filter_func, '__call__'):
# raise ValueError('Filter is not callable')
if filter_func:
arr = arr[filter_func(arr)] # indexing with a boolean mask; assign back to arr so the result is returned
# if converter_func != None:
# if not hasattr(converter_func, '__call__'):
# raise ValueError('Converter is not callable')
if converter_func:
arr = converter_func(arr)
return arr | python | def convert_data_array(arr, filter_func=None, converter_func=None):
'''Filter and convert any given data array of any dtype.
Parameters
----------
arr : numpy.array
Data array of any dtype.
filter_func : function
Function that takes array and returns true or false for each item in array.
converter_func : function
Function that takes array and returns an array or tuple of arrays.
Returns
-------
array of specified dimension (converter_func) and content (filter_func)
'''
# if filter_func != None:
# if not hasattr(filter_func, '__call__'):
# raise ValueError('Filter is not callable')
if filter_func:
arr = arr[filter_func(arr)] # indexing with a boolean mask; assign back to arr so the result is returned
# if converter_func != None:
# if not hasattr(converter_func, '__call__'):
# raise ValueError('Converter is not callable')
if converter_func:
arr = converter_func(arr)
return arr | Filter and convert any given data array of any dtype.
Parameters
----------
arr : numpy.array
Data array of any dtype.
filter_func : function
Function that takes array and returns true or false for each item in array.
converter_func : function
Function that takes array and returns an array or tuple of arrays.
Returns
-------
array of specified dimension (converter_func) and content (filter_func) | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/utils/DataManipulation.py#L11-L37 |
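With the filter/converter fix above, a short usage sketch (values are illustrative):

import numpy as np

arr = np.array([1, 2, 3, 4, 5, 6], dtype=np.uint32)
out = convert_data_array(arr,
                         filter_func=lambda a: a % 2 == 0,  # keep even words
                         converter_func=lambda a: a * 2)    # then double them
# out -> array([ 4,  8, 12], dtype=uint32)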
SiLab-Bonn/basil | basil/utils/DataManipulation.py | logical_and | def logical_and(f1, f2): # function factory
'''Logical and from functions.
Parameters
----------
f1, f2 : function
Function that takes array and returns true or false for each item in array.
Returns
-------
Function
Examples
--------
filter_func=logical_and(is_data_record, is_data_from_channel(4)) # new filter function
filter_func(array) # array that has Data Records from channel 4
'''
def f_and(arr):
return np.logical_and(f1(arr), f2(arr))
f_and.__name__ = f1.__name__ + "_and_" + f2.__name__
return f_and | python | def logical_and(f1, f2): # function factory
'''Logical and from functions.
Parameters
----------
f1, f2 : function
Function that takes array and returns true or false for each item in array.
Returns
-------
Function
Examples
--------
filter_func=logical_and(is_data_record, is_data_from_channel(4)) # new filter function
filter_func(array) # array that has Data Records from channel 4
'''
def f_and(arr):
return np.logical_and(f1(arr), f2(arr))
f_and.__name__ = f1.__name__ + "_and_" + f2.__name__
return f_and | Logical and from functions.
Parameters
----------
f1, f2 : function
Function that takes array and returns true or false for each item in array.
Returns
-------
Function
Examples
--------
filter_func=logical_and(is_data_record, is_data_from_channel(4)) # new filter function
filter_func(array) # array that has Data Records from channel 4 | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/utils/DataManipulation.py#L44-L64 |
SiLab-Bonn/basil | basil/utils/DataManipulation.py | logical_or | def logical_or(f1, f2): # function factory
'''Logical or from functions.
Parameters
----------
f1, f2 : function
Function that takes array and returns true or false for each item in array.
Returns
-------
Function
'''
def f_or(arr):
return np.logical_or(f1(arr), f2(arr))
f_or.__name__ = f1.__name__ + "_or_" + f2.__name__
return f_or | python | def logical_or(f1, f2): # function factory
'''Logical or from functions.
Parameters
----------
f1, f2 : function
Function that takes array and returns true or false for each item in array.
Returns
-------
Function
'''
def f_or(arr):
return np.logical_or(f1(arr), f2(arr))
f_or.__name__ = f1.__name__ + "_or_" + f2.__name__
return f_or | Logical or from functions.
Parameters
----------
f1, f2 : function
Function that takes array and returns true or false for each item in array.
Returns
-------
Function | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/utils/DataManipulation.py#L67-L82 |
SiLab-Bonn/basil | basil/utils/DataManipulation.py | logical_not | def logical_not(f): # function factory
'''Logical not from functions.
Parameters
----------
f : function
Function that takes array and returns true or false for each item in array.
Returns
-------
Function
'''
def f_not(arr):
return np.logical_not(f(arr))
f_not.__name__ = "not_" + f.__name__
return f_not | python | def logical_not(f): # function factory
'''Logical not from functions.
Parameters
----------
f : function
Function that takes array and returns true or false for each item in array.
Returns
-------
Function
'''
def f_not(arr):
return np.logical_not(f(arr))
f_not.__name__ = "not_" + f.__name__
return f_not | Logical not from functions.
Parameters
----------
f : function
Function that takes array and returns true or false for each item in array.
Returns
-------
Function | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/utils/DataManipulation.py#L85-L100 |
SiLab-Bonn/basil | basil/utils/DataManipulation.py | logical_xor | def logical_xor(f1, f2): # function factory
'''Logical xor from functions.
Parameters
----------
f1, f2 : function
Function that takes array and returns true or false for each item in array.
Returns
-------
Function
'''
def f_xor(arr):
return np.logical_xor(f1(arr), f2(arr))
f_xor.__name__ = f1.__name__ + "_xor_" + f2.__name__
return f_xor | python | def logical_xor(f1, f2): # function factory
'''Logical xor from functions.
Parameters
----------
f1, f2 : function
Function that takes array and returns true or false for each item in array.
Returns
-------
Function
'''
def f_xor(arr):
return np.logical_xor(f1(arr), f2(arr))
f_xor.__name__ = f1.__name__ + "_xor_" + f2.__name__
return f_xor | Logical xor from functions.
Parameters
----------
f1, f2 : function
Function that takes array and returns true or false for each item in array.
Returns
-------
Function | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/utils/DataManipulation.py#L103-L118 |
SiLab-Bonn/basil | basil/utils/DataManipulation.py | arr_select | def arr_select(value): # function factory
'''Selecting array elements by bitwise and comparison to a given value.
Parameters:
value : int
Value to which array elements are compared.
Returns:
Function
'''
def f_eq(arr):
return np.equal(np.bitwise_and(arr, value), value)
f_eq.__name__ = "arr_bitwise_and_" + str(value) # or use inspect module: inspect.stack()[0][3]
return f_eq | python | def arr_select(value): # function factory
'''Selecting array elements by bitwise and comparison to a given value.
Parameters:
value : int
Value to which array elements are compared.
Returns:
Function
'''
def f_eq(arr):
return np.equal(np.bitwise_and(arr, value), value)
f_eq.__name__ = "arr_bitwise_and_" + str(value) # or use inspect module: inspect.stack()[0][3]
return f_eq | Selecting array elements by bitwise and comparison to a given value.
Parameters:
value : int
Value to which array elements are compared.
Returns:
Function | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/utils/DataManipulation.py#L121-L134 |
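A short usage sketch for the factory above (values are illustrative):

import numpy as np

sel = arr_select(0b1000)                     # keep items with bit 3 set
mask = sel(np.array([0x8, 0x9, 0x4, 0xF]))
# mask -> array([ True,  True, False,  True])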
SiLab-Bonn/basil | basil/utils/DataManipulation.py | arr_astype | def arr_astype(arr_type): # function factory
'''Change dtype of array.
Parameters:
arr_type : str, np.dtype
Character codes (e.g. 'b', '>H'), type strings (e.g. 'i4', 'f8'), Python types (e.g. float, int) and numpy dtypes (e.g. np.uint32) are allowed.
Returns:
Function
'''
def f_astype(arr):
return arr.astype(arr_type)
f_astype.__name__ = "arr_astype_" + str(arr_type) # or use inspect module: inspect.stack()[0][3]
return f_astype | python | def arr_astype(arr_type): # function factory
'''Change dtype of array.
Parameters:
arr_type : str, np.dtype
Character codes (e.g. 'b', '>H'), type strings (e.g. 'i4', 'f8'), Python types (e.g. float, int) and numpy dtypes (e.g. np.uint32) are allowed.
Returns:
Function
'''
def f_astype(arr):
return arr.astype(arr_type)
f_astype.__name__ = "arr_astype_" + str(arr_type) # or use inspect module: inspect.stack()[0][3]
return f_astype | Change dtype of array.
Parameters:
arr_type : str, np.dtype
Character codes (e.g. 'b', '>H'), type strings (e.g. 'i4', 'f8'), Python types (e.g. float, int) and numpy dtypes (e.g. np.uint32) are allowed.
Returns:
Function | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/utils/DataManipulation.py#L139-L152 |
SiLab-Bonn/basil | basil/utils/sim/Protocol.py | PickleInterface.send | def send(self, obj):
"""Prepend a 4-byte length to the string"""
assert isinstance(obj, ProtocolBase)
string = pickle.dumps(obj)
length = len(string)
self.sock.sendall(struct.pack("<I", length) + string) | python | def send(self, obj):
"""Prepend a 4-byte length to the string"""
assert isinstance(obj, ProtocolBase)
string = pickle.dumps(obj)
length = len(string)
self.sock.sendall(struct.pack("<I", length) + string) | Prepend a 4-byte length to the string | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/utils/sim/Protocol.py#L57-L62 |
SiLab-Bonn/basil | basil/utils/sim/Protocol.py | PickleInterface.recv | def recv(self, blocking=True):
"""Receive the next object from the socket"""
length = struct.unpack("<I", self.sock.recv(4))[0]
return self._get_next_obj(length) | python | def recv(self, blocking=True):
"""Receive the next object from the socket"""
length = struct.unpack("<I", self.sock.recv(4))[0]
return self._get_next_obj(length) | Receive the next object from the socket | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/utils/sim/Protocol.py#L64-L67 |
SiLab-Bonn/basil | basil/utils/sim/Protocol.py | PickleInterface.try_recv | def try_recv(self):
"""Return None immediately if nothing is waiting"""
try:
lenstr = self.sock.recv(4, socket.MSG_DONTWAIT)
except socket.error:
return None
if len(lenstr) < 4:
raise EOFError("Socket closed")
length = struct.unpack("<I", lenstr)[0]
return self._get_next_obj(length) | python | def try_recv(self):
"""Return None immediately if nothing is waiting"""
try:
lenstr = self.sock.recv(4, socket.MSG_DONTWAIT)
except socket.error:
return None
if len(lenstr) < 4:
raise EOFError("Socket closed")
length = struct.unpack("<I", lenstr)[0]
return self._get_next_obj(length) | Return None immediately if nothing is waiting | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/utils/sim/Protocol.py#L69-L78 |
SiLab-Bonn/basil | basil/utils/sim/Protocol.py | PickleInterface._get_next_obj | def _get_next_obj(self, length):
"""Assumes we've already read the object length"""
data = b''
while len(data) < length:
data += self.sock.recv(length - len(data))
return pickle.loads(data) | python | def _get_next_obj(self, length):
"""Assumes we've already read the object length"""
data = b''
while len(data) < length:
data += self.sock.recv(length - len(data))
return pickle.loads(data) | Assumes we've already read the object length | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/utils/sim/Protocol.py#L80-L86 |
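Taken together, send/recv/_get_next_obj above implement a length-prefixed pickle framing; a condensed, socket-free sketch of the same wire format (illustrative only, not the module's API):

import pickle
import struct

def frame(obj):
    payload = pickle.dumps(obj)
    return struct.pack("<I", len(payload)) + payload  # 4-byte little-endian length prefix

def unframe(buf):
    length = struct.unpack("<I", buf[:4])[0]
    return pickle.loads(buf[4:4 + length])

msg = {"cmd": "read", "addr": 0x4000}
assert unframe(frame(msg)) == msg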
SiLab-Bonn/basil | basil/dut.py | Dut.get_modules | def get_modules(self, type_name):
'''Getting modules by type name.
Parameters
----------
type_name : string
Type name of the modules to be returned.
Returns
-------
List of modules of given type name else empty list.
'''
modules = []
for module in self:
if module.__class__.__name__ == type_name:
modules.append(module)
return modules | python | def get_modules(self, type_name):
'''Getting modules by type name.
Parameters
----------
type_name : string
Type name of the modules to be returned.
Returns
-------
List of modules of given type name else empty list.
'''
modules = []
for module in self:
if module.__class__.__name__ == type_name:
modules.append(module)
return modules | Getting modules by type name.
Parameters
----------
type_name : string
Type name of the modules to be returned.
Returns
-------
List of modules of given type name else empty list. | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/dut.py#L288-L304 |
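A hedged usage sketch for get_modules above (the configured Dut instance and the driver class name 'gpio' are illustrative assumptions):

# dut = Dut('some_configuration.yaml')   # hypothetical configured DUT
for gpio in dut.get_modules('gpio'):     # all modules driven by the gpio class
    print(gpio.name)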
SiLab-Bonn/basil | basil/HL/sensirion_ekh4.py | sensirionEKH4.ask | def ask(self, command):
'''Read response to command and convert it to 16-bit integer.
Returns : list of values
'''
self.write(command)
time.sleep(0.1)
return self.read() | python | def ask(self, command):
'''Read response to command and convert it to 16-bit integer.
Returns : list of values
'''
self.write(command)
time.sleep(0.1)
return self.read() | Read response to command and convert it to 16-bit integer.
Returns : list of values | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/sensirion_ekh4.py#L34-L40 |
SiLab-Bonn/basil | basil/HL/tti_ql355tp.py | ttiQl355tp.set_enable | def set_enable(self, on, channel=1):
""" channel: 1=OP1, 2=OP2, 3=AUX, ALL=all channels"""
if isinstance(channel, str):
cmd = "OPALL %d" % int(on)
elif isinstance(channel, int):
cmd = "OP%d %d" % (channel, int(on))
self.write(cmd) | python | def set_enable(self, on, channel=1):
""" channel: 1=OP1, 2=OP2, 3=AUX, ALL=all channels"""
if isinstance(channel, str):
cmd = "OPALL %d" % int(on)
elif isinstance(channel, int):
cmd = "OP%d %d" % (channel, int(on))
self.write(cmd) | channel: 1=OP1, 2=OP2, 3=AUX, ALL=all channels | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/tti_ql355tp.py#L41-L47 |
SiLab-Bonn/basil | basil/HL/tti_ql355tp.py | ttiQl355tp.get_current | def get_current(self, channel):
""" channel: 1=OP1, 2=OP2, AUX is not supported"""
ret = self.ask("I%dO?" % channel)
if ret[-1] != "A":
print("ttiQl355tp.get_current() format error", ret)
return None
return float(ret[:-1]) | python | def get_current(self, channel):
""" channel: 1=OP1, 2=OP2, AUX is not supported"""
ret = self.ask("I%dO?" % channel)
if ret[-1] != "A":
print("ttiQl355tp.get_current() format error", ret)
return None
return float(ret[:-1]) | channel: 1=OP1, 2=OP2, AUX is not supported | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/tti_ql355tp.py#L52-L58 |
SiLab-Bonn/basil | basil/HL/tti_ql355tp.py | ttiQl355tp.get_voltage | def get_voltage(self, channel):
""" channel: 1=OP1, 2=OP2, AUX is not supported"""
ret = self.ask("V%dO?" % channel)
if ret[-1] != "V":
print("ttiQl355tp.get_voltage() format error", ret)
return None
return float(ret[:-1]) | python | def get_voltage(self, channel):
""" channel: 1=OP1, 2=OP2, AUX is not supported"""
ret = self.ask("V%dO?" % channel)
if ret[-1] != "V":
print("ttiQl355tp.get_voltage() format error", ret)
return None
return float(ret[:-1]) | channel: 1=OP1, 2=OP2, AUX is not supported | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/tti_ql355tp.py#L60-L66 |
SiLab-Bonn/basil | basil/HL/tti_ql355tp.py | ttiQl355tp.get_set_voltage | def get_set_voltage(self, channel):
""" channel: 1=OP1, 2=OP2, AUX is not supported"""
ret = self.ask("V%d?" % channel)
if ret[:3] != "V%d " % channel:
print("ttiQl355tp.get_voltage() format error", ret)
return None
return float(ret[3:]) | python | def get_set_voltage(self, channel):
""" channel: 1=OP1, 2=OP2, AUX is not supported"""
ret = self.ask("V%d?" % channel)
if ret[:3] != "V%d " % channel:
print("ttiQl355tp.get_voltage() format error", ret)
return None
return float(ret[3:]) | channel: 1=OP1, 2=OP2, AUX is not supported | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/tti_ql355tp.py#L68-L74 |
SiLab-Bonn/basil | basil/HL/tti_ql355tp.py | ttiQl355tp.get_current_limit | def get_current_limit(self, channel):
""" channel: 1=OP1, 2=OP2, AUX is not supported"""
ret = self.ask("I%d?" % channel)
if ret[:3] != "I%d " % channel:
print("ttiQl355tp.get_current_limit() format error", ret)
return None
return float(ret[3:]) | python | def get_current_limit(self, channel):
""" channel: 1=OP1, 2=OP2, AUX is not supported"""
ret = self.ask("I%d?" % channel)
if ret[:3] != "I%d " % channel:
print("ttiQl355tp.get_current_limit() format error", ret)
return None
return float(ret[3:]) | channel: 1=OP1, 2=OP2, AUX is not supported | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/tti_ql355tp.py#L76-L82 |
SiLab-Bonn/basil | basil/HL/tti_ql355tp.py | ttiQl355tp.set_voltage | def set_voltage(self, value, channel=1):
""" channel: 1=OP1, 2=OP2, AUX is not supported"""
cmd = "V%d %f" % (channel, value)
self.write(cmd) | python | def set_voltage(self, value, channel=1):
""" channel: 1=OP1, 2=OP2, AUX is not supported"""
cmd = "V%d %f" % (channel, value)
self.write(cmd) | channel: 1=OP1, 2=OP2, AUX is not supported | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/tti_ql355tp.py#L84-L87 |
SiLab-Bonn/basil | basil/HL/tti_ql355tp.py | ttiQl355tp.set_current_limit | def set_current_limit(self, value, channel=1):
""" channel: 1=OP1, 2=OP2, AUX is not supported"""
cmd = "I%d %f" % (channel, value)
self.write(cmd) | python | def set_current_limit(self, value, channel=1):
""" channel: 1=OP1, 2=OP2, AUX is not supported"""
cmd = "I%d %f" % (channel, value)
self.write(cmd) | channel: 1=OP1, 2=OP2, AUX is not supported | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/tti_ql355tp.py#L89-L92 |
SiLab-Bonn/basil | basil/HL/HardwareLayer.py | HardwareLayer.wait_for_ready | def wait_for_ready(self, timeout=None, times=None, delay=None, delay_between=None, abort=None):
'''Determine the ready state of the device and wait until device is ready.
Parameters
----------
timeout : int, float
The maximum amount of time to wait in seconds. Reaching the timeout will raise a RuntimeError.
times : int
Maximum number of times reading the ready state.
delay : int, float
The number of seconds to sleep before checks. Defaults to 0.
delay_between : int, float
The number of seconds to sleep between each check. Defaults to 0.
abort : Threading.Event
Breaking the loop from other threads.
Returns
-------
True if state is ready, else False.
'''
if delay:
try:
sleep(delay)
except IOError: # negative values
pass
if timeout is not None:
if timeout < 0:
raise ValueError("timeout is smaller than 0")
else:
stop = time() + timeout
times_checked = 0
while not self.is_ready:
now = time()
times_checked += 1
if abort and abort.is_set():
return False # abort requested from another thread
if timeout is not None and stop <= now:
raise RuntimeError('Time out while waiting for ready in %s, module %s' % (self.name, self.__class__.__module__))
if times and times_checked >= times:
return False # maximum number of checks reached
if delay_between:
try:
sleep(delay_between)
except IOError: # negative values
pass
return True | python | def wait_for_ready(self, timeout=None, times=None, delay=None, delay_between=None, abort=None):
'''Determine the ready state of the device and wait until device is ready.
Parameters
----------
timeout : int, float
The maximum amount of time to wait in seconds. Reaching the timeout will raise a RuntimeError.
times : int
Maximum number of times reading the ready state.
delay : int, float
The number of seconds to sleep before checks. Defaults to 0.
delay_between : int, float
The number of seconds to sleep between each check. Defaults to 0.
abort : Threading.Event
Breaking the loop from other threads.
Returns
-------
True if state is ready, else False.
'''
if delay:
try:
sleep(delay)
except IOError: # negative values
pass
if timeout is not None:
if timeout < 0:
raise ValueError("timeout is smaller than 0")
else:
stop = time() + timeout
times_checked = 0
while not self.is_ready:
now = time()
times_checked += 1
if abort and abort.is_set():
return False # abort requested from another thread
if timeout is not None and stop <= now:
raise RuntimeError('Time out while waiting for ready in %s, module %s' % (self.name, self.__class__.__module__))
if times and times_checked >= times:
return False # maximum number of checks reached
if delay_between:
try:
sleep(delay_between)
except IOError: # negative values
pass
return True | Determine the ready state of the device and wait until device is ready.
Parameters
----------
timeout : int, float
The maximum amount of time to wait in seconds. Reaching the timeout will raise a RuntimeError.
times : int
Maximum number of times reading the ready state.
delay : int, float
The number of seconds to sleep before checks. Defaults to 0.
delay_between : int, float
The number of seconds to sleep between each check. Defaults to 0.
abort : Threading.Event
Breaking the loop from other threads.
Returns
-------
True if state is ready, else False. | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/HardwareLayer.py#L26-L71 |
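With the early-return fixes above, a hedged usage sketch (the hardware-layer object hw is an assumption):

# poll hw.is_ready for at most 200 checks with 10 ms between them;
# exhausting the check budget or an abort event returns False, while an
# expired timeout (if one is given) raises RuntimeError
if not hw.wait_for_ready(times=200, delay_between=0.01):
    print('device did not become ready')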
SiLab-Bonn/basil | basil/HL/FEI4QuadModuleAdapterCard.py | DacMax5380._set_dac_value | def _set_dac_value(self, channel, value):
'''Write DAC
'''
self._intf.write(self._base_addr + self.MAX_5380_ADD, array('B', pack('B', value))) | python | def _set_dac_value(self, channel, value):
'''Write DAC
'''
self._intf.write(self._base_addr + self.MAX_5380_ADD, array('B', pack('B', value))) | Write DAC | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/FEI4QuadModuleAdapterCard.py#L32-L35 |
SiLab-Bonn/basil | basil/HL/FEI4QuadModuleAdapterCard.py | DacDs4424._set_dac_value | def _set_dac_value(self, channel, value):
'''Write DAC
'''
# DAC value cannot be -128
if value == -128:
value = -127
if value < 0:
sign = 1
else:
sign = 0
value = (sign << 7) | (0x7F & abs(value))
self._intf.write(self._base_addr + self.DS_4424_ADD, array('B', pack('BB', channel, value))) | python | def _set_dac_value(self, channel, value):
'''Write DAC
'''
# DAC value cannot be -128
if value == -128:
value = -127
if value < 0:
sign = 1
else:
sign = 0
value = (sign << 7) | (0x7F & abs(value))
self._intf.write(self._base_addr + self.DS_4424_ADD, array('B', pack('BB', channel, value))) | Write DAC | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/FEI4QuadModuleAdapterCard.py#L49-L60 |
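The DS4424 write above uses sign-magnitude coding (bit 7 is the sign, bits 6:0 the magnitude); the same encoding as a tiny standalone sketch:

def ds4424_code(value):
    value = max(value, -127)  # -128 has no sign-magnitude representation
    sign = 1 if value < 0 else 0
    return (sign << 7) | (0x7F & abs(value))

assert ds4424_code(5) == 0x05
assert ds4424_code(-5) == 0x85
assert ds4424_code(-128) == ds4424_code(-127) == 0xFF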
SiLab-Bonn/basil | basil/HL/FEI4QuadModuleAdapterCard.py | FEI4QuadModuleAdapterCard.read_eeprom_calibration | def read_eeprom_calibration(self, temperature=False): # use default values for temperature, EEPROM values are usually not calibrated and random
'''Reading EEPROM calibration for power regulators and temperature
'''
header = self.get_format()
if header == self.HEADER_V2:
data = self._read_eeprom(self.CAL_DATA_ADDR, size=calcsize(self.CAL_DATA_V2_FORMAT))
for idx, channel in enumerate(self._ch_cal.iterkeys()):
ch_data = data[idx * calcsize(self.CAL_DATA_CH_V2_FORMAT):(idx + 1) * calcsize(self.CAL_DATA_CH_V2_FORMAT)]
values = unpack_from(self.CAL_DATA_CH_V2_FORMAT, ch_data)
self._ch_cal[channel]['name'] = "".join([c for c in values[0] if (c in string.printable)]) # values[0].strip()
self._ch_cal[channel]['default'] = values[1]
self._ch_cal[channel]['ADCI']['gain'] = values[2]
self._ch_cal[channel]['ADCI']['offset'] = values[3]
self._ch_cal[channel]['ADCI']['iq_gain'] = values[4]
self._ch_cal[channel]['ADCI']['iq_offset'] = values[5]
self._ch_cal[channel]['ADCV']['gain'] = values[6]
self._ch_cal[channel]['ADCV']['offset'] = values[7]
self._ch_cal[channel]['DACV']['gain'] = values[8]
self._ch_cal[channel]['DACV']['offset'] = values[9]
if temperature:
self._ch_cal[channel]['NTC']['B_NTC'] = values[10]
self._ch_cal[channel]['NTC']['R1'] = values[11]
self._ch_cal[channel]['NTC']['R2'] = values[12]
self._ch_cal[channel]['NTC']['R4'] = values[13]
self._ch_cal[channel]['NTC']['R_NTC_25'] = values[14]
self._ch_cal[channel]['NTC']['VREF'] = values[15]
else:
raise ValueError('EEPROM data format not supported (header: %s)' % header) | python | def read_eeprom_calibration(self, temperature=False): # use default values for temperature, EEPROM values are usually not calibrated and random
'''Reading EEPROM calibration for power regulators and temperature
'''
header = self.get_format()
if header == self.HEADER_V2:
data = self._read_eeprom(self.CAL_DATA_ADDR, size=calcsize(self.CAL_DATA_V2_FORMAT))
for idx, channel in enumerate(self._ch_cal.iterkeys()):
ch_data = data[idx * calcsize(self.CAL_DATA_CH_V2_FORMAT):(idx + 1) * calcsize(self.CAL_DATA_CH_V2_FORMAT)]
values = unpack_from(self.CAL_DATA_CH_V2_FORMAT, ch_data)
self._ch_cal[channel]['name'] = "".join([c for c in values[0] if (c in string.printable)]) # values[0].strip()
self._ch_cal[channel]['default'] = values[1]
self._ch_cal[channel]['ADCI']['gain'] = values[2]
self._ch_cal[channel]['ADCI']['offset'] = values[3]
self._ch_cal[channel]['ADCI']['iq_gain'] = values[4]
self._ch_cal[channel]['ADCI']['iq_offset'] = values[5]
self._ch_cal[channel]['ADCV']['gain'] = values[6]
self._ch_cal[channel]['ADCV']['offset'] = values[7]
self._ch_cal[channel]['DACV']['gain'] = values[8]
self._ch_cal[channel]['DACV']['offset'] = values[9]
if temperature:
self._ch_cal[channel]['NTC']['B_NTC'] = values[10]
self._ch_cal[channel]['NTC']['R1'] = values[11]
self._ch_cal[channel]['NTC']['R2'] = values[12]
self._ch_cal[channel]['NTC']['R4'] = values[13]
self._ch_cal[channel]['NTC']['R_NTC_25'] = values[14]
self._ch_cal[channel]['NTC']['VREF'] = values[15]
else:
raise ValueError('EEPROM data format not supported (header: %s)' % header) | Reading EEPROM calibration for power regulators and temperature | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/FEI4QuadModuleAdapterCard.py#L164-L191 |
SiLab-Bonn/basil | basil/HL/FEI4QuadModuleAdapterCard.py | FEI4QuadModuleAdapterCard.set_current_limit | def set_current_limit(self, channel, value, unit='A'):
'''Setting current limit
Note: same limit for all channels.
'''
dac_offset = self._ch_cal[channel]['DACI']['offset']
dac_gain = self._ch_cal[channel]['DACI']['gain']
if unit == 'raw':
pass # value is already a raw DAC code
elif unit == 'A':
value = int((value - dac_offset) / dac_gain)
elif unit == 'mA':
value = int((value / 1000 - dac_offset) / dac_gain)
else:
raise TypeError("Invalid unit type.")
DacMax5380._set_dac_value(self, channel, value) | python | def set_current_limit(self, channel, value, unit='A'):
'''Setting current limit
Note: same limit for all channels.
'''
dac_offset = self._ch_cal[channel]['DACI']['offset']
dac_gain = self._ch_cal[channel]['DACI']['gain']
if unit == 'raw':
pass # value is already a raw DAC code
elif unit == 'A':
value = int((value - dac_offset) / dac_gain)
elif unit == 'mA':
value = int((value / 1000 - dac_offset) / dac_gain)
else:
raise TypeError("Invalid unit type.")
DacMax5380._set_dac_value(self, channel, value) | Setting current limit
Note: same limit for all channels. | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/FEI4QuadModuleAdapterCard.py#L226-L243 |
SiLab-Bonn/basil | basil/utils/sim/SiLibUsb.py | SiUSBDevice.DownloadXilinx | def DownloadXilinx(self, bitfile):
"""We hijack this call to perform the socket connect"""
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._sock.connect((self.simulation_host, self.simulation_port))
self._iface = PickleInterface(self._sock)
return True | python | def DownloadXilinx(self, bitfile):
"""We hijack this call to perform the socket connect"""
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._sock.connect((self.simulation_host, self.simulation_port))
self._iface = PickleInterface(self._sock)
return True | We hijack this call to perform the socket connect | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/utils/sim/SiLibUsb.py#L56-L61 |
SiLab-Bonn/basil | basil/HL/SussProber.py | SussProber.set_position | def set_position(self, x, y, speed=None):
''' Move chuck to absolute position in um'''
if speed:
self._intf.write('MoveChuckSubsite %1.1f %1.1f R Y %d' % (x, y, speed))
else:
self._intf.write('MoveChuckSubsite %1.1f %1.1f R Y' % (x, y)) | python | def set_position(self, x, y, speed=None):
''' Move chuck to absolute position in um'''
if speed:
self._intf.write('MoveChuckSubsite %1.1f %1.1f R Y %d' % (x, y, speed))
else:
self._intf.write('MoveChuckSubsite %1.1f %1.1f R Y' % (x, y)) | Move chuck to absolute position in um | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/SussProber.py#L20-L25 |
SiLab-Bonn/basil | basil/HL/SussProber.py | SussProber.move_position | def move_position(self, dx, dy, speed=None):
''' Move chuck relative to actual position in um'''
if speed:
self._intf.write('MoveChuckPosition %1.1f %1.1f R Y %d' % (dx, dy, speed))
else:
self._intf.write('MoveChuckPosition %1.1f %1.1f R Y' % (dx, dy)) | python | def move_position(self, dx, dy, speed=None):
''' Move chuck relative to actual position in um'''
if speed:
self._intf.write('MoveChuckPosition %1.1f %1.1f R Y %d' % (dx, dy, speed))
else:
self._intf.write('MoveChuckPosition %1.1f %1.1f R Y' % (dx, dy)) | Move chuck relative to actual position in um | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/SussProber.py#L27-L32 |
SiLab-Bonn/basil | basil/HL/SussProber.py | SussProber.get_position | def get_position(self):
''' Read chuck position (x, y, z)'''
reply = self._intf.query('ReadChuckPosition Y H')[2:]
return [float(i) for i in reply.split()] | python | def get_position(self):
''' Read chuck position (x, y, z)'''
reply = self._intf.query('ReadChuckPosition Y H')[2:]
return [float(i) for i in reply.split()] | Read chuck position (x, y, z) | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/SussProber.py#L34-L37 |
SiLab-Bonn/basil | basil/HL/SussProber.py | SussProber.get_die | def get_die(self):
''' Read wafer map chip index (die position)'''
reply = self._intf.query('ReadMapPosition')
values = reply[2:].split(' ')
return (int(values[0]), int(values[1])) | python | def get_die(self):
''' Read wafer map chip index (die position)'''
reply = self._intf.query('ReadMapPosition')
values = reply[2:].split(' ')
return (int(values[0]), int(values[1])) | Read wafer map chip index (die position) | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/SussProber.py#L51-L55 |
SiLab-Bonn/basil | basil/RL/TrackRegister.py | TrackRegister.clear | def clear(self):
'Clear tracks in memory - all zero'
for track in self._tracks:
self._tracks[track].setall(False) | python | def clear(self):
'Clear tracks in memory - all zero'
for track in self._tracks:
self._tracks[track].setall(False) | Clear tracks in memory - all zero | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/RL/TrackRegister.py#L38-L41 |
SiLab-Bonn/basil | basil/HL/sitcp_fifo.py | sitcp_fifo.get_data | def get_data(self):
''' Reading data from SiTCP FIFO (via TCP).
Returns
-------
array : numpy.ndarray
Array of unsigned integers (32 bit).
'''
fifo_size = self._intf._get_tcp_data_size()
fifo_int_size = int((fifo_size - (fifo_size % 4)) / 4)
data = self._intf._get_tcp_data(fifo_int_size * 4)
return np.frombuffer(data, dtype=np.dtype('<u4')) | python | def get_data(self):
''' Reading data from SiTCP FIFO (via TCP).
Returns
-------
array : numpy.ndarray
Array of unsigned integers (32 bit).
'''
fifo_size = self._intf._get_tcp_data_size()
fifo_int_size = int((fifo_size - (fifo_size % 4)) / 4)
data = self._intf._get_tcp_data(fifo_int_size * 4)
return np.frombuffer(data, dtype=np.dtype('<u4')) | Reading data from SiTCP FIFO (via TCP).
Returns
-------
array : numpy.ndarray
Array of unsigned integers (32 bit). | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/sitcp_fifo.py#L61-L72 |
SiLab-Bonn/basil | basil/HL/sitcp_fifo.py | sitcp_fifo.set_data | def set_data(self, data):
''' Sending data via TCP.
Parameters
----------
data : array
Array of unsigned integers (32 bit).
'''
data = array.array('B', struct.unpack("{}B".format(len(data) * 4), struct.pack("{}I".format(len(data)), *data)))
self._intf._send_tcp_data(data) | python | def set_data(self, data):
''' Sending data via TCP.
Parameters
----------
data : array
Array of unsigned integers (32 bit).
'''
data = array.array('B', struct.unpack("{}B".format(len(data) * 4), struct.pack("{}I".format(len(data)), *data)))
self._intf._send_tcp_data(data) | Sending data via TCP.
Parameters
----------
data : array
Array of unsigned integers (32 bit). | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/sitcp_fifo.py#L74-L83 |
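The byte packing in `set_data` is the inverse of the word extraction in `get_data`. A self-contained sketch of that round trip follows, with illustrative word values and no SiTCP hardware involved; it assumes a little-endian host, where struct's native `I` layout matches the `<u4` dtype used above.

# Round trip of the 32-bit word <-> byte conversion used above; the word
# values are illustrative and no SiTCP hardware is involved.
import struct
import numpy as np

words = [0x01020304, 0xAABBCCDD]
raw = struct.pack("{}I".format(len(words)), *words)    # as in set_data
recovered = np.frombuffer(raw, dtype=np.dtype('<u4'))  # as in get_data
assert list(recovered) == words                        # holds on a little-endian host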
SiLab-Bonn/basil | examples/mio_pixel/pixel.py | Pixel.program_global_reg | def program_global_reg(self):
"""
Send the global register to the chip.
Loads the values of self['GLOBAL_REG'] onto the chip.
Includes enabling the clock, and loading the Control (CTR)
and DAC shadow registers.
"""
self._clear_strobes()
gr_size = len(self['GLOBAL_REG'][:]) # get the size
self['SEQ']['SHIFT_IN'][0:gr_size] = self['GLOBAL_REG'][:] # this will be shifted out
self['SEQ']['GLOBAL_SHIFT_EN'][0:gr_size] = bitarray(gr_size * '1') # this is to enable clock
self['SEQ']['GLOBAL_CTR_LD'][gr_size + 1:gr_size + 2] = bitarray("1") # load signals
self['SEQ']['GLOBAL_DAC_LD'][gr_size + 1:gr_size + 2] = bitarray("1")
# Execute the program (write bits to output pins)
# + 1 extra 0 bit so that everything ends on LOW instead of HIGH
self._run_seq(gr_size + 3) | python | def program_global_reg(self):
"""
Send the global register to the chip.
Loads the values of self['GLOBAL_REG'] onto the chip.
Includes enabling the clock, and loading the Control (CTR)
and DAC shadow registers.
"""
self._clear_strobes()
gr_size = len(self['GLOBAL_REG'][:]) # get the size
self['SEQ']['SHIFT_IN'][0:gr_size] = self['GLOBAL_REG'][:] # this will be shifted out
self['SEQ']['GLOBAL_SHIFT_EN'][0:gr_size] = bitarray(gr_size * '1') # this is to enable clock
self['SEQ']['GLOBAL_CTR_LD'][gr_size + 1:gr_size + 2] = bitarray("1") # load signals
self['SEQ']['GLOBAL_DAC_LD'][gr_size + 1:gr_size + 2] = bitarray("1")
# Execute the program (write bits to output pins)
# + 1 extra 0 bit so that everything ends on LOW instead of HIGH
self._run_seq(gr_size + 3) | Send the global register to the chip.
Loads the values of self['GLOBAL_REG'] onto the chip.
Includes enabling the clock, and loading the Control (CTR)
and DAC shadow registers. | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/examples/mio_pixel/pixel.py#L24-L44 |
SiLab-Bonn/basil | examples/mio_pixel/pixel.py | Pixel.program_pixel_reg | def program_pixel_reg(self, enable_receiver=True):
"""
Send the pixel register to the chip and store the output.
Loads the values of self['PIXEL_REG'] onto the chip.
Includes enabling the clock, and loading the Control (CTR)
and DAC shadow registers.
If enable_receiver is set, stores the output (by byte) in
self['DATA'], retrievable via `chip['DATA'].get_data()`.
"""
self._clear_strobes()
# enable receiver; it works only if the pixel register is enabled/clocked
self['PIXEL_RX'].set_en(enable_receiver)
px_size = len(self['PIXEL_REG'][:]) # get the size
self['SEQ']['SHIFT_IN'][0:px_size] = self['PIXEL_REG'][:] # this will be shifted out
self['SEQ']['PIXEL_SHIFT_EN'][0:px_size] = bitarray(px_size * '1') # this is to enable clock
self._run_seq(px_size + 1) | python | def program_pixel_reg(self, enable_receiver=True):
"""
Send the pixel register to the chip and store the output.
Loads the values of self['PIXEL_REG'] onto the chip.
Includes enabling the clock, and loading the Control (CTR)
and DAC shadow registers.
If enable_receiver is set, stores the output (by byte) in
self['DATA'], retrievable via `chip['DATA'].get_data()`.
"""
self._clear_strobes()
# enable receiver; it works only if the pixel register is enabled/clocked
self['PIXEL_RX'].set_en(enable_receiver)
px_size = len(self['PIXEL_REG'][:]) # get the size
self['SEQ']['SHIFT_IN'][0:px_size] = self['PIXEL_REG'][:] # this will be shifted out
self['SEQ']['PIXEL_SHIFT_EN'][0:px_size] = bitarray(px_size * '1') # this is to enable clock
self._run_seq(px_size + 1) | Send the pixel register to the chip and store the output.
Loads the values of self['PIXEL_REG'] onto the chip.
Includes enabling the clock, and loading the Control (CTR)
and DAC shadow registers.
If enable_receiver is set, stores the output (by byte) in
self['DATA'], retrievable via `chip['DATA'].get_data()`. | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/examples/mio_pixel/pixel.py#L46-L68 |
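A hedged sketch of the intended call order for the two programming methods above; `chip` is assumed to be a fully initialized Pixel instance whose GLOBAL_REG and PIXEL_REG contents were set beforehand.

# Sketch only: `chip` is assumed to be an initialized Pixel instance whose
# GLOBAL_REG and PIXEL_REG contents were set beforehand.
chip.program_global_reg()        # shift out the global configuration
chip.program_pixel_reg()         # shift out the pixel register, receiver enabled
data = chip['DATA'].get_data()   # bytes captured by PIXEL_RX, as per the docstring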
SiLab-Bonn/basil | examples/mio_pixel/pixel.py | Pixel._run_seq | def _run_seq(self, size):
"""
Send the contents of self['SEQ'] to the chip and wait until it finishes.
"""
# Write the sequence to the sequence generator (hw driver)
self['SEQ'].write(size) # write pattern to memory
self['SEQ'].set_size(size) # set size
self['SEQ'].set_repeat(1) # set repeat
self['SEQ'].start() # start
while not self['SEQ'].get_done():
time.sleep(0.01) | python | def _run_seq(self, size):
"""
Send the contents of self['SEQ'] to the chip and wait until it finishes.
"""
# Write the sequence to the sequence generator (hw driver)
self['SEQ'].write(size) # write pattern to memory
self['SEQ'].set_size(size) # set size
self['SEQ'].set_repeat(1) # set repeat
self['SEQ'].start() # start
while not self['SEQ'].get_done():
time.sleep(0.01) | Send the contents of self['SEQ'] to the chip and wait until it finishes. | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/examples/mio_pixel/pixel.py#L70-L84 |
SiLab-Bonn/basil | basil/HL/i2c.py | i2c.write | def write(self, addr, data):
'''Write access.
:param addr: i2c slave address
:type addr: char
:param data: array/list of bytes
:type data: iterable
:rtype: None
'''
self.set_addr(addr & 0xfe)
self.set_data(data)
self.set_size(len(data))
self.start()
while not self.is_ready:
pass | python | def write(self, addr, data):
'''Write access.
:param addr: i2c slave address
:type addr: char
:param data: array/list of bytes
:type data: iterable
:rtype: None
'''
self.set_addr(addr & 0xfe)
self.set_data(data)
self.set_size(len(data))
self.start()
while not self.is_ready:
pass | Write access.
:param addr: i2c slave address
:type addr: char
:param data: array/list of bytes
:type data: iterable
:rtype: None | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/i2c.py#L93-L108 |
SiLab-Bonn/basil | basil/HL/i2c.py | i2c.read | def read(self, addr, size):
'''Read access.
:param addr: i2c slave address
:type addr: char
:param size: size of transfer
:type size: int
:returns: data byte array
:rtype: array.array('B')
'''
self.set_addr(addr | 0x01)
self.set_size(size)
self.start()
while not self.is_ready:
pass
return self.get_data(size) | python | def read(self, addr, size):
'''Read access.
:param addr: i2c slave address
:type addr: char
:param size: size of transfer
:type size: int
:returns: data byte array
:rtype: array.array('B')
'''
self.set_addr(addr | 0x01)
self.set_size(size)
self.start()
while not self.is_ready:
pass
return self.get_data(size) | Read access.
:param addr: i2c slave address
:type addr: char
:param size: size of transfer
:type size: int
:returns: data byte array
:rtype: array.array('B') | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/i2c.py#L110-L127 |
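Note how the two methods above handle the R/W bit themselves: `write` clears it (`addr & 0xfe`) and `read` sets it (`addr | 0x01`). A short register-access sketch follows; the slave address and register offset are made-up example values.

# Illustrative register access; the slave address and register offset are
# made-up example values, not from the source.
I2C_SLAVE = 0xA0
i2c.write(I2C_SLAVE, [0x00])   # set a register pointer (write bit cleared internally)
data = i2c.read(I2C_SLAVE, 2)  # read two bytes back (read bit set internally)
print(["0x%02X" % b for b in data])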
riga/law | law/parser.py | full_parser | def full_parser():
"""
Returns the full *ArgumentParser* used by the luigi ``CmdlineParser``. The returned instance is
cached.
"""
global _full_parser
if _full_parser:
return _full_parser
luigi_parser = luigi.cmdline_parser.CmdlineParser.get_instance()
if not luigi_parser:
return None
# build the full argument parser with luigi helpers
root_task = luigi_parser.known_args.root_task
_full_parser = luigi_parser._build_parser(root_task)
logger.debug("build full luigi argument parser")
return _full_parser | python | def full_parser():
"""
Returns the full *ArgumentParser* used by the luigi ``CmdlineParser``. The returned instance is
cached.
"""
global _full_parser
if _full_parser:
return _full_parser
luigi_parser = luigi.cmdline_parser.CmdlineParser.get_instance()
if not luigi_parser:
return None
# build the full argument parser with luigi helpers
root_task = luigi_parser.known_args.root_task
_full_parser = luigi_parser._build_parser(root_task)
logger.debug("build full luigi argument parser")
return _full_parser | Returns the full *ArgumentParser* used by the luigi ``CmdlineParser``. The returned instance is
cached. | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/parser.py#L27-L47 |
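All parser helpers in this module follow the same pattern: return a cached instance, or None when no luigi CmdlineParser is active. A minimal guard sketch, illustrative only:

# Sketch: outside of an active luigi CmdlineParser context the helper
# returns None, so guard before using the result.
parser = full_parser()
if parser is None:
    raise RuntimeError("no active luigi CmdlineParser")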
riga/law | law/parser.py | root_task_parser | def root_task_parser():
"""
Returns a new *ArgumentParser* instance that only contains parameter actions of the root task.
The returned instance is cached.
"""
global _root_task_parser
if _root_task_parser:
return _root_task_parser
luigi_parser = luigi.cmdline_parser.CmdlineParser.get_instance()
if not luigi_parser:
return None
root_task = luigi_parser.known_args.root_task
# get all root task parameter destinations
root_dests = []
for task_name, _, param_name, _ in luigi.task_register.Register.get_all_params():
if task_name == root_task:
root_dests.append(param_name)
# create a new parser and add all root actions
_root_task_parser = ArgumentParser(add_help=False)
for action in list(full_parser()._actions):
if not action.option_strings or action.dest in root_dests:
_root_task_parser._add_action(action)
logger.debug("build luigi argument parser for root task {}".format(root_task))
return _root_task_parser | python | def root_task_parser():
"""
Returns a new *ArgumentParser* instance that only contains parameter actions of the root task.
The returned instance is cached.
"""
global _root_task_parser
if _root_task_parser:
return _root_task_parser
luigi_parser = luigi.cmdline_parser.CmdlineParser.get_instance()
if not luigi_parser:
return None
root_task = luigi_parser.known_args.root_task
# get all root task parameter destinations
root_dests = []
for task_name, _, param_name, _ in luigi.task_register.Register.get_all_params():
if task_name == root_task:
root_dests.append(param_name)
# create a new parser and add all root actions
_root_task_parser = ArgumentParser(add_help=False)
for action in list(full_parser()._actions):
if not action.option_strings or action.dest in root_dests:
_root_task_parser._add_action(action)
logger.debug("build luigi argument parser for root task {}".format(root_task))
return _root_task_parser | Returns a new *ArgumentParser* instance that only contains parameter actions of the root task.
The returned instance is cached. | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/parser.py#L50-L80 |
riga/law | law/parser.py | global_cmdline_args | def global_cmdline_args():
"""
Returns the list of command line arguments that do not belong to the root task. The returned
list is cached. Example:
.. code-block:: python
global_cmdline_args()
# -> ["--local-scheduler"]
"""
global _global_cmdline_args
if _global_cmdline_args:
return _global_cmdline_args
luigi_parser = luigi.cmdline_parser.CmdlineParser.get_instance()
if not luigi_parser:
return None
_global_cmdline_args = root_task_parser().parse_known_args(luigi_parser.cmdline_args)[1]
return _global_cmdline_args | python | def global_cmdline_args():
"""
Returns the list of command line arguments that do not belong to the root task. The returned
list is cached. Example:
.. code-block:: python
global_cmdline_args()
# -> ["--local-scheduler"]
"""
global _global_cmdline_args
if _global_cmdline_args:
return _global_cmdline_args
luigi_parser = luigi.cmdline_parser.CmdlineParser.get_instance()
if not luigi_parser:
return None
_global_cmdline_args = root_task_parser().parse_known_args(luigi_parser.cmdline_args)[1]
return _global_cmdline_args | Returns the list of command line arguments that do not belong to the root task. The returned
list is cached. Example:
.. code-block:: python
global_cmdline_args()
# -> ["--local-scheduler"] | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/parser.py#L83-L104 |
riga/law | law/parser.py | global_cmdline_values | def global_cmdline_values():
"""
Returns a dictionary of global command line arguments (computed with
:py:func:`global_cmdline_args`) to their current values. The returned dictionary is cached.
Example:
.. code-block:: python
global_cmdline_values()
# -> {"core_local_scheduler": True}
"""
global _global_cmdline_values
if _global_cmdline_values:
return _global_cmdline_values
luigi_parser = luigi.cmdline_parser.CmdlineParser.get_instance()
if not luigi_parser:
return None
# go through all actions of the full luigi parser and compare option strings
# with the global cmdline args
parser = full_parser()
global_args = global_cmdline_args()
_global_cmdline_values = {}
for action in parser._actions:
if any(arg in action.option_strings for arg in global_args):
_global_cmdline_values[action.dest] = getattr(luigi_parser.known_args, action.dest)
return _global_cmdline_values | python | def global_cmdline_values():
"""
Returns a dictionary of global command line arguments (computed with
:py:func:`global_cmdline_args`) to their current values. The returned dictionary is cached.
Example:
.. code-block:: python
global_cmdline_values()
# -> {"core_local_scheduler": True}
"""
global _global_cmdline_values
if _global_cmdline_values:
return _global_cmdline_values
luigi_parser = luigi.cmdline_parser.CmdlineParser.get_instance()
if not luigi_parser:
return None
# go through all actions of the full luigi parser and compare option strings
# with the global cmdline args
parser = full_parser()
global_args = global_cmdline_args()
_global_cmdline_values = {}
for action in parser._actions:
if any(arg in action.option_strings for arg in global_args):
_global_cmdline_values[action.dest] = getattr(luigi_parser.known_args, action.dest)
return _global_cmdline_values | Returns a dictionary of global command line arguments (computed with
:py:func:`global_cmdline_args`) to their current values. The returned dictionary is cached.
Example:
.. code-block:: python
global_cmdline_values()
# -> {"core_local_scheduler": True} | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/parser.py#L107-L136 |
riga/law | law/parser.py | add_cmdline_arg | def add_cmdline_arg(args, arg, *values):
"""
Adds a command line argument *arg* to a list of argument *args*, e.g. as returned from
:py:func:`global_cmdline_args`. When *arg* exists, *args* is returned unchanged. Otherwise,
*arg* is appended to the end with optional argument *values*. Example:
.. code-block:: python
args = global_cmdline_args()
# -> ["--local-scheduler"]
add_cmdline_arg(args, "--local-scheduler")
# -> ["--local-scheduler"]
add_cmdline_arg(args, "--workers", 4)
# -> ["--local-scheduler", "--workers", "4"]
"""
if arg not in args:
args = list(args) + [arg] + list(values)
return args | python | def add_cmdline_arg(args, arg, *values):
"""
Adds a command line argument *arg* to a list of argument *args*, e.g. as returned from
:py:func:`global_cmdline_args`. When *arg* exists, *args* is returned unchanged. Otherwise,
*arg* is appended to the end with optional argument *values*. Example:
.. code-block:: python
args = global_cmdline_args()
# -> ["--local-scheduler"]
add_cmdline_arg(args, "--local-scheduler")
# -> ["--local-scheduler"]
add_cmdline_arg(args, "--workers", 4)
# -> ["--local-scheduler", "--workers", "4"]
"""
if arg not in args:
args = list(args) + [arg] + list(values)
return args | Adds a command line argument *arg* to a list of argument *args*, e.g. as returned from
:py:func:`global_cmdline_args`. When *arg* exists, *args* is returned unchanged. Otherwise,
*arg* is appended to the end with optional argument *values*. Example:
.. code-block:: python
args = global_cmdline_args()
# -> ["--local-scheduler"]
add_cmdline_arg(args, "--local-scheduler")
# -> ["--local-scheduler"]
add_cmdline_arg(args, "--workers", 4)
# -> ["--local-scheduler", "--workers", "4"] | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/parser.py#L139-L158 |
riga/law | law/parser.py | remove_cmdline_arg | def remove_cmdline_arg(args, arg, n=1):
"""
Removes the command line argument *arg* from a list of arguments *args*, e.g. as returned from
:py:func:`global_cmdline_args`. When *n* is 1 or less, only the argument is removed. Otherwise,
the following *n-1* values are removed. Example:
.. code-block:: python
args = global_cmdline_args()
# -> ["--local-scheduler", "--workers", "4"]
remove_cmdline_arg(args, "--local-scheduler")
# -> ["--workers", "4"]
remove_cmdline_arg(args, "--workers", 2)
# -> ["--local-scheduler"]
"""
if arg in args:
idx = args.index(arg)
args = list(args)
del args[idx:idx + max(n, 1)]
return args | python | def remove_cmdline_arg(args, arg, n=1):
"""
Removes the command line argument *arg* from a list of arguments *args*, e.g. as returned from
:py:func:`global_cmdline_args`. When *n* is 1 or less, only the argument is removed. Otherwise,
the following *n-1* values are removed. Example:
.. code-block:: python
args = global_cmdline_args()
# -> ["--local-scheduler", "--workers", "4"]
remove_cmdline_arg(args, "--local-scheduler")
# -> ["--workers", "4"]
remove_cmdline_arg(args, "--workers", 2)
# -> ["--local-scheduler"]
"""
if arg in args:
idx = args.index(arg)
args = list(args)
del args[idx:idx + max(n, 1)]
return args | Removes the command line argument *arg* from a list of arguments *args*, e.g. as returned from
:py:func:`global_cmdline_args`. When *n* is 1 or less, only the argument is removed. Otherwise,
the following *n-1* values are removed. Example:
.. code-block:: python
args = global_cmdline_args()
# -> ["--local-scheduler", "--workers", "4"]
remove_cmdline_arg(args, "--local-scheduler")
# -> ["--workers", "4"]
remove_cmdline_arg(args, "--workers", 2)
# -> ["--local-scheduler"] | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/parser.py#L161-L182 |
riga/law | law/target/file.py | split_transfer_kwargs | def split_transfer_kwargs(kwargs, skip=None):
"""
Takes keyword arguments *kwargs*, splits them into two separate dictionaries depending on their
content, and returns them in a tuple. The first one will contain arguments related to potential
file transfer operations (e.g. ``"cache"`` or ``"retries"``), while the second one will contain
all remaining arguments. This function is used internally to decide which arguments to pass to
target formatters. *skip* can be a list of argument keys that are ignored.
"""
skip = make_list(skip) if skip else []
transfer_kwargs = {
name: kwargs.pop(name)
for name in ["cache", "prefer_cache", "retries", "retry_delay"]
if name in kwargs and name not in skip
}
return transfer_kwargs, kwargs | python | def split_transfer_kwargs(kwargs, skip=None):
"""
Takes keyword arguments *kwargs*, splits them into two separate dictionaries depending on their
content, and returns them in a tuple. The first one will contain arguments related to potential
file transfer operations (e.g. ``"cache"`` or ``"retries"``), while the second one will contain
all remaining arguments. This function is used internally to decide which arguments to pass to
target formatters. *skip* can be a list of argument keys that are ignored.
"""
skip = make_list(skip) if skip else []
transfer_kwargs = {
name: kwargs.pop(name)
for name in ["cache", "prefer_cache", "retries", "retry_delay"]
if name in kwargs and name not in skip
}
return transfer_kwargs, kwargs | Takes keyword arguments *kwargs*, splits them into two separate dictionaries depending on their
content, and returns them in a tuple. The first one will contain arguments related to potential
file transfer operations (e.g. ``"cache"`` or ``"retries"``), while the second one will contain
all remaining arguments. This function is used internally to decide which arguments to pass to
target formatters. *skip* can be a list of argument keys that are ignored. | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/target/file.py#L323-L337 |
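A small illustration of the split; note that the second element of the returned tuple is the same (now reduced) kwargs dictionary, since transfer-related keys are popped in place. The keyword values are illustrative.

# Illustration with made-up keyword values; "mode" stands in for any
# formatter argument.
kwargs = {"cache": True, "retries": 2, "mode": "w"}
transfer_kwargs, rest = split_transfer_kwargs(kwargs)
# transfer_kwargs -> {"cache": True, "retries": 2}
# rest            -> {"mode": "w"}   (the same dict object, keys popped in place)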
riga/law | law/workflow/remote.py | SubmissionData.job_data | def job_data(cls, job_id=dummy_job_id, branches=None, **kwargs):
"""
Returns a dictionary containing default job submission information such as the *job_id* and
task *branches* covered by the job.
"""
return dict(job_id=job_id, branches=branches or []) | python | def job_data(cls, job_id=dummy_job_id, branches=None, **kwargs):
"""
Returns a dictionary containing default job submission information such as the *job_id* and
task *branches* covered by the job.
"""
return dict(job_id=job_id, branches=branches or []) | Returns a dictionary containing default job submission information such as the *job_id* and
task *branches* covered by the job. | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/workflow/remote.py#L51-L56 |
riga/law | law/workflow/remote.py | StatusData.job_data | def job_data(cls, job_id=dummy_job_id, status=None, code=None, error=None, **kwargs):
"""
Returns a dictionary containing default job status information such as the *job_id*, a job
*status* string, a job return code, and an *error* message.
"""
return dict(job_id=job_id, status=status, code=code, error=error) | python | def job_data(cls, job_id=dummy_job_id, status=None, code=None, error=None, **kwargs):
"""
Returns a dictionary containing default job status information such as the *job_id*, a job
*status* string, a job return code, and an *error* message.
"""
return dict(job_id=job_id, status=status, code=code, error=error) | Returns a dictionary containing default job status information such as the *job_id*, a job
*status* string, a job return code, and an *error* message. | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/workflow/remote.py#L91-L96 |
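The two `job_data` factories above in action, with illustrative job ids and states:

# Illustrative values only; dummy_job_id is used when no id is passed.
sub = SubmissionData.job_data(job_id="1234.0", branches=[0, 1, 2])
# -> {"job_id": "1234.0", "branches": [0, 1, 2]}
st = StatusData.job_data(job_id="1234.0", status="running")
# -> {"job_id": "1234.0", "status": "running", "code": None, "error": None}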
riga/law | law/workflow/remote.py | BaseRemoteWorkflowProxy._get_task_attribute | def _get_task_attribute(self, name, fallback=False):
"""
Return an attribute of the actual task named ``<workflow_type>_<name>``.
When the attribute does not exist and *fallback* is *True*, try to return the task attribute
simply named *name*. In any case, if a requested task attribute is eventually not found, an
AttributeError is raised.
"""
attr = "{}_{}".format(self.workflow_type, name)
if not fallback:
return getattr(self.task, attr)
else:
value = getattr(self.task, attr, no_value)
return value if value != no_value else getattr(self.task, name) | python | def _get_task_attribute(self, name, fallback=False):
"""
Return an attribute of the actual task named ``<workflow_type>_<name>``.
When the attribute does not exist and *fallback* is *True*, try to return the task attribute
simply named *name*. In any case, if a requested task attribute is eventually not found, an
AttributeError is raised.
"""
attr = "{}_{}".format(self.workflow_type, name)
if not fallback:
return getattr(self.task, attr)
else:
value = getattr(self.task, attr, no_value)
return value if value != no_value else getattr(self.task, name) | Return an attribute of the actual task named ``<workflow_type>_<name>``.
When the attribute does not exist and *fallback* is *True*, try to return the task attribute
simply named *name*. In any case, if a requested task attribute is eventually not found, an
AttributeError is raised. | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/workflow/remote.py#L258-L270 |
riga/law | law/workflow/remote.py | BaseRemoteWorkflowProxy.output | def output(self):
"""
Returns the default workflow outputs in an ordered dictionary. At the moment, this is the
collection of outputs of the branch tasks (key ``"collection"``), the submission file (key
``"submission"``), and the status file (key ``"status"``). These two *control outputs* are
optional, i.e., they are not considered when checking the task's completeness.
"""
task = self.task
# get the directory where the control outputs are stored
out_dir = self._get_task_attribute("output_directory")()
# define outputs
outputs = OrderedDict()
postfix = self._get_task_attribute("output_postfix")()
# a file containing the submission data, i.e. job ids etc
submission_file = "{}_submission{}.json".format(self.workflow_type, postfix)
outputs["submission"] = out_dir.child(submission_file, type="f")
outputs["submission"].optional = True
# a file containing status data when the jobs are done
if not task.no_poll:
status_file = "{}_status{}.json".format(self.workflow_type, postfix)
outputs["status"] = out_dir.child(status_file, type="f")
outputs["status"].optional = True
# update with upstream output when not just controlling running jobs
if not task.is_controlling_remote_jobs():
outputs.update(super(BaseRemoteWorkflowProxy, self).output())
return outputs | python | def output(self):
"""
Returns the default workflow outputs in an ordered dictionary. At the moment, this is the
collection of outputs of the branch tasks (key ``"collection"``), the submission file (key
``"submission"``), and the status file (key ``"status"``). These two *control outputs* are
optional, i.e., they are not considered when checking the task's completeness.
"""
task = self.task
# get the directory where the control outputs are stored
out_dir = self._get_task_attribute("output_directory")()
# define outputs
outputs = OrderedDict()
postfix = self._get_task_attribute("output_postfix")()
# a file containing the submission data, i.e. job ids etc
submission_file = "{}_submission{}.json".format(self.workflow_type, postfix)
outputs["submission"] = out_dir.child(submission_file, type="f")
outputs["submission"].optional = True
# a file containing status data when the jobs are done
if not task.no_poll:
status_file = "{}_status{}.json".format(self.workflow_type, postfix)
outputs["status"] = out_dir.child(status_file, type="f")
outputs["status"].optional = True
# update with upstream output when not just controlling running jobs
if not task.is_controlling_remote_jobs():
outputs.update(super(BaseRemoteWorkflowProxy, self).output())
return outputs | Returns the default workflow outputs in an ordered dictionary. At the moment, this is the
collection of outputs of the branch tasks (key ``"collection"``), the submission file (key
``"submission"``), and the status file (key ``"status"``). These two *control outputs* are
optional, i.e., they are not considered when checking the task's completeness. | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/workflow/remote.py#L282-L313 |
riga/law | law/workflow/remote.py | BaseRemoteWorkflowProxy.dump_submission_data | def dump_submission_data(self):
"""
Dumps the current submission data to the submission file.
"""
# renew the dashboard config
self.submission_data["dashboard_config"] = self.dashboard.get_persistent_config()
# write the submission data to the output file
self._outputs["submission"].dump(self.submission_data, formatter="json", indent=4) | python | def dump_submission_data(self):
"""
Dumps the current submission data to the submission file.
"""
# renew the dashboard config
self.submission_data["dashboard_config"] = self.dashboard.get_persistent_config()
# write the submission data to the output file
self._outputs["submission"].dump(self.submission_data, formatter="json", indent=4) | Dumps the current submission data to the submission file. | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/workflow/remote.py#L315-L323 |
riga/law | law/workflow/remote.py | BaseRemoteWorkflowProxy.run | def run(self):
"""
Actual run method that starts the processing of jobs and initiates the status polling, or
performs job cancelling or cleaning, depending on the task parameters.
"""
task = self.task
self._outputs = self.output()
# create the job dashboard interface
self.dashboard = task.create_job_dashboard() or NoJobDashboard()
# read submission data and reset some values
submitted = not task.ignore_submission and self._outputs["submission"].exists()
if submitted:
self.submission_data.update(self._outputs["submission"].load(formatter="json"))
task.tasks_per_job = self.submission_data.tasks_per_job
self.dashboard.apply_config(self.submission_data.dashboard_config)
# when the branch outputs, i.e. the "collection" exists, just create dummy control outputs
if "collection" in self._outputs and self._outputs["collection"].exists():
self.touch_control_outputs()
# cancel jobs?
elif self._cancel_jobs:
if submitted:
self.cancel()
# cleanup jobs?
elif self._cleanup_jobs:
if submitted:
self.cleanup()
# submit and/or wait while polling
else:
# maybe set a tracking url
tracking_url = self.dashboard.create_tracking_url()
if tracking_url:
task.set_tracking_url(tracking_url)
# ensure the output directory exists
if not submitted:
self._outputs["submission"].parent.touch()
# at this point, when the status file exists, it is considered outdated
if "status" in self._outputs:
self._outputs["status"].remove()
try:
# instantiate the configured job file factory, no kwargs yet
self.job_file_factory = self.create_job_file_factory()
# submit
if not submitted:
# set the initial list of unsubmitted jobs
branches = sorted(task.branch_map.keys())
branch_chunks = list(iter_chunks(branches, task.tasks_per_job))
self.submission_data.unsubmitted_jobs = OrderedDict(
(i + 1, branches) for i, branches in enumerate(branch_chunks)
)
self.submit()
# sleep once to give the job interface time to register the jobs
post_submit_delay = self._get_task_attribute("post_submit_delay")()
if post_submit_delay:
time.sleep(post_submit_delay)
# start status polling when a) no_poll is not set, or b) the jobs were already
# submitted so that failed jobs are resubmitted after a single polling iteration
if not task.no_poll or submitted:
self.poll()
finally:
# in any event, cleanup the job file
if self.job_file_factory:
self.job_file_factory.cleanup_dir(force=False) | python | def run(self):
"""
Actual run method that starts the processing of jobs and initiates the status polling, or
performs job cancelling or cleaning, depending on the task parameters.
"""
task = self.task
self._outputs = self.output()
# create the job dashboard interface
self.dashboard = task.create_job_dashboard() or NoJobDashboard()
# read submission data and reset some values
submitted = not task.ignore_submission and self._outputs["submission"].exists()
if submitted:
self.submission_data.update(self._outputs["submission"].load(formatter="json"))
task.tasks_per_job = self.submission_data.tasks_per_job
self.dashboard.apply_config(self.submission_data.dashboard_config)
# when the branch outputs, i.e. the "collection" exists, just create dummy control outputs
if "collection" in self._outputs and self._outputs["collection"].exists():
self.touch_control_outputs()
# cancel jobs?
elif self._cancel_jobs:
if submitted:
self.cancel()
# cleanup jobs?
elif self._cleanup_jobs:
if submitted:
self.cleanup()
# submit and/or wait while polling
else:
# maybe set a tracking url
tracking_url = self.dashboard.create_tracking_url()
if tracking_url:
task.set_tracking_url(tracking_url)
# ensure the output directory exists
if not submitted:
self._outputs["submission"].parent.touch()
# at this point, when the status file exists, it is considered outdated
if "status" in self._outputs:
self._outputs["status"].remove()
try:
# instantiate the configured job file factory, no kwargs yet
self.job_file_factory = self.create_job_file_factory()
# submit
if not submitted:
# set the initial list of unsubmitted jobs
branches = sorted(task.branch_map.keys())
branch_chunks = list(iter_chunks(branches, task.tasks_per_job))
self.submission_data.unsubmitted_jobs = OrderedDict(
(i + 1, branches) for i, branches in enumerate(branch_chunks)
)
self.submit()
# sleep once to give the job interface time to register the jobs
post_submit_delay = self._get_task_attribute("post_submit_delay")()
if post_submit_delay:
time.sleep(post_submit_delay)
# start status polling when a) no_poll is not set, or b) the jobs were already
# submitted so that failed jobs are resubmitted after a single polling iteration
if not task.no_poll or submitted:
self.poll()
finally:
# in any event, cleanup the job file
if self.job_file_factory:
self.job_file_factory.cleanup_dir(force=False) | Actual run method that starts the processing of jobs and initiates the status polling, or
performs job cancelling or cleaning, depending on the task parameters. | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/workflow/remote.py#L326-L400 |
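The initial list of unsubmitted jobs built in `run` is a simple chunking of the branch map; sketched below with `iter_chunks` and an illustrative five-branch map with two tasks per job (import path and values are assumptions for the sketch).

# Sketch of the branch -> job mapping built in run(); the branch map and
# tasks_per_job value are illustrative.
from collections import OrderedDict
from law.util import iter_chunks

branches = [0, 1, 2, 3, 4]                      # sorted(task.branch_map.keys())
branch_chunks = list(iter_chunks(branches, 2))  # tasks_per_job = 2
unsubmitted_jobs = OrderedDict(
    (i + 1, chunk) for i, chunk in enumerate(branch_chunks)
)
# -> OrderedDict([(1, [0, 1]), (2, [2, 3]), (3, [4])])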
riga/law | law/workflow/remote.py | BaseRemoteWorkflowProxy.cancel | def cancel(self):
"""
Cancels running jobs. The job ids are read from the submission file which has to exist
for obvious reasons.
"""
task = self.task
# get job ids from submission data
job_ids = [
d["job_id"] for d in self.submission_data.jobs.values()
if d["job_id"] not in (self.submission_data.dummy_job_id, None)
]
if not job_ids:
return
# cancel jobs
task.publish_message("going to cancel {} jobs".format(len(job_ids)))
errors = self.job_manager.cancel_batch(job_ids)
# print errors
if errors:
print("{} error(s) occured while cancelling {} job(s) of task {}:".format(
len(errors), len(job_ids), task.task_id))
tmpl = " {}"
for i, err in enumerate(errors):
print(tmpl.format(err))
if i + 1 >= self.show_errors:
remaining = len(errors) - self.show_errors
if remaining > 0:
print(" ... and {} more".format(remaining))
break
# inform the dashboard
for job_num, job_data in six.iteritems(self.submission_data.jobs):
task.forward_dashboard_event(self.dashboard, job_data, "action.cancel", job_num) | python | def cancel(self):
"""
Cancels running jobs. The job ids are read from the submission file which has to exist
for obvious reasons.
"""
task = self.task
# get job ids from submission data
job_ids = [
d["job_id"] for d in self.submission_data.jobs.values()
if d["job_id"] not in (self.submission_data.dummy_job_id, None)
]
if not job_ids:
return
# cancel jobs
task.publish_message("going to cancel {} jobs".format(len(job_ids)))
errors = self.job_manager.cancel_batch(job_ids)
# print errors
if errors:
print("{} error(s) occured while cancelling {} job(s) of task {}:".format(
len(errors), len(job_ids), task.task_id))
tmpl = " {}"
for i, err in enumerate(errors):
print(tmpl.format(err))
if i + 1 >= self.show_errors:
remaining = len(errors) - self.show_errors
if remaining > 0:
print(" ... and {} more".format(remaining))
break
# inform the dashboard
for job_num, job_data in six.iteritems(self.submission_data.jobs):
task.forward_dashboard_event(self.dashboard, job_data, "action.cancel", job_num) | Cancels running jobs. The job ids are read from the submission file which has to exist
for obvious reasons. | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/workflow/remote.py#L402-L436 |
riga/law | law/workflow/remote.py | BaseRemoteWorkflowProxy.cleanup | def cleanup(self):
"""
Cleans up jobs on the remote run location. The job ids are read from the submission file
which has to exist for obvious reasons.
"""
task = self.task
# get job ids from submission data
job_ids = [
d["job_id"] for d in self.submission_data.jobs.values()
if d["job_id"] not in (self.submission_data.dummy_job_id, None)
]
if not job_ids:
return
# cleanup jobs
task.publish_message("going to cleanup {} jobs".format(len(job_ids)))
errors = self.job_manager.cleanup_batch(job_ids)
# print errors
if errors:
print("{} error(s) occured while cleaning up {} job(s) of task {}:".format(
len(errors), len(job_ids), task.task_id))
tmpl = " {}"
for i, err in enumerate(errors):
print(tmpl.format(err))
if i + 1 >= self.show_errors:
remaining = len(errors) - self.show_errors
if remaining > 0:
print(" ... and {} more".format(remaining))
break | python | def cleanup(self):
"""
Cleans up jobs on the remote run location. The job ids are read from the submission file
which has to exist for obvious reasons.
"""
task = self.task
# get job ids from submission data
job_ids = [
d["job_id"] for d in self.submission_data.jobs.values()
if d["job_id"] not in (self.submission_data.dummy_job_id, None)
]
if not job_ids:
return
# cleanup jobs
task.publish_message("going to cleanup {} jobs".format(len(job_ids)))
errors = self.job_manager.cleanup_batch(job_ids)
# print errors
if errors:
print("{} error(s) occured while cleaning up {} job(s) of task {}:".format(
len(errors), len(job_ids), task.task_id))
tmpl = " {}"
for i, err in enumerate(errors):
print(tmpl.format(err))
if i + 1 >= self.show_errors:
remaining = len(errors) - self.show_errors
if remaining > 0:
print(" ... and {} more".format(remaining))
break | Cleans up jobs on the remote run location. The job ids are read from the submission file
which has to exist for obvious reasons. | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/workflow/remote.py#L438-L468 |
riga/law | law/workflow/remote.py | BaseRemoteWorkflowProxy.submit | def submit(self, retry_jobs=None):
"""
Submits all jobs. When *retry_jobs* is *None*, a new job list is built. Otherwise,
previously failed jobs defined in the *retry_jobs* dictionary, which maps job numbers to
lists of branch numbers, are used.
"""
task = self.task
# helper to check if a job can be skipped
# rule: skip a job when only_missing is set to True and all its branch tasks are complete
def skip_job(job_num, branches):
if not task.only_missing:
return False
elif job_num in self.skip_jobs:
return self.skip_jobs[job_num]
else:
self.skip_jobs[job_num] = all(task.as_branch(b).complete() for b in branches)
# when the job is skipped, write a dummy entry into submission data
if self.skip_jobs[job_num]:
self.submission_data.jobs[job_num] = self.submission_data_cls.job_data(
branches=branches)
return self.skip_jobs[job_num]
# collect data of jobs that should be submitted: num -> branches
submit_jobs = OrderedDict()
# handle jobs for resubmission
if retry_jobs:
for job_num, branches in six.iteritems(retry_jobs):
# even retry jobs can be skipped
if skip_job(job_num, branches):
continue
# the number of parallel jobs might be reached as well
# in that case, add the jobs back to the unsubmitted ones and update the job id
n = self.poll_data.n_active + len(submit_jobs)
if n >= self.poll_data.n_parallel:
self.submission_data.unsubmitted_jobs[job_num] = branches
del self.submission_data.jobs[job_num]
continue
# mark job for resubmission
submit_jobs[job_num] = sorted(branches)
# fill with jobs from the list of unsubmitted jobs until maximum number of parallel jobs is
# reached
new_jobs = OrderedDict()
for job_num, branches in list(self.submission_data.unsubmitted_jobs.items()):
# remove jobs that don't need to be submitted
if skip_job(job_num, branches):
del self.submission_data.unsubmitted_jobs[job_num]
continue
# do nothing when n_parallel is already reached
n = self.poll_data.n_active + len(submit_jobs) + len(new_jobs)
if n >= self.poll_data.n_parallel:
continue
# mark jobs for submission
del self.submission_data.unsubmitted_jobs[job_num]
new_jobs[job_num] = sorted(branches)
# add new jobs to the jobs to submit, maybe also shuffle
new_submission_data = OrderedDict()
new_job_nums = list(new_jobs.keys())
if task.shuffle_jobs:
random.shuffle(new_job_nums)
for job_num in new_job_nums:
submit_jobs[job_num] = new_jobs[job_num]
# when there is nothing to submit, dump the submission data to the output file and stop here
if not submit_jobs:
if retry_jobs or self.submission_data.unsubmitted_jobs:
self.dump_submission_data()
return new_submission_data
# create job submission files
job_files = [self.create_job_file(*tpl) for tpl in six.iteritems(submit_jobs)]
# log some stats
dst_info = self.destination_info() or ""
dst_info = dst_info and (", " + dst_info)
task.publish_message("going to submit {} {} job(s){}".format(
len(submit_jobs), self.workflow_type, dst_info))
# actual submission
job_ids = self.submit_jobs(job_files)
# store submission data
errors = []
for job_num, job_id in six.moves.zip(submit_jobs, job_ids):
# handle errors
error = (job_num, job_id) if isinstance(job_id, Exception) else None
if error:
errors.append((job_num, job_id))
job_id = self.submission_data_cls.dummy_job_id
# build the job data
branches = submit_jobs[job_num]
job_data = self.submission_data_cls.job_data(job_id=job_id, branches=branches)
self.submission_data.jobs[job_num] = job_data
new_submission_data[job_num] = job_data
# set the attempt number in the submission data
self.submission_data.attempts.setdefault(job_num, 0)
# inform the dashboard
task.forward_dashboard_event(self.dashboard, job_data, "action.submit", job_num)
# dump the submission data to the output file
self.dump_submission_data()
# raise exceptions or log
if errors:
print("{} error(s) occured during job submission of task {}:".format(
len(errors), task.task_id))
tmpl = " job {}: {}"
for i, tpl in enumerate(errors):
print(tmpl.format(*tpl))
if i + 1 >= self.show_errors:
remaining = len(errors) - self.show_errors
if remaining > 0:
print(" ... and {} more".format(remaining))
break
else:
task.publish_message("submitted {} job(s)".format(len(submit_jobs)) + dst_info)
return new_submission_data | python | def submit(self, retry_jobs=None):
"""
Submits all jobs. When *retry_jobs* is *None*, a new job list is built. Otherwise,
previously failed jobs defined in the *retry_jobs* dictionary, which maps job numbers to
lists of branch numbers, are used.
"""
task = self.task
# helper to check if a job can be skipped
# rule: skip a job when only_missing is set to True and all its branch tasks are complete
def skip_job(job_num, branches):
if not task.only_missing:
return False
elif job_num in self.skip_jobs:
return self.skip_jobs[job_num]
else:
self.skip_jobs[job_num] = all(task.as_branch(b).complete() for b in branches)
# when the job is skipped, write a dummy entry into submission data
if self.skip_jobs[job_num]:
self.submission_data.jobs[job_num] = self.submission_data_cls.job_data(
branches=branches)
return self.skip_jobs[job_num]
# collect data of jobs that should be submitted: num -> branches
submit_jobs = OrderedDict()
# handle jobs for resubmission
if retry_jobs:
for job_num, branches in six.iteritems(retry_jobs):
# even retry jobs can be skipped
if skip_job(job_num, branches):
continue
# the number of parallel jobs might be reached as well
# in that case, add the jobs back to the unsubmitted ones and update the job id
n = self.poll_data.n_active + len(submit_jobs)
if n >= self.poll_data.n_parallel:
self.submission_data.unsubmitted_jobs[job_num] = branches
del self.submission_data.jobs[job_num]
continue
# mark job for resubmission
submit_jobs[job_num] = sorted(branches)
# fill with jobs from the list of unsubmitted jobs until maximum number of parallel jobs is
# reached
new_jobs = OrderedDict()
for job_num, branches in list(self.submission_data.unsubmitted_jobs.items()):
# remove jobs that don't need to be submitted
if skip_job(job_num, branches):
del self.submission_data.unsubmitted_jobs[job_num]
continue
# do nothing when n_parallel is already reached
n = self.poll_data.n_active + len(submit_jobs) + len(new_jobs)
if n >= self.poll_data.n_parallel:
continue
# mark jobs for submission
del self.submission_data.unsubmitted_jobs[job_num]
new_jobs[job_num] = sorted(branches)
# add new jobs to the jobs to submit, maybe also shuffle
new_submission_data = OrderedDict()
new_job_nums = list(new_jobs.keys())
if task.shuffle_jobs:
random.shuffle(new_job_nums)
for job_num in new_job_nums:
submit_jobs[job_num] = new_jobs[job_num]
# when there is nothing to submit, dump the submission data to the output file and stop here
if not submit_jobs:
if retry_jobs or self.submission_data.unsubmitted_jobs:
self.dump_submission_data()
return new_submission_data
# create job submission files
job_files = [self.create_job_file(*tpl) for tpl in six.iteritems(submit_jobs)]
# log some stats
dst_info = self.destination_info() or ""
dst_info = dst_info and (", " + dst_info)
task.publish_message("going to submit {} {} job(s){}".format(
len(submit_jobs), self.workflow_type, dst_info))
# actual submission
job_ids = self.submit_jobs(job_files)
# store submission data
errors = []
for job_num, job_id in six.moves.zip(submit_jobs, job_ids):
# handle errors
error = (job_num, job_id) if isinstance(job_id, Exception) else None
if error:
errors.append((job_num, job_id))
job_id = self.submission_data_cls.dummy_job_id
# build the job data
branches = submit_jobs[job_num]
job_data = self.submission_data_cls.job_data(job_id=job_id, branches=branches)
self.submission_data.jobs[job_num] = job_data
new_submission_data[job_num] = job_data
# set the attempt number in the submission data
self.submission_data.attempts.setdefault(job_num, 0)
# inform the dashboard
task.forward_dashboard_event(self.dashboard, job_data, "action.submit", job_num)
# dump the submission data to the output file
self.dump_submission_data()
# raise exceptions or log
if errors:
print("{} error(s) occured during job submission of task {}:".format(
len(errors), task.task_id))
tmpl = " job {}: {}"
for i, tpl in enumerate(errors):
print(tmpl.format(*tpl))
if i + 1 >= self.show_errors:
remaining = len(errors) - self.show_errors
if remaining > 0:
print(" ... and {} more".format(remaining))
break
else:
task.publish_message("submitted {} job(s)".format(len(submit_jobs)) + dst_info)
return new_submission_data | Submits all jobs. When *retry_jobs* is *None*, a new job list is built. Otherwise,
previously failed jobs defined in the *retry_jobs* dictionary, which maps job numbers to
lists of branch numbers, are used. | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/workflow/remote.py#L470-L597 |
riga/law | law/workflow/remote.py | BaseRemoteWorkflowProxy.poll | def poll(self):
"""
Initiates the job status polling loop.
"""
task = self.task
# total job count
n_jobs = len(self.submission_data)
# store the number of consecutive polling failures and get the maximum number of polls
n_poll_fails = 0
if task.walltime == NO_FLOAT:
max_polls = sys.maxsize
else:
max_polls = int(math.ceil((task.walltime * 3600.) / (task.poll_interval * 60.)))
# update variable attributes for polling
self.poll_data.n_finished_min = task.acceptance * (1 if task.acceptance > 1 else n_jobs)
self.poll_data.n_failed_max = task.tolerance * (1 if task.tolerance > 1 else n_jobs)
# track finished and failed jobs in dicts holding status data
finished_jobs = OrderedDict()
failed_jobs = OrderedDict()
# start the poll loop
for i in six.moves.range(max_polls):
# sleep after the first iteration
if i > 0:
time.sleep(task.poll_interval * 60)
# determine the currently active jobs, i.e., the jobs whose states should be checked
active_jobs = OrderedDict()
for job_num, data in six.iteritems(self.submission_data.jobs):
if job_num in finished_jobs or job_num in failed_jobs:
continue
if self.skip_jobs.get(job_num):
finished_jobs[job_num] = self.status_data_cls.job_data(
status=self.job_manager.FINISHED, code=0)
else:
data = data.copy()
if not data["job_id"]:
data["job_id"] = self.status_data_cls.dummy_job_id
active_jobs[job_num] = data
self.poll_data.n_active = len(active_jobs)
# query job states
job_ids = [data["job_id"] for data in six.itervalues(active_jobs)] # noqa: F812
_states, errors = self.job_manager.query_batch(job_ids)
if errors:
print("{} error(s) occured during job status query of task {}:".format(
len(errors), task.task_id))
tmpl = " {}"
for i, err in enumerate(errors):
print(tmpl.format(err))
if i + 1 >= self.show_errors:
remaining = len(errors) - self.show_errors
if remaining > 0:
print(" ... and {} more".format(remaining))
break
n_poll_fails += 1
if task.poll_fails > 0 and n_poll_fails > task.poll_fails:
raise Exception("poll_fails exceeded")
else:
continue
else:
n_poll_fails = 0
# states stores job_id's as keys, so replace them with job_num's
states = OrderedDict()
for job_num, data in six.iteritems(active_jobs):
states[job_num] = self.status_data_cls.job_data(**_states[data["job_id"]])
# store jobs per status and take further actions depending on the status
pending_jobs = OrderedDict()
running_jobs = OrderedDict()
newly_failed_jobs = OrderedDict()
retry_jobs = OrderedDict()
for job_num, data in six.iteritems(states):
if data["status"] == self.job_manager.PENDING:
pending_jobs[job_num] = data
task.forward_dashboard_event(self.dashboard, data, "status.pending", job_num)
elif data["status"] == self.job_manager.RUNNING:
running_jobs[job_num] = data
task.forward_dashboard_event(self.dashboard, data, "status.running", job_num)
elif data["status"] == self.job_manager.FINISHED:
finished_jobs[job_num] = data
self.poll_data.n_active -= 1
task.forward_dashboard_event(self.dashboard, data, "status.finished", job_num)
elif data["status"] in (self.job_manager.FAILED, self.job_manager.RETRY):
newly_failed_jobs[job_num] = data
self.poll_data.n_active -= 1
# retry or ultimately failed?
if self.job_retries[job_num] < task.retries:
self.job_retries[job_num] += 1
self.submission_data.attempts[job_num] += 1
data["status"] = self.job_manager.RETRY
retry_jobs[job_num] = self.submission_data.jobs[job_num]["branches"]
task.forward_dashboard_event(self.dashboard, data, "status.retry", job_num)
else:
failed_jobs[job_num] = data
task.forward_dashboard_event(self.dashboard, data, "status.failed", job_num)
else:
raise Exception("unknown job status '{}'".format(data["status"]))
# gather some counts
n_pending = len(pending_jobs)
n_running = len(running_jobs)
n_finished = len(finished_jobs)
n_retry = len(retry_jobs)
n_failed = len(failed_jobs)
n_unsubmitted = len(self.submission_data.unsubmitted_jobs)
# log the status line
counts = (n_pending, n_running, n_finished, n_retry, n_failed)
if task.parallel_jobs > 0:
counts = (n_unsubmitted,) + counts
status_line = self.job_manager.status_line(counts, last_counts=True, sum_counts=n_jobs,
color=True, align=task.align_status_line)
task.publish_message(status_line)
self.last_status_counts = counts
# inform the scheduler about the progress
task.publish_progress(100. * n_finished / n_jobs)
# log newly failed jobs
if newly_failed_jobs:
print("{} failed job(s) in task {}:".format(len(newly_failed_jobs), task.task_id))
tmpl = " job: {}, branches: {}, id: {job_id}, status: {status}, code: {code}, " \
"error: {error}"
for i, (job_num, data) in enumerate(six.iteritems(newly_failed_jobs)):
branches = self.submission_data.jobs[job_num]["branches"]
print(tmpl.format(job_num, ",".join(str(b) for b in branches), **data))
if i + 1 >= self.show_errors:
remaining = len(newly_failed_jobs) - self.show_errors
if remaining > 0:
print(" ... and {} more".format(remaining))
break
# infer the overall status
reached_end = n_jobs == n_finished + n_failed
finished = n_finished >= self.poll_data.n_finished_min
failed = n_failed > self.poll_data.n_failed_max
unreachable = n_jobs - n_failed < self.poll_data.n_finished_min
if finished:
# write status output
if "status" in self._outputs:
status_data = self.status_data_cls()
status_data.jobs.update(finished_jobs)
status_data.jobs.update(states)
self._outputs["status"].dump(status_data, formatter="json", indent=4)
break
elif failed:
failed_nums = [job_num for job_num in failed_jobs if job_num not in retry_jobs]
raise Exception("tolerance exceeded for jobs {}".format(failed_nums))
elif unreachable:
err = None
if reached_end:
err = "acceptance of {} not reached, total jobs: {}, failed jobs: {}"
elif task.check_unreachable_acceptance:
err = "acceptance of {} unreachable, total jobs: {}, failed jobs: {}"
if err:
raise Exception(err.format(self.poll_data.n_finished_min, n_jobs, n_failed))
# configurable poll callback
task.poll_callback(self.poll_data)
# trigger automatic resubmission and submission of unsubmitted jobs
self.submit(retry_jobs)
# break when no polling is desired
# we can get to this point when there was already a submission and the no_poll
# parameter was set so that only failed jobs are resubmitted once
if task.no_poll:
break
else:
# walltime exceeded
raise Exception("walltime exceeded") | python | def poll(self):
"""
Initiates the job status polling loop.
"""
task = self.task
# total job count
n_jobs = len(self.submission_data)
# store the number of consecutive polling failures and get the maximum number of polls
n_poll_fails = 0
if task.walltime == NO_FLOAT:
max_polls = sys.maxsize
else:
max_polls = int(math.ceil((task.walltime * 3600.) / (task.poll_interval * 60.)))
# update variable attributes for polling
self.poll_data.n_finished_min = task.acceptance * (1 if task.acceptance > 1 else n_jobs)
self.poll_data.n_failed_max = task.tolerance * (1 if task.tolerance > 1 else n_jobs)
# track finished and failed jobs in dicts holding status data
finished_jobs = OrderedDict()
failed_jobs = OrderedDict()
# start the poll loop
for i in six.moves.range(max_polls):
# sleep after the first iteration
if i > 0:
time.sleep(task.poll_interval * 60)
# determine the currently active jobs, i.e., the jobs whose states should be checked
active_jobs = OrderedDict()
for job_num, data in six.iteritems(self.submission_data.jobs):
if job_num in finished_jobs or job_num in failed_jobs:
continue
if self.skip_jobs.get(job_num):
finished_jobs[job_num] = self.status_data_cls.job_data(
status=self.job_manager.FINISHED, code=0)
else:
data = data.copy()
if not data["job_id"]:
data["job_id"] = self.status_data_cls.dummy_job_id
active_jobs[job_num] = data
self.poll_data.n_active = len(active_jobs)
# query job states
job_ids = [data["job_id"] for data in six.itervalues(active_jobs)] # noqa: F812
_states, errors = self.job_manager.query_batch(job_ids)
if errors:
print("{} error(s) occured during job status query of task {}:".format(
len(errors), task.task_id))
tmpl = " {}"
for i, err in enumerate(errors):
print(tmpl.format(err))
if i + 1 >= self.show_errors:
remaining = len(errors) - self.show_errors
if remaining > 0:
print(" ... and {} more".format(remaining))
break
n_poll_fails += 1
if task.poll_fails > 0 and n_poll_fails > task.poll_fails:
raise Exception("poll_fails exceeded")
else:
continue
else:
n_poll_fails = 0
# states stores job_id's as keys, so replace them with job_num's
states = OrderedDict()
for job_num, data in six.iteritems(active_jobs):
states[job_num] = self.status_data_cls.job_data(**_states[data["job_id"]])
# store jobs per status and take further actions depending on the status
pending_jobs = OrderedDict()
running_jobs = OrderedDict()
newly_failed_jobs = OrderedDict()
retry_jobs = OrderedDict()
for job_num, data in six.iteritems(states):
if data["status"] == self.job_manager.PENDING:
pending_jobs[job_num] = data
task.forward_dashboard_event(self.dashboard, data, "status.pending", job_num)
elif data["status"] == self.job_manager.RUNNING:
running_jobs[job_num] = data
task.forward_dashboard_event(self.dashboard, data, "status.running", job_num)
elif data["status"] == self.job_manager.FINISHED:
finished_jobs[job_num] = data
self.poll_data.n_active -= 1
task.forward_dashboard_event(self.dashboard, data, "status.finished", job_num)
elif data["status"] in (self.job_manager.FAILED, self.job_manager.RETRY):
newly_failed_jobs[job_num] = data
self.poll_data.n_active -= 1
# retry or ultimately failed?
if self.job_retries[job_num] < task.retries:
self.job_retries[job_num] += 1
self.submission_data.attempts[job_num] += 1
data["status"] = self.job_manager.RETRY
retry_jobs[job_num] = self.submission_data.jobs[job_num]["branches"]
task.forward_dashboard_event(self.dashboard, data, "status.retry", job_num)
else:
failed_jobs[job_num] = data
task.forward_dashboard_event(self.dashboard, data, "status.failed", job_num)
else:
raise Exception("unknown job status '{}'".format(data["status"]))
# gather some counts
n_pending = len(pending_jobs)
n_running = len(running_jobs)
n_finished = len(finished_jobs)
n_retry = len(retry_jobs)
n_failed = len(failed_jobs)
n_unsubmitted = len(self.submission_data.unsubmitted_jobs)
# log the status line
counts = (n_pending, n_running, n_finished, n_retry, n_failed)
if task.parallel_jobs > 0:
counts = (n_unsubmitted,) + counts
status_line = self.job_manager.status_line(counts, last_counts=True, sum_counts=n_jobs,
color=True, align=task.align_status_line)
task.publish_message(status_line)
self.last_status_counts = counts
# inform the scheduler about the progress
task.publish_progress(100. * n_finished / n_jobs)
# log newly failed jobs
if newly_failed_jobs:
print("{} failed job(s) in task {}:".format(len(newly_failed_jobs), task.task_id))
tmpl = " job: {}, branches: {}, id: {job_id}, status: {status}, code: {code}, " \
"error: {error}"
for i, (job_num, data) in enumerate(six.iteritems(newly_failed_jobs)):
branches = self.submission_data.jobs[job_num]["branches"]
print(tmpl.format(job_num, ",".join(str(b) for b in branches), **data))
if i + 1 >= self.show_errors:
remaining = len(newly_failed_jobs) - self.show_errors
if remaining > 0:
print(" ... and {} more".format(remaining))
break
# infer the overall status
reached_end = n_jobs == n_finished + n_failed
finished = n_finished >= self.poll_data.n_finished_min
failed = n_failed > self.poll_data.n_failed_max
unreachable = n_jobs - n_failed < self.poll_data.n_finished_min
if finished:
# write status output
if "status" in self._outputs:
status_data = self.status_data_cls()
status_data.jobs.update(finished_jobs)
status_data.jobs.update(states)
self._outputs["status"].dump(status_data, formatter="json", indent=4)
break
elif failed:
failed_nums = [job_num for job_num in failed_jobs if job_num not in retry_jobs]
raise Exception("tolerance exceeded for jobs {}".format(failed_nums))
elif unreachable:
err = None
if reached_end:
err = "acceptance of {} not reached, total jobs: {}, failed jobs: {}"
elif task.check_unreachable_acceptance:
err = "acceptance of {} unreachable, total jobs: {}, failed jobs: {}"
if err:
raise Exception(err.format(self.poll_data.n_finished_min, n_jobs, n_failed))
# configurable poll callback
task.poll_callback(self.poll_data)
# trigger automatic resubmission and submission of unsubmitted jobs
self.submit(retry_jobs)
# break when no polling is desired
# we can get to this point when there was already a submission and the no_poll
# parameter was set so that only failed jobs are resubmitted once
if task.no_poll:
break
else:
# walltime exceeded
raise Exception("walltime exceeded") | Initiates the job status polling loop. | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/workflow/remote.py#L599-L781 |
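A minimal sketch of how the acceptance and tolerance settings used in the poll loop above translate into job counts; the numbers are illustrative, not from the source:

```python
# illustrative only: fractional values (<= 1) are scaled by the total job count,
# values > 1 are taken as absolute counts, mirroring the poll_data setup above
n_jobs = 100
acceptance, tolerance = 0.9, 0.1
n_finished_min = acceptance * (1 if acceptance > 1 else n_jobs)  # 90.0 jobs must finish
n_failed_max = tolerance * (1 if tolerance > 1 else n_jobs)      # more than 10.0 failures aborts
print(n_finished_min, n_failed_max)  # 90.0 10.0
```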
riga/law | law/workflow/remote.py | BaseRemoteWorkflowProxy.touch_control_outputs | def touch_control_outputs(self):
"""
Creates and saves dummy submission and status files. This method is called in case the
collection of branch task outputs exists.
"""
task = self.task
# create the parent directory
self._outputs["submission"].parent.touch()
# get all branch indexes and chunk them by tasks_per_job
branch_chunks = list(iter_chunks(task.branch_map.keys(), task.tasks_per_job))
# submission output
if not self._outputs["submission"].exists():
submission_data = self.submission_data.copy()
# set dummy submission data
submission_data.jobs.clear()
for i, branches in enumerate(branch_chunks):
job_num = i + 1
submission_data.jobs[job_num] = self.submission_data_cls.job_data(branches=branches)
self._outputs["submission"].dump(submission_data, formatter="json", indent=4)
# status output
if "status" in self._outputs and not self._outputs["status"].exists():
status_data = self.status_data_cls()
# set dummy status data
for i, branches in enumerate(branch_chunks):
job_num = i + 1
status_data.jobs[job_num] = self.status_data_cls.job_data(
status=self.job_manager.FINISHED, code=0)
self._outputs["status"].dump(status_data, formatter="json", indent=4) | python | def touch_control_outputs(self):
"""
Creates and saves dummy submission and status files. This method is called in case the
collection of branch task outputs exists.
"""
task = self.task
# create the parent directory
self._outputs["submission"].parent.touch()
# get all branch indexes and chunk them by tasks_per_job
branch_chunks = list(iter_chunks(task.branch_map.keys(), task.tasks_per_job))
# submission output
if not self._outputs["submission"].exists():
submission_data = self.submission_data.copy()
# set dummy submission data
submission_data.jobs.clear()
for i, branches in enumerate(branch_chunks):
job_num = i + 1
submission_data.jobs[job_num] = self.submission_data_cls.job_data(branches=branches)
self._outputs["submission"].dump(submission_data, formatter="json", indent=4)
# status output
if "status" in self._outputs and not self._outputs["status"].exists():
status_data = self.status_data_cls()
# set dummy status data
for i, branches in enumerate(branch_chunks):
job_num = i + 1
status_data.jobs[job_num] = self.status_data_cls.job_data(
status=self.job_manager.FINISHED, code=0)
self._outputs["status"].dump(status_data, formatter="json", indent=4) | Creates and saves dummy submission and status files. This method is called in case the
collection of branch task outputs exists. | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/workflow/remote.py#L783-L814 |
riga/law | law/workflow/remote.py | BaseRemoteWorkflow.forward_dashboard_event | def forward_dashboard_event(self, dashboard, job_data, event, job_num):
"""
Hook to preprocess and publish dashboard events. By default, every event is passed to the
dashboard's :py:meth:`law.job.dashboard.BaseJobDashboard.publish` method unchanged.
"""
# possible events:
# - action.submit
# - action.cancel
# - status.pending
# - status.running
# - status.finished
# - status.retry
# - status.failed
# forward to dashboard in any event by default
return dashboard.publish(job_data, event, job_num) | python | def forward_dashboard_event(self, dashboard, job_data, event, job_num):
"""
Hook to preprocess and publish dashboard events. By default, every event is passed to the
dashboard's :py:meth:`law.job.dashboard.BaseJobDashboard.publish` method unchanged.
"""
# possible events:
# - action.submit
# - action.cancel
# - status.pending
# - status.running
# - status.finished
# - status.retry
# - status.failed
# forward to dashboard in any event by default
return dashboard.publish(job_data, event, job_num) | Hook to preprocess and publish dashboard events. By default, every event is passed to the
dashboard's :py:meth:`law.job.dashboard.BaseJobDashboard.publish` method unchanged. | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/workflow/remote.py#L973-L987 |
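A sketch of overriding this hook to filter events; the subclass and the filtering rule are hypothetical, only the hook signature and the event names listed above come from the source, and the import path is assumed from the file location law/workflow/remote.py:

```python
from law.workflow.remote import BaseRemoteWorkflow  # assumed import path


class FilteringWorkflow(BaseRemoteWorkflow):
    # hypothetical subclass: forward only "status.*" events, drop "action.*" ones

    def forward_dashboard_event(self, dashboard, job_data, event, job_num):
        if event.startswith("status."):
            return dashboard.publish(job_data, event, job_num)
        # returning None for all other events skips publishing
```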
riga/law | law/target/local.py | LocalFileTarget.localize | def localize(self, mode="r", perm=None, parent_perm=None, **kwargs):
""" localize(mode="r", perm=None, parent_perm=None, skip_copy=False, is_tmp=None, **kwargs)
"""
if mode not in ("r", "w"):
raise Exception("unknown mode '{}', use r or w".format(mode))
# get additional arguments
skip_copy = kwargs.pop("skip_copy", False)
is_tmp = kwargs.pop("is_tmp", mode == "w")
if mode == "r":
if is_tmp:
# create a temporary target
tmp = self.__class__(is_tmp=self.ext(n=1) or True)
# always copy
self.copy_to_local(tmp)
# yield the copy
try:
yield tmp
finally:
tmp.remove()
else:
# simply yield
yield self
else: # write mode
if is_tmp:
# create a temporary target
tmp = self.__class__(is_tmp=self.ext(n=1) or True)
# copy when existing
if not skip_copy and self.exists():
self.copy_to_local(tmp)
# yield the copy
try:
yield tmp
# move back again
if tmp.exists():
tmp.move_to_local(self, dir_perm=parent_perm)
self.chmod(perm)
else:
logger.warning("cannot move non-existing localized file target {!r}".format(
self))
finally:
tmp.remove()
else:
# create the parent dir
self.parent.touch(perm=parent_perm)
# simply yield
yield self
if self.exists():
self.chmod(perm) | python | def localize(self, mode="r", perm=None, parent_perm=None, **kwargs):
""" localize(mode="r", perm=None, parent_perm=None, skip_copy=False, is_tmp=None, **kwargs)
"""
if mode not in ("r", "w"):
raise Exception("unknown mode '{}', use r or w".format(mode))
# get additional arguments
skip_copy = kwargs.pop("skip_copy", False)
is_tmp = kwargs.pop("is_tmp", mode == "w")
if mode == "r":
if is_tmp:
# create a temporary target
tmp = self.__class__(is_tmp=self.ext(n=1) or True)
# always copy
self.copy_to_local(tmp)
# yield the copy
try:
yield tmp
finally:
tmp.remove()
else:
# simply yield
yield self
else: # write mode
if is_tmp:
# create a temporary target
tmp = self.__class__(is_tmp=self.ext(n=1) or True)
# copy when existing
if not skip_copy and self.exists():
self.copy_to_local(tmp)
# yield the copy
try:
yield tmp
# move back again
if tmp.exists():
tmp.move_to_local(self, dir_perm=parent_perm)
self.chmod(perm)
else:
logger.warning("cannot move non-existing localized file target {!r}".format(
self))
finally:
tmp.remove()
else:
# create the parent dir
self.parent.touch(perm=parent_perm)
# simply yield
yield self
if self.exists():
self.chmod(perm) | localize(mode="r", perm=None, parent_perm=None, skip_copy=False, is_tmp=None, **kwargs) | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/target/local.py#L252-L309 |
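A usage sketch for the context manager above; the target path is hypothetical, the `dump(..., formatter="json", indent=4)` call follows the convention seen elsewhere in this file, and the `.path` attribute is an assumption:

```python
from law.target.local import LocalFileTarget  # assumed import path

target = LocalFileTarget("/tmp/result.json")  # hypothetical path

# write mode: work on a temporary copy that is moved into place afterwards
with target.localize("w") as tmp:
    tmp.dump({"value": 42}, formatter="json", indent=4)

# read mode with is_tmp=True: operate on a removable local copy
with target.localize("r", is_tmp=True) as tmp:
    print(tmp.path)  # assuming targets expose a .path attribute
```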
riga/law | law/cli/index.py | setup_parser | def setup_parser(sub_parsers):
"""
Sets up the command line parser for the *index* subprogram and adds it to *sub_parsers*.
"""
parser = sub_parsers.add_parser("index", prog="law index", description="Create or update the"
" (human-readable) law task index file ({}). This is only required for the shell"
" auto-completion.".format(Config.instance().get("core", "index_file")))
parser.add_argument("--modules", "-m", nargs="+", help="additional modules to traverse")
parser.add_argument("--no-externals", "-e", action="store_true", help="skip external tasks")
parser.add_argument("--remove", "-r", action="store_true", help="remove the index file and"
" exit")
parser.add_argument("--location", "-l", action="store_true", help="print the location of the"
" index file and exit")
parser.add_argument("--verbose", "-v", action="store_true", help="verbose output") | python | def setup_parser(sub_parsers):
"""
Sets up the command line parser for the *index* subprogram and adds it to *sub_parsers*.
"""
parser = sub_parsers.add_parser("index", prog="law index", description="Create or update the"
" (human-readable) law task index file ({}). This is only required for the shell"
" auto-completion.".format(Config.instance().get("core", "index_file")))
parser.add_argument("--modules", "-m", nargs="+", help="additional modules to traverse")
parser.add_argument("--no-externals", "-e", action="store_true", help="skip external tasks")
parser.add_argument("--remove", "-r", action="store_true", help="remove the index file and"
" exit")
parser.add_argument("--location", "-l", action="store_true", help="print the location of the"
" index file and exit")
parser.add_argument("--verbose", "-v", action="store_true", help="verbose output") | Sets up the command line parser for the *index* subprogram and adds it to *sub_parsers*. | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/cli/index.py#L22-L36 |
riga/law | law/cli/index.py | execute | def execute(args):
"""
Executes the *index* subprogram with parsed commandline *args*.
"""
index_file = Config.instance().get_expanded("core", "index_file")
# just print the file location?
if args.location:
print(index_file)
return
# just remove the index file?
if args.remove:
if os.path.exists(index_file):
os.remove(index_file)
print("removed index file {}".format(index_file))
return
# get modules to lookup
lookup = [m.strip() for m in Config.instance().keys("modules")]
if args.modules:
lookup += args.modules
print("loading tasks from {} module(s)".format(len(lookup)))
# loop through modules, import everything to load tasks
for modid in lookup:
if not modid:
continue
if args.verbose:
sys.stdout.write("loading module '{}'".format(modid))
try:
import_module(modid)
except Exception as e:
if not args.verbose:
print("Error in module '{}': {}".format(colored(modid, "red"), str(e)))
else:
print("\n\nError in module '{}':".format(colored(modid, "red")))
traceback.print_exc()
continue
if args.verbose:
print(", {}".format(colored("done", style="bright")))
# determine tasks to write into the index file
seen_families = []
task_classes = []
lookup = [Task]
while lookup:
cls = lookup.pop(0)
lookup.extend(cls.__subclasses__())
# skip already seen task families
if cls.task_family in seen_families:
continue
seen_families.append(cls.task_family)
# skip when explicitly excluded
if cls.exclude_index:
continue
# skip external tasks
is_external_task = issubclass(cls, ExternalTask)
if args.no_externals and is_external_task:
continue
# skip non-external tasks without run implementation
run_is_callable = callable(getattr(cls, "run", None))
run_is_abstract = getattr(cls.run, "__isabstractmethod__", False)
if not is_external_task and (not run_is_callable or run_is_abstract):
continue
task_classes.append(cls)
def get_task_params(cls):
params = []
for attr in dir(cls):
member = getattr(cls, attr)
if isinstance(member, luigi.Parameter):
exclude = getattr(cls, "exclude_params_index", set())
if not multi_match(attr, exclude, any):
params.append(attr.replace("_", "-"))
return params
def index_line(cls, params):
# format: "module_id:task_family:param param ..."
return "{}:{}:{}".format(cls.__module__, cls.task_family, " ".join(params))
stats = OrderedDict()
# write the index file
if not os.path.exists(os.path.dirname(index_file)):
os.makedirs(os.path.dirname(index_file))
with open(index_file, "w") as f:
for cls in task_classes:
            # get params
params = get_task_params(cls)
# fill stats
if cls.__module__ not in stats:
stats[cls.__module__] = []
stats[cls.__module__].append((cls.task_family, params))
f.write(index_line(cls, params) + "\n")
# print stats
if args.verbose:
for mod, data in six.iteritems(stats):
print("\nmodule '{}', {} task(s):".format(colored(mod, style="bright"), len(data)))
for task_family, _ in data:
print(" - {}".format(colored(task_family, "green")))
print("")
print("written {} task(s) to index file '{}'".format(len(task_classes), index_file)) | python | def execute(args):
"""
Executes the *index* subprogram with parsed commandline *args*.
"""
index_file = Config.instance().get_expanded("core", "index_file")
# just print the file location?
if args.location:
print(index_file)
return
# just remove the index file?
if args.remove:
if os.path.exists(index_file):
os.remove(index_file)
print("removed index file {}".format(index_file))
return
# get modules to lookup
lookup = [m.strip() for m in Config.instance().keys("modules")]
if args.modules:
lookup += args.modules
print("loading tasks from {} module(s)".format(len(lookup)))
# loop through modules, import everything to load tasks
for modid in lookup:
if not modid:
continue
if args.verbose:
sys.stdout.write("loading module '{}'".format(modid))
try:
import_module(modid)
except Exception as e:
if not args.verbose:
print("Error in module '{}': {}".format(colored(modid, "red"), str(e)))
else:
print("\n\nError in module '{}':".format(colored(modid, "red")))
traceback.print_exc()
continue
if args.verbose:
print(", {}".format(colored("done", style="bright")))
# determine tasks to write into the index file
seen_families = []
task_classes = []
lookup = [Task]
while lookup:
cls = lookup.pop(0)
lookup.extend(cls.__subclasses__())
# skip already seen task families
if cls.task_family in seen_families:
continue
seen_families.append(cls.task_family)
# skip when explicitly excluded
if cls.exclude_index:
continue
# skip external tasks
is_external_task = issubclass(cls, ExternalTask)
if args.no_externals and is_external_task:
continue
# skip non-external tasks without run implementation
run_is_callable = callable(getattr(cls, "run", None))
run_is_abstract = getattr(cls.run, "__isabstractmethod__", False)
if not is_external_task and (not run_is_callable or run_is_abstract):
continue
task_classes.append(cls)
def get_task_params(cls):
params = []
for attr in dir(cls):
member = getattr(cls, attr)
if isinstance(member, luigi.Parameter):
exclude = getattr(cls, "exclude_params_index", set())
if not multi_match(attr, exclude, any):
params.append(attr.replace("_", "-"))
return params
def index_line(cls, params):
# format: "module_id:task_family:param param ..."
return "{}:{}:{}".format(cls.__module__, cls.task_family, " ".join(params))
stats = OrderedDict()
# write the index file
if not os.path.exists(os.path.dirname(index_file)):
os.makedirs(os.path.dirname(index_file))
with open(index_file, "w") as f:
for cls in task_classes:
            # get params
params = get_task_params(cls)
# fill stats
if cls.__module__ not in stats:
stats[cls.__module__] = []
stats[cls.__module__].append((cls.task_family, params))
f.write(index_line(cls, params) + "\n")
# print stats
if args.verbose:
for mod, data in six.iteritems(stats):
print("\nmodule '{}', {} task(s):".format(colored(mod, style="bright"), len(data)))
for task_family, _ in data:
print(" - {}".format(colored(task_family, "green")))
print("")
print("written {} task(s) to index file '{}'".format(len(task_classes), index_file)) | Executes the *index* subprogram with parsed commandline *args*. | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/cli/index.py#L39-L155 |
riga/law | law/cli/index.py | get_global_parameters | def get_global_parameters(config_names=("core", "scheduler", "worker", "retcode")):
"""
Returns a list of global, luigi-internal configuration parameters. Each list item is a 4-tuple
containing the configuration class, the parameter instance, the parameter name, and the full
parameter name in the cli. When *config_names* is set, it should be a list of configuration
class names that are exclusively taken into account.
"""
params = []
for cls in luigi.task.Config.__subclasses__():
if config_names and cls.__name__ not in config_names:
continue
for attr in dir(cls):
param = getattr(cls, attr)
if not isinstance(param, luigi.Parameter):
continue
full_name = attr.replace("_", "-")
if getattr(cls, "use_cmdline_section", True):
full_name = "{}-{}".format(cls.__name__.replace("_", "-"), full_name)
params.append((cls, param, attr, full_name))
return params | python | def get_global_parameters(config_names=("core", "scheduler", "worker", "retcode")):
"""
Returns a list of global, luigi-internal configuration parameters. Each list item is a 4-tuple
containing the configuration class, the parameter instance, the parameter name, and the full
parameter name in the cli. When *config_names* is set, it should be a list of configuration
class names that are exclusively taken into account.
"""
params = []
for cls in luigi.task.Config.__subclasses__():
if config_names and cls.__name__ not in config_names:
continue
for attr in dir(cls):
param = getattr(cls, attr)
if not isinstance(param, luigi.Parameter):
continue
full_name = attr.replace("_", "-")
if getattr(cls, "use_cmdline_section", True):
full_name = "{}-{}".format(cls.__name__.replace("_", "-"), full_name)
params.append((cls, param, attr, full_name))
return params | Returns a list of global, luigi-internal configuration parameters. Each list item is a 4-tuple
containing the configuration class, the parameter instance, the parameter name, and the full
parameter name in the cli. When *config_names* is set, it should be a list of configuration
class names that are exclusively taken into account. | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/cli/index.py#L158-L181 |
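A minimal usage sketch for the helper above, assuming the import path follows the file location law/cli/index.py and that law (and therefore luigi) is installed:

```python
from law.cli.index import get_global_parameters  # assumed import path

# each item is (config class, parameter instance, attribute name, full cli name)
for cls, param, name, full_name in get_global_parameters():
    print("--{} (from {})".format(full_name, cls.__name__))
```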
riga/law | law/util.py | rel_path | def rel_path(anchor, *paths):
"""
    Returns a path made of fragment *paths* relative to an *anchor* path. When *anchor* is a file,
its absolute directory is used instead.
"""
anchor = os.path.abspath(os.path.expandvars(os.path.expanduser(anchor)))
if os.path.exists(anchor) and os.path.isfile(anchor):
anchor = os.path.dirname(anchor)
return os.path.normpath(os.path.join(anchor, *paths)) | python | def rel_path(anchor, *paths):
"""
    Returns a path made of fragment *paths* relative to an *anchor* path. When *anchor* is a file,
its absolute directory is used instead.
"""
anchor = os.path.abspath(os.path.expandvars(os.path.expanduser(anchor)))
if os.path.exists(anchor) and os.path.isfile(anchor):
anchor = os.path.dirname(anchor)
    return os.path.normpath(os.path.join(anchor, *paths)) | Returns a path made of fragment *paths* relative to an *anchor* path. When *anchor* is a file,
its absolute directory is used instead. | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/util.py#L54-L62 |
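A short usage sketch; the path fragments are hypothetical:

```python
from law.util import rel_path

# resolve a path relative to the directory of the current file
config = rel_path(__file__, "data", "config.json")  # hypothetical fragments
print(config)
```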
riga/law | law/util.py | law_home_path | def law_home_path(*paths):
"""
Returns the law home directory (``$LAW_HOME``) that defaults to ``"$HOME/.law"``, optionally
joined with *paths*.
"""
home = os.getenv("LAW_HOME", "$HOME/.law")
home = os.path.expandvars(os.path.expanduser(home))
return os.path.normpath(os.path.join(home, *paths)) | python | def law_home_path(*paths):
"""
Returns the law home directory (``$LAW_HOME``) that defaults to ``"$HOME/.law"``, optionally
joined with *paths*.
"""
home = os.getenv("LAW_HOME", "$HOME/.law")
home = os.path.expandvars(os.path.expanduser(home))
return os.path.normpath(os.path.join(home, *paths)) | Returns the law home directory (``$LAW_HOME``) that defaults to ``"$HOME/.law"``, optionally
joined with *paths*. | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/util.py#L72-L79 |
riga/law | law/util.py | print_err | def print_err(*args, **kwargs):
""" print_err(*args, flush=False)
Same as *print*, but outputs to stderr. If *flush* is *True*, stderr is flushed after printing.
"""
sys.stderr.write(" ".join(str(arg) for arg in args) + "\n")
if kwargs.get("flush", False):
sys.stderr.flush() | python | def print_err(*args, **kwargs):
""" print_err(*args, flush=False)
Same as *print*, but outputs to stderr. If *flush* is *True*, stderr is flushed after printing.
"""
sys.stderr.write(" ".join(str(arg) for arg in args) + "\n")
if kwargs.get("flush", False):
sys.stderr.flush() | print_err(*args, flush=False)
Same as *print*, but outputs to stderr. If *flush* is *True*, stderr is flushed after printing. | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/util.py#L82-L88 |
riga/law | law/util.py | abort | def abort(msg=None, exitcode=1):
"""
Aborts the process (*sys.exit*) with an *exitcode*. If *msg* is not *None*, it is printed first
to stdout if *exitcode* is 0 or *None*, and to stderr otherwise.
"""
if msg is not None:
if exitcode in (None, 0):
print(msg)
else:
print_err(msg)
sys.exit(exitcode) | python | def abort(msg=None, exitcode=1):
"""
Aborts the process (*sys.exit*) with an *exitcode*. If *msg* is not *None*, it is printed first
to stdout if *exitcode* is 0 or *None*, and to stderr otherwise.
"""
if msg is not None:
if exitcode in (None, 0):
print(msg)
else:
print_err(msg)
sys.exit(exitcode) | Aborts the process (*sys.exit*) with an *exitcode*. If *msg* is not *None*, it is printed first
to stdout if *exitcode* is 0 or *None*, and to stderr otherwise. | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/util.py#L91-L101 |
riga/law | law/util.py | colored | def colored(msg, color=None, background=None, style=None, force=False):
"""
    Returns the colored version of a string *msg*. For *color*, *background* and *style* options, see
https://misc.flogisoft.com/bash/tip_colors_and_formatting. Unless *force* is *True*, the *msg*
string is returned unchanged in case the output is not a tty.
"""
try:
if not force and not os.isatty(sys.stdout.fileno()):
return msg
except:
return msg
color = colors.get(color, colors["default"])
background = backgrounds.get(background, backgrounds["default"])
if not isinstance(style, (tuple, list, set)):
style = (style,)
style = ";".join(str(styles.get(s, styles["default"])) for s in style)
return "\033[{};{};{}m{}\033[0m".format(style, background, color, msg) | python | def colored(msg, color=None, background=None, style=None, force=False):
"""
    Returns the colored version of a string *msg*. For *color*, *background* and *style* options, see
https://misc.flogisoft.com/bash/tip_colors_and_formatting. Unless *force* is *True*, the *msg*
string is returned unchanged in case the output is not a tty.
"""
try:
if not force and not os.isatty(sys.stdout.fileno()):
return msg
except:
return msg
color = colors.get(color, colors["default"])
background = backgrounds.get(background, backgrounds["default"])
if not isinstance(style, (tuple, list, set)):
style = (style,)
style = ";".join(str(styles.get(s, styles["default"])) for s in style)
return "\033[{};{};{}m{}\033[0m".format(style, background, color, msg) | Return the colored version of a string *msg*. For *color*, *background* and *style* options, see
https://misc.flogisoft.com/bash/tip_colors_and_formatting. Unless *force* is *True*, the *msg*
string is returned unchanged in case the output is not a tty. | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/util.py#L157-L176 |
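A small usage sketch; the color names "red" and "green" and the "bright" style are used elsewhere in this module:

```python
from law.util import colored

# force=True colorizes even when stdout is not a tty (e.g. when piped)
print(colored("error", color="red", style="bright", force=True))
print(colored("done", color="green", force=True))
```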
riga/law | law/util.py | query_choice | def query_choice(msg, choices, default=None, descriptions=None, lower=True):
"""
Interactively query a choice from the prompt until the input matches one of the *choices*. The
prompt can be configured using *msg* and *descriptions*, which, if set, must have the same
length as *choices*. When *default* is not *None* it must be one of the choices and is used when
the input is empty. When *lower* is *True*, the input is compared to the choices in lower case.
"""
choices = _choices = [str(c) for c in choices]
if lower:
_choices = [c.lower() for c in choices]
if default is not None:
if default not in choices:
raise Exception("default must be one of the choices")
hints = [(choice if choice != default else choice + "*") for choice in choices]
if descriptions is not None:
if len(descriptions) != len(choices):
raise ValueError("length of descriptions must match length of choices")
hints = ["{}({})".format(*tpl) for tpl in zip(hints, descriptions)]
msg += " [{}] ".format(", ".join(hints))
choice = None
while choice not in _choices:
if choice is not None:
print("invalid choice: '{}'\n".format(choice))
choice = six.moves.input(msg)
if default is not None and choice == "":
choice = default
if lower:
choice = choice.lower()
return choice | python | def query_choice(msg, choices, default=None, descriptions=None, lower=True):
"""
Interactively query a choice from the prompt until the input matches one of the *choices*. The
prompt can be configured using *msg* and *descriptions*, which, if set, must have the same
length as *choices*. When *default* is not *None* it must be one of the choices and is used when
the input is empty. When *lower* is *True*, the input is compared to the choices in lower case.
"""
choices = _choices = [str(c) for c in choices]
if lower:
_choices = [c.lower() for c in choices]
if default is not None:
if default not in choices:
raise Exception("default must be one of the choices")
hints = [(choice if choice != default else choice + "*") for choice in choices]
if descriptions is not None:
if len(descriptions) != len(choices):
raise ValueError("length of descriptions must match length of choices")
hints = ["{}({})".format(*tpl) for tpl in zip(hints, descriptions)]
msg += " [{}] ".format(", ".join(hints))
choice = None
while choice not in _choices:
if choice is not None:
print("invalid choice: '{}'\n".format(choice))
choice = six.moves.input(msg)
if default is not None and choice == "":
choice = default
if lower:
choice = choice.lower()
return choice | Interactively query a choice from the prompt until the input matches one of the *choices*. The
prompt can be configured using *msg* and *descriptions*, which, if set, must have the same
length as *choices*. When *default* is not *None* it must be one of the choices and is used when
the input is empty. When *lower* is *True*, the input is compared to the choices in lower case. | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/util.py#L186-L218 |
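A usage sketch with a hypothetical prompt; the default choice is marked with "*" in the rendered hint:

```python
from law.util import query_choice

choice = query_choice("continue?", ["y", "n"], default="n",
    descriptions=["yes", "no"])
if choice == "y":
    print("continuing")
```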
riga/law | law/util.py | multi_match | def multi_match(name, patterns, mode=any, regex=False):
"""
Compares *name* to multiple *patterns* and returns *True* in case of at least one match (*mode*
= *any*, the default), or in case all patterns matched (*mode* = *all*). Otherwise, *False* is
returned. When *regex* is *True*, *re.match* is used instead of *fnmatch.fnmatch*.
"""
if not regex:
return mode(fnmatch.fnmatch(name, pattern) for pattern in patterns)
else:
return mode(re.match(pattern, name) for pattern in patterns) | python | def multi_match(name, patterns, mode=any, regex=False):
"""
Compares *name* to multiple *patterns* and returns *True* in case of at least one match (*mode*
= *any*, the default), or in case all patterns matched (*mode* = *all*). Otherwise, *False* is
returned. When *regex* is *True*, *re.match* is used instead of *fnmatch.fnmatch*.
"""
if not regex:
return mode(fnmatch.fnmatch(name, pattern) for pattern in patterns)
else:
return mode(re.match(pattern, name) for pattern in patterns) | Compares *name* to multiple *patterns* and returns *True* in case of at least one match (*mode*
= *any*, the default), or in case all patterns matched (*mode* = *all*). Otherwise, *False* is
returned. When *regex* is *True*, *re.match* is used instead of *fnmatch.fnmatch*. | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/util.py#L221-L230 |
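A few illustrative calls; note that with *regex* set, the truthy return value of *re.match* is propagated rather than a plain boolean:

```python
from law.util import multi_match

multi_match("law/util.py", ["*.py", "*.txt"])              # True, one pattern matches
multi_match("law/util.py", ["law/*", "*.py"], mode=all)    # True, all patterns match
multi_match("law/util.py", [r"^law/.+\.py$"], regex=True)  # truthy, re.match is used
```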
riga/law | law/util.py | is_lazy_iterable | def is_lazy_iterable(obj):
"""
Returns whether *obj* is iterable lazily, such as generators, range objects, etc.
"""
return isinstance(obj,
(types.GeneratorType, collections.MappingView, six.moves.range, enumerate)) | python | def is_lazy_iterable(obj):
"""
Returns whether *obj* is iterable lazily, such as generators, range objects, etc.
"""
return isinstance(obj,
(types.GeneratorType, collections.MappingView, six.moves.range, enumerate)) | Returns whether *obj* is iterable lazily, such as generators, range objects, etc. | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/util.py#L233-L238 |
riga/law | law/util.py | make_list | def make_list(obj, cast=True):
"""
Converts an object *obj* to a list and returns it. Objects of types *tuple* and *set* are
converted if *cast* is *True*. Otherwise, and for all other types, *obj* is put in a new list.
"""
if isinstance(obj, list):
return list(obj)
elif is_lazy_iterable(obj):
return list(obj)
elif isinstance(obj, (tuple, set)) and cast:
return list(obj)
else:
return [obj] | python | def make_list(obj, cast=True):
"""
Converts an object *obj* to a list and returns it. Objects of types *tuple* and *set* are
converted if *cast* is *True*. Otherwise, and for all other types, *obj* is put in a new list.
"""
if isinstance(obj, list):
return list(obj)
elif is_lazy_iterable(obj):
return list(obj)
elif isinstance(obj, (tuple, set)) and cast:
return list(obj)
else:
return [obj] | Converts an object *obj* to a list and returns it. Objects of types *tuple* and *set* are
converted if *cast* is *True*. Otherwise, and for all other types, *obj* is put in a new list. | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/util.py#L241-L253 |
riga/law | law/util.py | make_tuple | def make_tuple(obj, cast=True):
"""
Converts an object *obj* to a tuple and returns it. Objects of types *list* and *set* are
converted if *cast* is *True*. Otherwise, and for all other types, *obj* is put in a new tuple.
"""
if isinstance(obj, tuple):
return tuple(obj)
elif is_lazy_iterable(obj):
return tuple(obj)
elif isinstance(obj, (list, set)) and cast:
return tuple(obj)
else:
return (obj,) | python | def make_tuple(obj, cast=True):
"""
Converts an object *obj* to a tuple and returns it. Objects of types *list* and *set* are
converted if *cast* is *True*. Otherwise, and for all other types, *obj* is put in a new tuple.
"""
if isinstance(obj, tuple):
return tuple(obj)
elif is_lazy_iterable(obj):
return tuple(obj)
elif isinstance(obj, (list, set)) and cast:
return tuple(obj)
else:
return (obj,) | Converts an object *obj* to a tuple and returns it. Objects of types *list* and *set* are
converted if *cast* is *True*. Otherwise, and for all other types, *obj* is put in a new tuple. | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/util.py#L256-L268 |
riga/law | law/util.py | flatten | def flatten(struct):
"""
Flattens and returns a complex structured object *struct*.
"""
if isinstance(struct, dict):
return flatten(struct.values())
elif isinstance(struct, (list, tuple, set)) or is_lazy_iterable(struct):
objs = []
for obj in struct:
objs.extend(flatten(obj))
return objs
else:
return [struct] | python | def flatten(struct):
"""
Flattens and returns a complex structured object *struct*.
"""
if isinstance(struct, dict):
return flatten(struct.values())
elif isinstance(struct, (list, tuple, set)) or is_lazy_iterable(struct):
objs = []
for obj in struct:
objs.extend(flatten(obj))
return objs
else:
return [struct] | Flattens and returns a complex structured object *struct*. | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/util.py#L271-L283 |
riga/law | law/util.py | merge_dicts | def merge_dicts(*dicts, **kwargs):
""" merge_dicts(*dicts, cls=None)
Takes multiple *dicts* and returns a single merged dict. The merging takes place in order of the
passed dicts and therefore, values of rear objects have precedence in case of field collisions.
The class of the returned merged dict is configurable via *cls*. If it is *None*, the class is
inferred from the first dict object in *dicts*.
"""
# get or infer the class
cls = kwargs.get("cls", None)
if cls is None:
for d in dicts:
if isinstance(d, dict):
cls = d.__class__
break
else:
raise TypeError("cannot infer cls as none of the passed objects is of type dict")
# start merging
merged_dict = cls()
for d in dicts:
if isinstance(d, dict):
merged_dict.update(d)
return merged_dict | python | def merge_dicts(*dicts, **kwargs):
""" merge_dicts(*dicts, cls=None)
Takes multiple *dicts* and returns a single merged dict. The merging takes place in order of the
passed dicts and therefore, values of rear objects have precedence in case of field collisions.
The class of the returned merged dict is configurable via *cls*. If it is *None*, the class is
inferred from the first dict object in *dicts*.
"""
# get or infer the class
cls = kwargs.get("cls", None)
if cls is None:
for d in dicts:
if isinstance(d, dict):
cls = d.__class__
break
else:
raise TypeError("cannot infer cls as none of the passed objects is of type dict")
# start merging
merged_dict = cls()
for d in dicts:
if isinstance(d, dict):
merged_dict.update(d)
return merged_dict | merge_dicts(*dicts, cls=None)
Takes multiple *dicts* and returns a single merged dict. The merging takes place in order of the
passed dicts and therefore, values of rear objects have precedence in case of field collisions.
The class of the returned merged dict is configurable via *cls*. If it is *None*, the class is
inferred from the first dict object in *dicts*. | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/util.py#L286-L309 |
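A small sketch of the merge order and the *cls* argument:

```python
from collections import OrderedDict

from law.util import merge_dicts

# rear dicts win on key collisions; the result class is inferred from the first dict
merge_dicts({"a": 1, "b": 2}, {"b": 3})            # -> {"a": 1, "b": 3}
merge_dicts(OrderedDict(a=1), {"b": 2}, cls=dict)  # -> {"a": 1, "b": 2} as a plain dict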
riga/law | law/util.py | which | def which(prog):
"""
Pythonic ``which`` implementation. Returns the path to an executable *prog* by searching in
*PATH*, or *None* when it could not be found.
"""
executable = lambda path: os.path.isfile(path) and os.access(path, os.X_OK)
# prog can also be a path
dirname, _ = os.path.split(prog)
if dirname:
if executable(prog):
return prog
elif "PATH" in os.environ:
for search_path in os.environ["PATH"].split(os.pathsep):
path = os.path.join(search_path.strip('"'), prog)
if executable(path):
return path
return None | python | def which(prog):
"""
Pythonic ``which`` implementation. Returns the path to an executable *prog* by searching in
*PATH*, or *None* when it could not be found.
"""
executable = lambda path: os.path.isfile(path) and os.access(path, os.X_OK)
# prog can also be a path
dirname, _ = os.path.split(prog)
if dirname:
if executable(prog):
return prog
elif "PATH" in os.environ:
for search_path in os.environ["PATH"].split(os.pathsep):
path = os.path.join(search_path.strip('"'), prog)
if executable(path):
return path
return None | Pythonic ``which`` implementation. Returns the path to an executable *prog* by searching in
*PATH*, or *None* when it could not be found. | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/util.py#L312-L330 |
riga/law | law/util.py | map_verbose | def map_verbose(func, seq, msg="{}", every=25, start=True, end=True, offset=0, callback=None):
"""
Same as the built-in map function but prints a *msg* after chunks of size *every* iterations.
    When *start* (*end*) is *True*, the *msg* is also printed after the first (last) iteration.
Note that *msg* is supposed to be a template string that will be formatted with the current
iteration number (starting at 0) plus *offset* using ``str.format``. When *callback* is
callable, it is invoked instead of the default print method with the current iteration number
(without *offset*) as the only argument. Example:
.. code-block:: python
func = lambda x: x ** 2
msg = "computing square of {}"
squares = map_verbose(func, range(7), msg, every=3)
# ->
# computing square of 0
# computing square of 2
# computing square of 5
# computing square of 6
"""
# default callable
if not callable(callback):
def callback(i):
print(msg.format(i + offset))
results = []
for i, obj in enumerate(seq):
results.append(func(obj))
do_call = (start and i == 0) or (i + 1) % every == 0
if do_call:
callback(i)
else:
if end and results and not do_call:
callback(i)
return results | python | def map_verbose(func, seq, msg="{}", every=25, start=True, end=True, offset=0, callback=None):
"""
Same as the built-in map function but prints a *msg* after chunks of size *every* iterations.
    When *start* (*end*) is *True*, the *msg* is also printed after the first (last) iteration.
Note that *msg* is supposed to be a template string that will be formatted with the current
iteration number (starting at 0) plus *offset* using ``str.format``. When *callback* is
callable, it is invoked instead of the default print method with the current iteration number
(without *offset*) as the only argument. Example:
.. code-block:: python
func = lambda x: x ** 2
msg = "computing square of {}"
squares = map_verbose(func, range(7), msg, every=3)
# ->
# computing square of 0
# computing square of 2
# computing square of 5
# computing square of 6
"""
# default callable
if not callable(callback):
def callback(i):
print(msg.format(i + offset))
results = []
for i, obj in enumerate(seq):
results.append(func(obj))
do_call = (start and i == 0) or (i + 1) % every == 0
if do_call:
callback(i)
else:
if end and results and not do_call:
callback(i)
return results | Same as the built-in map function but prints a *msg* after chunks of size *every* iterations.
When *start* (*end*) is *True*, the *msg* is also printed after the first (last) iteration.
Note that *msg* is supposed to be a template string that will be formatted with the current
iteration number (starting at 0) plus *offset* using ``str.format``. When *callback* is
callable, it is invoked instead of the default print method with the current iteration number
(without *offset*) as the only argument. Example:
.. code-block:: python
func = lambda x: x ** 2
msg = "computing square of {}"
squares = map_verbose(func, range(7), msg, every=3)
# ->
# computing square of 0
# computing square of 2
# computing square of 5
# computing square of 6 | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/util.py#L333-L368 |
riga/law | law/util.py | map_struct | def map_struct(func, struct, cls=None, map_dict=True, map_list=True, map_tuple=False,
map_set=False):
"""
Applies a function *func* to each value of a complex structured object *struct* and returns the
output in the same structure. Example:
.. code-block:: python
struct = {"foo": [123, 456], "bar": [{"1": 1}, {"2": 2}]}
def times_two(i):
return i * 2
        map_struct(times_two, struct)
# -> {"foo": [246, 912], "bar": [{"1": 2}, {"2": 4}]}
When *cls* is not *None*, it exclusively defines the class of objects that *func* is applied on.
All other objects are unchanged. *map_dict*, *map_list*, *map_tuple* and *map_set* configure if
    objects of the respective types are traversed or mapped. They can be booleans or integer values
that define the depth of that setting in the struct.
"""
# interpret generators and views as lists
if is_lazy_iterable(struct):
struct = list(struct)
valid_types = tuple()
if map_dict:
valid_types += (dict,)
if isinstance(map_dict, int) and not isinstance(map_dict, bool):
map_dict -= 1
if map_list:
valid_types += (list,)
if isinstance(map_list, int) and not isinstance(map_list, bool):
map_list -= 1
if map_tuple:
valid_types += (tuple,)
if isinstance(map_tuple, int) and not isinstance(map_tuple, bool):
map_tuple -= 1
if map_set:
valid_types += (set,)
if isinstance(map_set, int) and not isinstance(map_set, bool):
map_set -= 1
# is an instance of cls?
if cls is not None and isinstance(struct, cls):
return func(struct)
# traverse?
elif isinstance(struct, valid_types):
        # create a new struct, treat tuples as lists for iterative item appending
new_struct = struct.__class__() if not isinstance(struct, tuple) else []
# create type-dependent generator and addition callback
if isinstance(struct, (list, tuple)):
gen = enumerate(struct)
add = lambda _, value: new_struct.append(value)
elif isinstance(struct, set):
gen = enumerate(struct)
add = lambda _, value: new_struct.add(value)
else: # dict
gen = six.iteritems(struct)
add = lambda key, value: new_struct.__setitem__(key, value)
# recursively fill the new struct
for key, value in gen:
value = map_struct(func, value, cls=cls, map_dict=map_dict, map_list=map_list,
map_tuple=map_tuple, map_set=map_set)
add(key, value)
# convert tuples
if isinstance(struct, tuple):
new_struct = struct.__class__(new_struct)
return new_struct
# when cls is set, just return
elif cls is not None:
return struct
# apply func
else:
return func(struct) | python | def map_struct(func, struct, cls=None, map_dict=True, map_list=True, map_tuple=False,
map_set=False):
"""
Applies a function *func* to each value of a complex structured object *struct* and returns the
output in the same structure. Example:
.. code-block:: python
struct = {"foo": [123, 456], "bar": [{"1": 1}, {"2": 2}]}
def times_two(i):
return i * 2
        map_struct(times_two, struct)
# -> {"foo": [246, 912], "bar": [{"1": 2}, {"2": 4}]}
When *cls* is not *None*, it exclusively defines the class of objects that *func* is applied on.
All other objects are unchanged. *map_dict*, *map_list*, *map_tuple* and *map_set* configure if
    objects of the respective types are traversed or mapped. They can be booleans or integer values
that define the depth of that setting in the struct.
"""
# interpret generators and views as lists
if is_lazy_iterable(struct):
struct = list(struct)
valid_types = tuple()
if map_dict:
valid_types += (dict,)
if isinstance(map_dict, int) and not isinstance(map_dict, bool):
map_dict -= 1
if map_list:
valid_types += (list,)
if isinstance(map_list, int) and not isinstance(map_list, bool):
map_list -= 1
if map_tuple:
valid_types += (tuple,)
if isinstance(map_tuple, int) and not isinstance(map_tuple, bool):
map_tuple -= 1
if map_set:
valid_types += (set,)
if isinstance(map_set, int) and not isinstance(map_set, bool):
map_set -= 1
# is an instance of cls?
if cls is not None and isinstance(struct, cls):
return func(struct)
# traverse?
elif isinstance(struct, valid_types):
        # create a new struct, treat tuples as lists for iterative item appending
new_struct = struct.__class__() if not isinstance(struct, tuple) else []
# create type-dependent generator and addition callback
if isinstance(struct, (list, tuple)):
gen = enumerate(struct)
add = lambda _, value: new_struct.append(value)
elif isinstance(struct, set):
gen = enumerate(struct)
add = lambda _, value: new_struct.add(value)
else: # dict
gen = six.iteritems(struct)
add = lambda key, value: new_struct.__setitem__(key, value)
# recursively fill the new struct
for key, value in gen:
value = map_struct(func, value, cls=cls, map_dict=map_dict, map_list=map_list,
map_tuple=map_tuple, map_set=map_set)
add(key, value)
# convert tuples
if isinstance(struct, tuple):
new_struct = struct.__class__(new_struct)
return new_struct
# when cls is set, just return
elif cls is not None:
return struct
# apply func
else:
return func(struct) | Applies a function *func* to each value of a complex structured object *struct* and returns the
output in the same structure. Example:
.. code-block:: python
struct = {"foo": [123, 456], "bar": [{"1": 1}, {"2": 2}]}
def times_two(i):
return i * 2
    map_struct(times_two, struct)
# -> {"foo": [246, 912], "bar": [{"1": 2}, {"2": 4}]}
When *cls* is not *None*, it exclusively defines the class of objects that *func* is applied on.
All other objects are unchanged. *map_dict*, *map_list*, *map_tuple* and *map_set* configure if
objects of the respective types are traversed or mapped. They can be booleans or integer values
that define the depth of that setting in the struct. | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/util.py#L371-L451 |
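A short sketch of the traversal flags, using the correct ``map_struct(func, struct, ...)`` argument order:

```python
from law.util import map_struct

struct = {"foo": [1, 2], "bar": (3, 4)}

# default: dicts and lists are traversed, tuples are mapped as a whole
map_struct(str, struct)
# -> {"foo": ["1", "2"], "bar": "(3, 4)"}

# with map_tuple=True the tuple is traversed as well
map_struct(str, struct, map_tuple=True)
# -> {"foo": ["1", "2"], "bar": ("3", "4")}
```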
riga/law | law/util.py | mask_struct | def mask_struct(mask, struct, replace=no_value):
"""
Masks a complex structured object *struct* with a *mask* and returns the remaining values. When
*replace* is set, masked values are replaced with that value instead of being removed. The
*mask* can have a complex structure as well. Examples:
.. code-block:: python
struct = {"a": [1, 2], "b": [3, ["foo", "bar"]]}
# simple example
mask_struct({"a": [False, True], "b": False}, struct)
# => {"a": [2]}
# omitting mask information results in keeping values
mask_struct({"a": [False, True]}, struct)
# => {"a": [2], "b": [3, ["foo", "bar"]]}
"""
    # interpret lazy iterables as lists
if is_lazy_iterable(struct):
struct = list(struct)
# when mask is a bool, or struct is not a dict or sequence, apply the mask immediately
if isinstance(mask, bool) or not isinstance(struct, (list, tuple, dict)):
return struct if mask else replace
# check list and tuple types
elif isinstance(struct, (list, tuple)) and isinstance(mask, (list, tuple)):
new_struct = []
for i, val in enumerate(struct):
if i >= len(mask):
new_struct.append(val)
else:
repl = replace
if isinstance(replace, (list, tuple)) and len(replace) > i:
repl = replace[i]
val = mask_struct(mask[i], val, replace=repl)
if val != no_value:
new_struct.append(val)
return struct.__class__(new_struct) if new_struct else replace
# check dict types
elif isinstance(struct, dict) and isinstance(mask, dict):
new_struct = struct.__class__()
for key, val in six.iteritems(struct):
if key not in mask:
new_struct[key] = val
else:
repl = replace
if isinstance(replace, dict) and key in replace:
repl = replace[key]
val = mask_struct(mask[key], val, replace=repl)
if val != no_value:
new_struct[key] = val
return new_struct or replace
# when this point is reached, mask and struct have incompatible types
raise TypeError("mask and struct must have the same type, got '{}' and '{}'".format(type(mask),
type(struct))) | python | def mask_struct(mask, struct, replace=no_value):
"""
Masks a complex structured object *struct* with a *mask* and returns the remaining values. When
*replace* is set, masked values are replaced with that value instead of being removed. The
*mask* can have a complex structure as well. Examples:
.. code-block:: python
struct = {"a": [1, 2], "b": [3, ["foo", "bar"]]}
# simple example
mask_struct({"a": [False, True], "b": False}, struct)
# => {"a": [2]}
# omitting mask information results in keeping values
mask_struct({"a": [False, True]}, struct)
# => {"a": [2], "b": [3, ["foo", "bar"]]}
"""
    # interpret lazy iterables as lists
if is_lazy_iterable(struct):
struct = list(struct)
# when mask is a bool, or struct is not a dict or sequence, apply the mask immediately
if isinstance(mask, bool) or not isinstance(struct, (list, tuple, dict)):
return struct if mask else replace
# check list and tuple types
elif isinstance(struct, (list, tuple)) and isinstance(mask, (list, tuple)):
new_struct = []
for i, val in enumerate(struct):
if i >= len(mask):
new_struct.append(val)
else:
repl = replace
if isinstance(replace, (list, tuple)) and len(replace) > i:
repl = replace[i]
val = mask_struct(mask[i], val, replace=repl)
if val != no_value:
new_struct.append(val)
return struct.__class__(new_struct) if new_struct else replace
# check dict types
elif isinstance(struct, dict) and isinstance(mask, dict):
new_struct = struct.__class__()
for key, val in six.iteritems(struct):
if key not in mask:
new_struct[key] = val
else:
repl = replace
if isinstance(replace, dict) and key in replace:
repl = replace[key]
val = mask_struct(mask[key], val, replace=repl)
if val != no_value:
new_struct[key] = val
return new_struct or replace
# when this point is reached, mask and struct have incompatible types
raise TypeError("mask and struct must have the same type, got '{}' and '{}'".format(type(mask),
type(struct))) | Masks a complex structured object *struct* with a *mask* and returns the remaining values. When
*replace* is set, masked values are replaced with that value instead of being removed. The
*mask* can have a complex structure as well. Examples:
.. code-block:: python
struct = {"a": [1, 2], "b": [3, ["foo", "bar"]]}
# simple example
mask_struct({"a": [False, True], "b": False}, struct)
# => {"a": [2]}
# omitting mask information results in keeping values
mask_struct({"a": [False, True]}, struct)
# => {"a": [2], "b": [3, ["foo", "bar"]]} | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/util.py#L454-L513 |
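A short example of the *replace* argument, which the docstring does not cover; masked values are substituted instead of dropped:

```python
from law.util import mask_struct

struct = {"a": [1, 2], "b": [3, ["foo", "bar"]]}

mask_struct({"a": [False, True], "b": False}, struct, replace=None)
# -> {"a": [None, 2], "b": None}
```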
riga/law | law/util.py | tmp_file | def tmp_file(*args, **kwargs):
"""
Context manager that generates a temporary file, yields the file descriptor number and temporary
    path, and eventually removes the file. All *args* and *kwargs* are passed to
:py:meth:`tempfile.mkstemp`.
"""
fileno, path = tempfile.mkstemp(*args, **kwargs)
# create the file
with open(path, "w") as f:
f.write("")
# yield it
try:
yield fileno, path
finally:
if os.path.exists(path):
os.remove(path) | python | def tmp_file(*args, **kwargs):
"""
Context manager that generates a temporary file, yields the file descriptor number and temporary
    path, and eventually removes the file. All *args* and *kwargs* are passed to
:py:meth:`tempfile.mkstemp`.
"""
fileno, path = tempfile.mkstemp(*args, **kwargs)
# create the file
with open(path, "w") as f:
f.write("")
# yield it
try:
yield fileno, path
finally:
if os.path.exists(path):
os.remove(path) | Context manager that generates a temporary file, yields the file descriptor number and temporary
path, and eventually removes the file. All *args* and *kwargs* are passed to
:py:meth:`tempfile.mkstemp`. | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/util.py#L517-L534 |
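A usage sketch; the suffix and content are arbitrary:

```python
from law.util import tmp_file

# the file exists for the duration of the with block and is removed afterwards
with tmp_file(suffix=".txt") as (fileno, path):
    with open(path, "w") as f:
        f.write("some content")
    print(path)
```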
riga/law | law/util.py | interruptable_popen | def interruptable_popen(*args, **kwargs):
"""
Shorthand to :py:class:`Popen` followed by :py:meth:`Popen.communicate`. All *args* and *kwargs*
    are forwarded to the :py:class:`Popen` constructor. The return code, standard output and
standard error are returned in a tuple. The call :py:meth:`Popen.communicate` is interruptable
by the user.
"""
kwargs["preexec_fn"] = os.setsid
p = subprocess.Popen(*args, **kwargs)
try:
out, err = p.communicate()
except KeyboardInterrupt:
os.killpg(os.getpgid(p.pid), signal.SIGTERM)
raise
if six.PY3:
if out is not None:
out = out.decode("utf-8")
if err is not None:
err = err.decode("utf-8")
return p.returncode, out, err | python | def interruptable_popen(*args, **kwargs):
"""
Shorthand to :py:class:`Popen` followed by :py:meth:`Popen.communicate`. All *args* and *kwargs*
are forwatded to the :py:class:`Popen` constructor. The return code, standard output and
standard error are returned in a tuple. The call :py:meth:`Popen.communicate` is interruptable
by the user.
"""
kwargs["preexec_fn"] = os.setsid
p = subprocess.Popen(*args, **kwargs)
try:
out, err = p.communicate()
except KeyboardInterrupt:
os.killpg(os.getpgid(p.pid), signal.SIGTERM)
raise
if six.PY3:
if out is not None:
out = out.decode("utf-8")
if err is not None:
err = err.decode("utf-8")
return p.returncode, out, err | Shorthand to :py:class:`Popen` followed by :py:meth:`Popen.communicate`. All *args* and *kwargs*
are forwarded to the :py:class:`Popen` constructor. The return code, standard output and
standard error are returned in a tuple. The call :py:meth:`Popen.communicate` is interruptable
by the user. | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/util.py#L537-L560 |
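A minimal usage sketch; the command is arbitrary:

```python
import subprocess

from law.util import interruptable_popen

# run a command and capture decoded output; Ctrl+C terminates the process group
code, out, err = interruptable_popen(["echo", "hello"],
    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
print(code, out)  # 0 hello
```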
riga/law | law/util.py | readable_popen | def readable_popen(*args, **kwargs):
"""
Shorthand to :py:class:`Popen` which yields the output live line-by-line. All *args* and
    *kwargs* are forwarded to the :py:class:`Popen` constructor. When EOF is reached,
``communicate()`` is called on the subprocess and it is yielded. Example:
.. code-block:: python
for line in readable_popen(["some_executable", "--args"]):
if isinstance(line, str):
print(line)
else:
process = line
if process.returncode != 0:
raise Exception("complain ...")
"""
# force pipes
kwargs["stdout"] = subprocess.PIPE
kwargs["stderr"] = subprocess.STDOUT
p = subprocess.Popen(*args, **kwargs)
    for line in iter(lambda: p.stdout.readline(), b"" if six.PY3 else ""):  # EOF sentinel is bytes in py3
if six.PY3:
line = line.decode("utf-8")
yield line.rstrip()
# yield the process itself in the end
p.communicate()
yield p | python | def readable_popen(*args, **kwargs):
"""
Shorthand to :py:class:`Popen` which yields the output live line-by-line. All *args* and
    *kwargs* are forwarded to the :py:class:`Popen` constructor. When EOF is reached,
``communicate()`` is called on the subprocess and it is yielded. Example:
.. code-block:: python
for line in readable_popen(["some_executable", "--args"]):
if isinstance(line, str):
print(line)
else:
process = line
if process.returncode != 0:
raise Exception("complain ...")
"""
# force pipes
kwargs["stdout"] = subprocess.PIPE
kwargs["stderr"] = subprocess.STDOUT
p = subprocess.Popen(*args, **kwargs)
    for line in iter(lambda: p.stdout.readline(), b"" if six.PY3 else ""):  # EOF sentinel is bytes in py3
if six.PY3:
line = line.decode("utf-8")
yield line.rstrip()
# yield the process itself in the end
p.communicate()
yield p | Shorthand to :py:class:`Popen` which yields the output live line-by-line. All *args* and
*kwargs* are forwarded to the :py:class:`Popen` constructor. When EOF is reached,
``communicate()`` is called on the subprocess and it is yielded. Example:
.. code-block:: python
for line in readable_popen(["some_executable", "--args"]):
if isinstance(line, str):
print(line)
else:
process = line
if process.returncode != 0:
raise Exception("complain ...") | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/util.py#L563-L592 |
riga/law | law/util.py | create_hash | def create_hash(inp, l=10, algo="sha256"):
"""
Takes an input *inp* and creates a hash based on an algorithm *algo*. For valid algorithms, see
python's hashlib. *l* corresponds to the maximum length of the returned hash. Internally, the
string representation of *inp* is used.
"""
return getattr(hashlib, algo)(six.b(str(inp))).hexdigest()[:l] | python | def create_hash(inp, l=10, algo="sha256"):
"""
Takes an input *inp* and creates a hash based on an algorithm *algo*. For valid algorithms, see
python's hashlib. *l* corresponds to the maximum length of the returned hash. Internally, the
string representation of *inp* is used.
"""
return getattr(hashlib, algo)(six.b(str(inp))).hexdigest()[:l] | Takes an input *inp* and creates a hash based on an algorithm *algo*. For valid algorithms, see
python's hashlib. *l* corresponds to the maximum length of the returned hash. Internally, the
string representation of *inp* is used. | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/util.py#L595-L601 |
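A few illustrative calls; the hash is deterministic for a given input since ``str(inp)`` is hashed:

```python
from law.util import create_hash

create_hash("foo")            # -> "2c26b46b68"
create_hash("foo", l=6)       # -> "2c26b4"
create_hash(123, algo="md5")  # first 10 hex chars of md5(str(123))
```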
riga/law | law/util.py | copy_no_perm | def copy_no_perm(src, dst):
"""
Copies a file from *src* to *dst* including meta data except for permission bits.
"""
shutil.copy(src, dst)
perm = os.stat(dst).st_mode
shutil.copystat(src, dst)
os.chmod(dst, perm) | python | def copy_no_perm(src, dst):
"""
Copies a file from *src* to *dst* including meta data except for permission bits.
"""
shutil.copy(src, dst)
perm = os.stat(dst).st_mode
shutil.copystat(src, dst)
os.chmod(dst, perm) | Copies a file from *src* to *dst* including meta data except for permission bits. | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/util.py#L604-L611 |
riga/law | law/util.py | makedirs_perm | def makedirs_perm(path, perm=None):
"""
Recursively creates directories up to *path*. If *perm* is set, the permissions of all newly
created directories are set to its value.
"""
if not os.path.exists(path):
if perm is None:
os.makedirs(path)
else:
umask = os.umask(0)
try:
os.makedirs(path, perm)
finally:
os.umask(umask) | python | def makedirs_perm(path, perm=None):
"""
Recursively creates directories up to *path*. If *perm* is set, the permissions of all newly
created directories are set to its value.
"""
if not os.path.exists(path):
if perm is None:
os.makedirs(path)
else:
umask = os.umask(0)
try:
os.makedirs(path, perm)
finally:
os.umask(umask) | Recursively creates directories up to *path*. If *perm* is set, the permissions of all newly
created directories are set to its value. | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/util.py#L614-L627 |
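A usage sketch for makedirs_perm with hypothetical paths; temporarily clearing the umask ensures the requested mode is applied exactly rather than being masked by the process umask.
makedirs_perm("/tmp/a/b/c")              # plain os.makedirs behavior
makedirs_perm("/tmp/x/y/z", perm=0o770)  # every newly created directory gets mode 770 exactly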
riga/law | law/util.py | user_owns_file | def user_owns_file(path, uid=None):
"""
Returns whether a file located at *path* is owned by the user with *uid*. When *uid* is *None*,
the user id of the current process is used.
"""
if uid is None:
uid = os.getuid()
path = os.path.expandvars(os.path.expanduser(path))
return os.stat(path).st_uid == uid | python | def user_owns_file(path, uid=None):
"""
Returns whether a file located at *path* is owned by the user with *uid*. When *uid* is *None*,
the user id of the current process is used.
"""
if uid is None:
uid = os.getuid()
path = os.path.expandvars(os.path.expanduser(path))
return os.stat(path).st_uid == uid | Returns whether a file located at *path* is owned by the user with *uid*. When *uid* is *None*,
the user id of the current process is used. | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/util.py#L630-L638 |
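A usage sketch for user_owns_file with hypothetical paths; both the user home and environment variables are expanded before os.stat is consulted.
if user_owns_file("~/.profile"):
    print("owned by the current user")
if not user_owns_file("/etc/hosts"):  # typically owned by root
    print("owned by a different user")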
riga/law | law/util.py | iter_chunks | def iter_chunks(l, size):
"""
Returns a generator yielding chunks of size *size* of a list, integer or generator *l*. A *size*
smaller than 1 results in no chunking at all.
"""
if isinstance(l, six.integer_types):
l = six.moves.range(l)
if is_lazy_iterable(l):
if size < 1:
yield list(l)
else:
chunk = []
for elem in l:
if len(chunk) < size:
chunk.append(elem)
else:
yield chunk
chunk = [elem]
else:
if chunk:
yield chunk
else:
if size < 1:
yield l
else:
for i in six.moves.range(0, len(l), size):
yield l[i:i + size] | python | def iter_chunks(l, size):
"""
Returns a generator yielding chunks of size *size* of a list, integer or generator *l*. A *size*
smaller than 1 results in no chunking at all.
"""
if isinstance(l, six.integer_types):
l = six.moves.range(l)
if is_lazy_iterable(l):
if size < 1:
yield list(l)
else:
chunk = []
for elem in l:
if len(chunk) < size:
chunk.append(elem)
else:
yield chunk
chunk = [elem]
else:
if chunk:
yield chunk
else:
if size < 1:
yield l
else:
for i in six.moves.range(0, len(l), size):
yield l[i:i + size] | Returns a generator yielding chunks of size *size* of a list, integer or generator *l*. A *size*
smaller than 1 results in no chunking at all. | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/util.py#L641-L669 |
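A usage sketch covering the input kinds iter_chunks distinguishes, assuming is_lazy_iterable (defined elsewhere in law/util.py) treats generators and range objects as lazy; the expected results follow from the branches above.
list(iter_chunks([1, 2, 3, 4, 5], 2))        # sequences are sliced -> [[1, 2], [3, 4], [5]]
list(iter_chunks(5, 2))                      # integers expand to range(5) -> [[0, 1], [2, 3], [4]]
list(iter_chunks((i for i in range(3)), 0))  # size < 1 on a generator -> [[0, 1, 2]]
list(iter_chunks([1, 2, 3], 0))              # size < 1 on a sequence -> [[1, 2, 3]]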
riga/law | law/util.py | human_bytes | def human_bytes(n, unit=None):
"""
Takes a number of bytes *n*, assigns the best matching unit and returns the respective number
and unit string in a tuple. When *unit* is set, that unit is used. Example:
.. code-block:: python
human_bytes(3407872)
# -> (3.25, "MB")
human_bytes(3407872, "kB")
# -> (3328.0, "kB")
"""
if n == 0:
idx = 0
elif unit:
idx = byte_units.index(unit)
else:
idx = int(math.floor(math.log(abs(n), 1024)))
idx = min(idx, len(byte_units) - 1)  # clamp to the last defined unit to avoid an index error
return n / 1024. ** idx, byte_units[idx] | python | def human_bytes(n, unit=None):
"""
Takes a number of bytes *n*, assigns the best matching unit and returns the respective number
and unit string in a tuple. When *unit* is set, that unit is used. Example:
.. code-block:: python
human_bytes(3407872)
# -> (3.25, "MB")
human_bytes(3407872, "kB")
# -> (3328.0, "kB")
"""
if n == 0:
idx = 0
elif unit:
idx = byte_units.index(unit)
else:
idx = int(math.floor(math.log(abs(n), 1024)))
idx = min(idx, len(byte_units) - 1)  # clamp to the last defined unit to avoid an index error
return n / 1024. ** idx, byte_units[idx] | Takes a number of bytes *n*, assigns the best matching unit and returns the respective number
and unit string in a tuple. When *unit* is set, that unit is used. Example:
.. code-block:: python
human_bytes(3407872)
# -> (3.25, "MB")
human_bytes(3407872, "kB")
# -> (3328.0, "kB") | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/util.py#L675-L695 |
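Note: byte_units is a module-level constant of law/util.py that is not shown in this row; judging from the docstring examples it presumably looks like the following sketch.
byte_units = ["bytes", "kB", "MB", "GB", "TB", "PB", "EB"]  # assumed definition, not part of the row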