def set_brightness(self, brightness):
"""
Set the brightness level for the entire display
@param brightness: brightness level (0 - 15)
"""
if brightness > 15:
brightness = 15
brightness |= 0xE0
self.brightness = brightness
self.firmata.i2c_write(0x70, brightness) |
def set_bit_map(self, shape, color):
"""
Populate the bit map with the supplied "shape" and color
and then write the entire bitmap to the display
@param shape: pattern to display
@param color: color for the pattern
"""
for row in range(0, 8):
data = shape[row]
# shift data into buffer
bit_mask = 0x80
for column in range(0, 8):
if data & bit_mask:
self.set_pixel(row, column, color, True)
bit_mask >>= 1
self.output_entire_buffer() |
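For context, a shape passed to set_bit_map is just eight row bytes whose set bits select the lit columns (bit 7 maps to column 0, since the loop walks a 0x80 mask). A hedged usage sketch; the `display` instance and its color constant are assumed to come from the display class these methods belong to:

# Hypothetical usage of set_bit_map: each list entry is one display row.
smiley = [0x3C,  # ..####..
          0x42,  # .#....#.
          0xA5,  # #.#..#.#
          0x81,  # #......#
          0xA5,  # #.#..#.#
          0x99,  # #..##..#
          0x42,  # .#....#.
          0x3C]  # ..####..
display.set_bit_map(smiley, display.LED_GREEN)  # `display` is an instance of the display class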
def output_entire_buffer(self):
"""
Write the entire buffer to the display
"""
green = 0
red = 0
for row in range(0, 8):
for col in range(0, 8):
if self.display_buffer[row][col] == self.LED_GREEN:
green |= 1 << col
elif self.display_buffer[row][col] == self.LED_RED:
red |= 1 << col
elif self.display_buffer[row][col] == self.LED_YELLOW:
green |= 1 << col
red |= 1 << col
elif self.display_buffer[row][col] == self.LED_OFF:
green &= ~(1 << col)
red &= ~(1 << col)
self.firmata.i2c_write(0x70, row * 2, 0, green)
self.firmata.i2c_write(0x70, row * 2 + 1, 0, red) |
def clear_display_buffer(self):
"""
Set all LEDs to off.
"""
for row in range(0, 8):
self.firmata.i2c_write(0x70, row * 2, 0, 0)
self.firmata.i2c_write(0x70, (row * 2) + 1, 0, 0)
for column in range(0, 8):
self.display_buffer[row][column] = 0 |
def auto_discover_board(self, verbose):
"""
This method allows up to 30 seconds for discovery of (i.e., communication with) an Arduino board,
and then determines a pin configuration table for the board.
:param verbose: If True, print board discovery information
:return: True if the board is successfully discovered, or False upon timeout
"""
# get current time
start_time = time.time()
# wait for up to 30 seconds for a successful capability query to occur
while len(self.analog_mapping_query_results) == 0:
if time.time() - start_time > 30:
return False
# keep sending out a capability query until there is a response
self.send_sysex(self.ANALOG_MAPPING_QUERY)
time.sleep(.1)
if verbose:
print("Board initialized in %d seconds" % (time.time() - start_time))
for pin in self.analog_mapping_query_results:
self.total_pins_discovered += 1
# non analog pins will be marked as IGNORE
if pin != self.pymata.IGNORE:
self.number_of_analog_pins_discovered += 1
if verbose:
print('Total Number of Pins Detected = %d' % self.total_pins_discovered)
print('Total Number of Analog Pins Detected = %d' % self.number_of_analog_pins_discovered)
# response table initialization
# for each pin set the mode to input and the last read data value to zero
for pin in range(0, self.total_pins_discovered):
response_entry = [self.pymata.INPUT, 0, None]
self.digital_response_table.append(response_entry)
for pin in range(0, self.number_of_analog_pins_discovered):
response_entry = [self.pymata.INPUT, 0, None]
self.analog_response_table.append(response_entry)
# set up latching tables
for pin in range(0, self.total_pins_discovered):
digital_latch_table_entry = [0, 0, 0, 0, None]
self.digital_latch_table.append(digital_latch_table_entry)
for pin in range(0, self.number_of_analog_pins_discovered):
analog_latch_table_entry = [0, 0, 0, 0, 0, None]
self.analog_latch_table.append(analog_latch_table_entry)
return True |
def report_version(self, data):
"""
This method processes the report version message, sent asynchronously by Firmata when it starts up
or after refresh_report_version() is called
Use the api method api_get_version to retrieve this information
:param data: Message data from Firmata
:return: No return value.
"""
self.firmata_version.append(data[0]) # add major
self.firmata_version.append(data[1]) |
def set_analog_latch(self, pin, threshold_type, threshold_value, cb):
"""
This method "arms" a pin to allow data latching for the pin.
:param pin: Analog pin number (value following an 'A' designator, i.e. A5 = 5)
:param threshold_type: ANALOG_LATCH_GT | ANALOG_LATCH_LT | ANALOG_LATCH_GTE | ANALOG_LATCH_LTE
:param threshold_value: numerical value
:param cb: User provided callback function
"""
with self.pymata.data_lock:
self.analog_latch_table[pin] = [self.LATCH_ARMED, threshold_type, threshold_value, 0, 0, cb] |
def set_digital_latch(self, pin, threshold_type, cb):
"""
This method "arms" a pin to allow data latching for the pin.
:param pin: digital pin number
:param threshold_type: DIGITAL_LATCH_HIGH | DIGITAL_LATCH_LOW
:param cb: User provided callback function
"""
with self.pymata.data_lock:
self.digital_latch_table[pin] = [self.LATCH_ARMED, threshold_type, 0, 0, cb] |
def get_analog_latch_data(self, pin):
"""
This method reads the analog latch table for the specified pin and returns a list that contains:
[pin_number, latch_state, latched_data, time_stamp, callback].
If the latch state is latched, the entry in the table is cleared.
:param pin: pin number
:return: [pin_number, latch_state, latched_data, time_stamp, callback]
"""
with self.pymata.data_lock:
pin_data = self.analog_latch_table[pin]
current_latch_data = [pin,
pin_data[self.LATCH_STATE],
pin_data[self.ANALOG_LATCHED_DATA],
pin_data[self.ANALOG_TIME_STAMP],
pin_data[self.ANALOG_LATCH_CALLBACK]]
# if this is latched data, clear the latch table entry for this pin
if pin_data[self.LATCH_STATE] == self.LATCH_LATCHED:
self.analog_latch_table[pin] = [0, 0, 0, 0, 0, None]
return current_latch_data |
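A hedged sketch of how the latching methods above fit together; the `board` object is hypothetical and is assumed to expose the same method names and constants used in this class:

# Hypothetical: arm an analog latch on pin A2 and consume the latched event.
def on_latch(data):
    # analog_message() delivers [mode, pin, value, time_stamp] for latch callbacks
    print(data)

# With a callback, the event is pushed and the table entry is cleared automatically.
board.set_analog_latch(2, board.ANALOG_LATCH_GT, 512, on_latch)

# Without a callback, poll the table; once LATCH_LATCHED, the entry is returned as
# [pin, latch_state, latched_data, time_stamp, callback] and then cleared.
board.set_analog_latch(2, board.ANALOG_LATCH_GT, 512, None)
latch_info = board.get_analog_latch_data(2)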
def get_digital_latch_data(self, pin):
"""
This method reads the digital latch table for the specified pin and returns a list that contains:
[pin_number, latch_state, latched_data, time_stamp, callback].
If the latch state is latched, the entry in the table is cleared.
:param pin: pin number
:return: [pin_number, latch_state, latched_data, time_stamp, callback]
"""
with self.pymata.data_lock:
pin_data = self.digital_latch_table[pin]
current_latch_data = [pin,
pin_data[self.LATCH_STATE],
pin_data[self.DIGITAL_LATCHED_DATA],
pin_data[self.DIGITAL_TIME_STAMP],
pin_data[self.DIGITAL_LATCH_CALLBACK]]
if pin_data[self.LATCH_STATE] == self.LATCH_LATCHED:
self.digital_latch_table[pin] = [0, 0, 0, 0, None]
return current_latch_data |
def report_firmware(self, data):
"""
This method processes the report firmware message, sent asynchronously by Firmata when it starts up
or after refresh_report_firmware() is called
Use the api method api_get_firmware_version to retrieve this information
:param data: Message data from Firmata
:return: No return value.
"""
self.firmata_firmware.append(data[0]) # add major
self.firmata_firmware.append(data[1]) # add minor
# extract the file name string from the message
# file name is in bytes 2 to the end
name_data = data[2:]
# constructed file name
file_name = []
# the file name is passed in with each character as 2 bytes, the high order byte is equal to 0
# so skip over these zero bytes
for i in name_data[::2]:
file_name.append(chr(i))
# add filename to tuple
self.firmata_firmware.append("".join(file_name)) |
def analog_message(self, data):
"""
This method handles the incoming analog data message.
It stores the data value for the pin in the analog response table.
If a callback function was associated with this pin, the callback function is invoked.
This method also checks to see if latching was requested for the pin. If the latch criteria have been met,
the latching table is updated. If a latching callback function was provided by the user, a latching
notification callback message is sent to the user in place of updating the latching table.
:param data: Message data from Firmata
:return: No return value.
"""
with self.pymata.data_lock:
# hold on to the previous value
previous_value = \
self.analog_response_table[data[self.RESPONSE_TABLE_MODE]][self.RESPONSE_TABLE_PIN_DATA_VALUE]
self.analog_response_table[data[self.RESPONSE_TABLE_MODE]][self.RESPONSE_TABLE_PIN_DATA_VALUE] \
= (data[self.MSB] << 7) + data[self.LSB]
pin = data[0]
pin_response_data_data = self.analog_response_table[pin]
value = pin_response_data_data[self.RESPONSE_TABLE_PIN_DATA_VALUE]
# check to see if there is a callback function attached to this pin
callback = self.analog_response_table[data[self.RESPONSE_TABLE_MODE]][self.RESPONSE_TABLE_CALLBACK]
# send the pin mode, pin number, and current data value
if callback is not None:
if value != previous_value:
# has the value changed since the last report
callback([self.pymata.ANALOG, pin, value])
# check if data is to be latched
# get the analog latching table entry for this pin
latching_entry = self.analog_latch_table[pin]
if latching_entry[self.LATCH_STATE] == self.LATCH_ARMED:
# Has the latching criteria been met
if latching_entry[self.LATCHED_THRESHOLD_TYPE] == self.ANALOG_LATCH_GT:
if value > latching_entry[self.ANALOG_LATCH_DATA_TARGET]:
if latching_entry[self.ANALOG_LATCH_CALLBACK] is not None:
self.analog_latch_table[pin] = [0, 0, 0, 0, 0, None]
latching_entry[self.ANALOG_LATCH_CALLBACK]([self.pymata.ANALOG | self.pymata.LATCH_MODE,
pin, value, time.time()])
else:
updated_latch_entry = latching_entry
updated_latch_entry[self.LATCH_STATE] = self.LATCH_LATCHED
updated_latch_entry[self.ANALOG_LATCHED_DATA] = value
# time stamp it
updated_latch_entry[self.ANALOG_TIME_STAMP] = time.time()
self.analog_latch_table[pin] = updated_latch_entry
else:
pass # haven't hit target
elif latching_entry[self.LATCHED_THRESHOLD_TYPE] == self.ANALOG_LATCH_GTE:
if value >= latching_entry[self.ANALOG_LATCH_DATA_TARGET]:
if latching_entry[self.ANALOG_LATCH_CALLBACK] is not None:
self.analog_latch_table[pin] = [0, 0, 0, 0, 0, None]
latching_entry[self.ANALOG_LATCH_CALLBACK]([self.pymata.ANALOG | self.pymata.LATCH_MODE,
pin, value, time.time()])
else:
updated_latch_entry = latching_entry
updated_latch_entry[self.LATCH_STATE] = self.LATCH_LATCHED
updated_latch_entry[self.ANALOG_LATCHED_DATA] = value
# time stamp it
updated_latch_entry[self.ANALOG_TIME_STAMP] = time.time()
self.analog_latch_table[pin] = updated_latch_entry
else:
pass # haven't hit target:
elif latching_entry[self.LATCHED_THRESHOLD_TYPE] == self.ANALOG_LATCH_LT:
if value < latching_entry[self.ANALOG_LATCH_DATA_TARGET]:
if latching_entry[self.ANALOG_LATCH_CALLBACK] is not None:
latching_entry[self.ANALOG_LATCH_CALLBACK]([self.pymata.ANALOG | self.pymata.LATCH_MODE,
pin, value, time.time()])
self.analog_latch_table[pin] = [0, 0, 0, 0, 0, None]
else:
updated_latch_entry = latching_entry
updated_latch_entry[self.LATCH_STATE] = self.LATCH_LATCHED
updated_latch_entry[self.ANALOG_LATCHED_DATA] = value
# time stamp it
updated_latch_entry[self.ANALOG_TIME_STAMP] = time.time()
self.analog_latch_table[pin] = updated_latch_entry
else:
pass # haven't hit target:
elif latching_entry[self.LATCHED_THRESHOLD_TYPE] == self.ANALOG_LATCH_LTE:
if value <= latching_entry[self.ANALOG_LATCH_DATA_TARGET]:
if latching_entry[self.ANALOG_LATCH_CALLBACK] is not None:
latching_entry[self.ANALOG_LATCH_CALLBACK]([self.pymata.ANALOG | self.pymata.LATCH_MODE,
pin, value, time.time()])
self.analog_latch_table[pin] = [0, 0, 0, 0, 0, None]
else:
updated_latch_entry = latching_entry
updated_latch_entry[self.LATCH_STATE] = self.LATCH_LATCHED
updated_latch_entry[self.ANALOG_LATCHED_DATA] = value
# time stamp it
updated_latch_entry[self.ANALOG_TIME_STAMP] = time.time()
self.analog_latch_table[pin] = updated_latch_entry
else:
pass # haven't hit target:
else:
pass |
def digital_message(self, data):
"""
This method handles the incoming digital message.
It stores the data values in the digital response table.
Data is stored for all 8 bits of a digital port
:param data: Message data from Firmata
:return: No return value.
"""
port = data[0]
port_data = (data[self.MSB] << 7) + data[self.LSB]
# set all the pins for this reporting port
# get the first pin number for this report
pin = port * 8
for pin in range(pin, min(pin + 8, self.total_pins_discovered)):
# shift through all the bit positions and set the digital response table
with self.pymata.data_lock:
# look at the previously stored value for this pin
prev_data = self.digital_response_table[pin][self.RESPONSE_TABLE_PIN_DATA_VALUE]
# get the current value
self.digital_response_table[pin][self.RESPONSE_TABLE_PIN_DATA_VALUE] = port_data & 0x01
# if the values differ and callback is enabled for the pin, then send out the callback
if prev_data != port_data & 0x01:
callback = self.digital_response_table[pin][self.RESPONSE_TABLE_CALLBACK]
if callback:
callback([self.pymata.DIGITAL, pin,
self.digital_response_table[pin][self.RESPONSE_TABLE_PIN_DATA_VALUE]])
# determine if the latch data table needs to be updated for each pin
latching_entry = self.digital_latch_table[pin]
if latching_entry[self.LATCH_STATE] == self.LATCH_ARMED:
if latching_entry[self.LATCHED_THRESHOLD_TYPE] == self.DIGITAL_LATCH_LOW:
if (port_data & 0x01) == 0:
if latching_entry[self.DIGITAL_LATCH_CALLBACK] is not None:
self.digital_latch_table[pin] = [0, 0, 0, 0, None]
latching_entry[self.DIGITAL_LATCH_CALLBACK](
[self.pymata.OUTPUT | self.pymata.LATCH_MODE,
pin, 0, time.time()])
else:
updated_latch_entry = latching_entry
updated_latch_entry[self.LATCH_STATE] = self.LATCH_LATCHED
updated_latch_entry[self.DIGITAL_LATCHED_DATA] = self.DIGITAL_LATCH_LOW
# time stamp it
updated_latch_entry[self.DIGITAL_TIME_STAMP] = time.time()
else:
pass
elif latching_entry[self.LATCHED_THRESHOLD_TYPE] == self.DIGITAL_LATCH_HIGH:
if port_data & 0x01:
if latching_entry[self.DIGITAL_LATCH_CALLBACK] is not None:
self.digital_latch_table[pin] = [0, 0, 0, 0, None]
latching_entry[self.DIGITAL_LATCH_CALLBACK](
[self.pymata.OUTPUT | self.pymata.LATCH_MODE,
pin, 1, time.time()])
else:
updated_latch_entry = latching_entry
updated_latch_entry[self.LATCH_STATE] = self.LATCH_LATCHED
updated_latch_entry[self.DIGITAL_LATCHED_DATA] = self.DIGITAL_LATCH_HIGH
# time stamp it
updated_latch_entry[self.DIGITAL_TIME_STAMP] = time.time()
else:
pass
else:
pass
# get the next data bit
port_data >>= 1 |
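The loop in digital_message above unpacks one reporting port into eight pin states; a tiny stand-alone illustration with hypothetical values:

# Unpack an 8-pin Firmata port value (port 1 covers pins 8-15) into per-pin states.
port = 1
port_data = 0b01000101            # value reassembled from the LSB/MSB pair
for offset in range(8):
    pin = port * 8 + offset
    state = (port_data >> offset) & 0x01
    print('pin %d -> %d' % (pin, state))   # pins 8, 10 and 14 report HIGH here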
def encoder_data(self, data):
"""
This method handles the incoming encoder data message and stores
the data in the digital response table.
:param data: Message data from Firmata
:return: No return value.
"""
prev_val = self.digital_response_table[data[self.RESPONSE_TABLE_MODE]][self.RESPONSE_TABLE_PIN_DATA_VALUE]
val = int((data[self.MSB] << 7) + data[self.LSB])
# set value so that it shows positive and negative values
if val > 8192:
val -= 16384
pin = data[0]
with self.pymata.data_lock:
self.digital_response_table[data[self.RESPONSE_TABLE_MODE]][self.RESPONSE_TABLE_PIN_DATA_VALUE] = val
if prev_val != val:
callback = self.digital_response_table[pin][self.RESPONSE_TABLE_CALLBACK]
if callback is not None:
callback([self.pymata.ENCODER, pin,
self.digital_response_table[pin][self.RESPONSE_TABLE_PIN_DATA_VALUE]]) |
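The sign adjustment in encoder_data treats the 14-bit Firmata value as two's complement; a small stand-alone sketch of that decoding, with hypothetical byte values:

# Decode a signed position from Firmata's two 7-bit data bytes, as encoder_data() does.
def decode_14bit_signed(lsb, msb):
    val = (msb << 7) + lsb        # 0 .. 16383
    if val > 8192:                # same threshold used above
        val -= 16384              # map the upper half of the range onto negative values
    return val

assert decode_14bit_signed(0x7F, 0x7F) == -1   # 16383 wraps around to -1
assert decode_14bit_signed(0x05, 0x00) == 5    # small positive counts pass through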
def sonar_data(self, data):
"""
This method handles the incoming sonar data message and stores
the data in the response table.
:param data: Message data from Firmata
:return: No return value.
"""
val = int((data[self.MSB] << 7) + data[self.LSB])
pin_number = data[0]
with self.pymata.data_lock:
sonar_pin_entry = self.active_sonar_map[pin_number]
# also write it into the digital response table
self.digital_response_table[data[self.RESPONSE_TABLE_MODE]][self.RESPONSE_TABLE_PIN_DATA_VALUE] = val
# send data through callback if there is a callback function for the pin
if sonar_pin_entry[0] is not None:
# check if value changed since last reading
if sonar_pin_entry[1] != val:
self.active_sonar_map[pin_number][0]([self.pymata.SONAR, pin_number, val])
# update the data in the table with latest value
sonar_pin_entry[1] = val
self.active_sonar_map[pin_number] = sonar_pin_entry |
def send_sysex(self, sysex_command, sysex_data=None):
"""
This method will send a Sysex command to Firmata with any accompanying data
:param sysex_command: sysex command
:param sysex_data: data for command
:return : No return value.
"""
if not sysex_data:
sysex_data = []
# convert the message command and data to characters
sysex_message = chr(self.START_SYSEX)
sysex_message += chr(sysex_command)
if len(sysex_data):
for d in sysex_data:
sysex_message += chr(d)
sysex_message += chr(self.END_SYSEX)
for data in sysex_message:
self.pymata.transport.write(data) |
def send_command(self, command):
"""
This method is used to transmit a non-sysex command.
:param command: Command to send to firmata includes command + data formatted by caller
:return : No return value.
"""
send_message = ""
for i in command:
send_message += chr(i)
for data in send_message:
self.pymata.transport.write(data) |
def system_reset(self):
"""
Send the reset command to the Arduino.
It resets the response tables to their initial values
:return: No return value
"""
data = chr(self.SYSTEM_RESET)
self.pymata.transport.write(data)
# response table re-initialization
# for each pin set the mode to input and the last read data value to zero
with self.pymata.data_lock:
# remove all old entries from existing tables
for _ in range(len(self.digital_response_table)):
self.digital_response_table.pop()
for _ in range(len(self.analog_response_table)):
self.analog_response_table.pop()
# reinitialize tables
for pin in range(0, self.total_pins_discovered):
response_entry = [self.pymata.INPUT, 0, None]
self.digital_response_table.append(response_entry)
for pin in range(0, self.number_of_analog_pins_discovered):
response_entry = [self.pymata.INPUT, 0, None]
self.analog_response_table.append(response_entry) |
def _string_data(self, data):
"""
This method handles the incoming string data message from Firmata.
The string is printed to the console
:param data: Message data from Firmata
:return: No return value.
"""
print("_string_data:")
string_to_print = []
for i in data[::2]:
string_to_print.append(chr(i))
print("".join(string_to_print)) |
def i2c_reply(self, data):
"""
This method receives replies to i2c_read requests. It stores the data for each i2c device
address in a dictionary called i2c_map. The data is retrieved via a call to i2c_get_read_data()
in pymata.py
If a callback was specified in pymata.i2c_read, the raw data is sent through the callback
:param data: raw data returned from i2c device
"""
reply_data = []
address = (data[0] & 0x7f) + (data[1] << 7)
register = (data[2] & 0x7f) + (data[3] << 7)
reply_data.append(register)
for i in range(4, len(data), 2):
data_item = (data[i] & 0x7f) + (data[i + 1] << 7)
reply_data.append(data_item)
# retrieve the data entry for this address from the i2c map
if address in self.i2c_map:
i2c_data = self.i2c_map.get(address, None)
i2c_data[1] = reply_data
self.i2c_map[address] = i2c_data
# is there a call back for this entry?
# if yes, return a list of bytes through the callback
if i2c_data[0] is not None:
i2c_data[0]([self.pymata.I2C, address, reply_data]) |
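Firmata sends each payload byte as a 7-bit LSB/MSB pair, which is why i2c_reply reassembles values with (data[i] & 0x7f) + (data[i + 1] << 7). A minimal round-trip sketch with hypothetical values:

# Split a value into Firmata's 7-bit LSB/MSB pair and put it back together.
def to_two_7bit(value):
    return value & 0x7F, (value >> 7) & 0x7F

def from_two_7bit(lsb, msb):
    return (lsb & 0x7F) + (msb << 7)

lsb, msb = to_two_7bit(0x1A5)            # a 9-bit register value, for example
assert from_two_7bit(lsb, msb) == 0x1A5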
def run(self):
"""
This method starts the thread that continuously runs to receive and interpret
messages coming from Firmata. This must be the last method in this file
It also checks the deque for messages to be sent to Firmata.
"""
# To add a command to the command dispatch table, append here.
self.command_dispatch.update({self.REPORT_VERSION: [self.report_version, 2]})
self.command_dispatch.update({self.REPORT_FIRMWARE: [self.report_firmware, 1]})
self.command_dispatch.update({self.ANALOG_MESSAGE: [self.analog_message, 2]})
self.command_dispatch.update({self.DIGITAL_MESSAGE: [self.digital_message, 2]})
self.command_dispatch.update({self.ENCODER_DATA: [self.encoder_data, 3]})
self.command_dispatch.update({self.SONAR_DATA: [self.sonar_data, 3]})
self.command_dispatch.update({self.STRING_DATA: [self._string_data, 2]})
self.command_dispatch.update({self.I2C_REPLY: [self.i2c_reply, 2]})
self.command_dispatch.update({self.CAPABILITY_RESPONSE: [self.capability_response, 2]})
self.command_dispatch.update({self.PIN_STATE_RESPONSE: [self.pin_state_response, 2]})
self.command_dispatch.update({self.ANALOG_MAPPING_RESPONSE: [self.analog_mapping_response, 2]})
self.command_dispatch.update({self.STEPPER_DATA: [self.stepper_version_response, 2]})
while not self.is_stopped():
if len(self.pymata.command_deque):
# get next byte from the deque and process it
data = self.pymata.command_deque.popleft()
# this list will be populated with the received data for the command
command_data = []
# process sysex commands
if data == self.START_SYSEX:
# next char is the actual sysex command
# wait until we can get data from the deque
while len(self.pymata.command_deque) == 0:
pass
sysex_command = self.pymata.command_deque.popleft()
# retrieve the associated command_dispatch entry for this command
dispatch_entry = self.command_dispatch.get(sysex_command)
# get a "pointer" to the method that will process this command
method = dispatch_entry[0]
# now get the rest of the data excluding the END_SYSEX byte
end_of_sysex = False
while not end_of_sysex:
# wait for more data to arrive
while len(self.pymata.command_deque) == 0:
pass
data = self.pymata.command_deque.popleft()
if data != self.END_SYSEX:
command_data.append(data)
else:
end_of_sysex = True
# invoke the method to process the command
method(command_data)
# go to the beginning of the loop to process the next command
continue
# is this a command byte in the range of 0x80-0xff - these are the non-sysex messages
elif 0x80 <= data <= 0xff:
# look up the method for the command in the command dispatch table
# for the digital reporting the command value is modified with port number
# the handler needs the port to properly process, so decode that from the command and
# place in command_data
if 0x90 <= data <= 0x9f:
port = data & 0xf
command_data.append(port)
data = 0x90
# the pin number for analog data is embedded in the command so, decode it
elif 0xe0 <= data <= 0xef:
pin = data & 0xf
command_data.append(pin)
data = 0xe0
else:
pass
dispatch_entry = self.command_dispatch.get(data)
# this calls the method retrieved from the dispatch table
method = dispatch_entry[0]
# get the number of parameters that this command provides
num_args = dispatch_entry[1]
# look at the number of args that the selected method requires
# now get that number of bytes to pass to the called method
for i in range(num_args):
while len(self.pymata.command_deque) == 0:
pass
data = self.pymata.command_deque.popleft()
command_data.append(data)
# go execute the command with the argument list
method(command_data)
# go to the beginning of the loop to process the next command
continue
else:
time.sleep(.1) |
def retrieve_url(self, url):
"""
Use requests to fetch remote content
"""
try:
r = requests.get(url)
except requests.ConnectionError:
raise exceptions.RetrieveError('Connection fail')
if r.status_code >= 400:
raise exceptions.RetrieveError('Connected, but status code is %s' % (r.status_code))
real_url = r.url
content = r.content
try:
content_type = r.headers['Content-Type']
except KeyError:
content_type, encoding = mimetypes.guess_type(real_url, strict=False)
self.response = r
return content_type.lower(), content |
def parse_html(self, html):
"""
Use BeautifulSoup to parse HTML / XML
http://www.crummy.com/software/BeautifulSoup/bs4/doc/#specifying-the-parser-to-use
"""
soup = BeautifulSoup(html, self.parser)
title_tag = soup.find('title')
self.result.title = title_tag.string if title_tag else None
self.soup = soup
return soup |
def image_urls(self):
"""
Combine finder_image_urls and extender_image_urls,
remove duplicate but keep order
"""
all_image_urls = self.finder_image_urls[:]
for image_url in self.extender_image_urls:
if image_url not in all_image_urls:
all_image_urls.append(image_url)
return all_image_urls |
def ggpht_s1600_extender(pipeline_index,
finder_image_urls,
extender_image_urls=[],
*args, **kwargs):
"""
Example:
http://lh4.ggpht.com/-fFi-qJRuxeY/UjwHSOTHGOI/AAAAAAAArgE/SWTMT-hXzB4/s640/Celeber-ru-Emma-Watson-Net-A-Porter-The-Edit-Magazine-Photoshoot-2013-01.jpg
to
http://lh4.ggpht.com/-fFi-qJRuxeY/UjwHSOTHGOI/AAAAAAAArgE/SWTMT-hXzB4/s1600/Celeber-ru-Emma-Watson-Net-A-Porter-The-Edit-Magazine-Photoshoot-2013-01.jpg
"""
now_extender_image_urls = []
search_re = re.compile(r'/s\d+/', re.IGNORECASE)
for image_url in finder_image_urls:
if 'ggpht.com/' in image_url.lower():
if search_re.search(image_url):
extender_image_url = search_re.sub('/s1600/', image_url)
now_extender_image_urls.append(extender_image_url)
output = {}
output['extender_image_urls'] = extender_image_urls + now_extender_image_urls
return output |
def background_image_finder(pipeline_index,
soup,
finder_image_urls=[],
*args, **kwargs):
"""
Find image URL in background-image
Example:
<div style="width: 100%; height: 100%; background-image: url(http://distilleryimage10.ak.instagram.com/bde04558a43b11e28e5d22000a1f979a_7.jpg);" class="Image iLoaded iWithTransition Frame" src="http://distilleryimage10.ak.instagram.com/bde04558a43b11e28e5d22000a1f979a_7.jpg"></div>
to
http://distilleryimage10.ak.instagram.com/bde04558a43b11e28e5d22000a1f979a_7.jpg
"""
now_finder_image_urls = []
for tag in soup.find_all(style=True):
style_string = tag['style']
if 'background-image' in style_string.lower():
style = cssutils.parseStyle(style_string)
background_image = style.getProperty('background-image')
if background_image:
for property_value in background_image.propertyValue:
background_image_url = str(property_value.value)
if background_image_url:
if (background_image_url not in finder_image_urls) and \
(background_image_url not in now_finder_image_urls):
now_finder_image_urls.append(background_image_url)
output = {}
output['finder_image_urls'] = finder_image_urls + now_finder_image_urls
return output |
def img_src_finder(pipeline_index,
soup,
finder_image_urls=[],
*args, **kwargs):
"""
Find image URL in <img>'s src attribute
"""
now_finder_image_urls = []
for img in soup.find_all('img'):
src = img.get('src', None)
if src:
src = str(src)
if (src not in finder_image_urls) and \
(src not in now_finder_image_urls):
now_finder_image_urls.append(src)
output = {}
output['finder_image_urls'] = finder_image_urls + now_finder_image_urls
return output |
def a_href_finder(pipeline_index,
soup,
finder_image_urls=[],
*args, **kwargs):
"""
Find image URL in <a>'s href attribute
"""
now_finder_image_urls = []
for a in soup.find_all('a'):
href = a.get('href', None)
if href:
href = str(href)
if any(href.lower().endswith(ext) for ext in ('.jpg', '.jpeg', '.gif', '.png')):
if (href not in finder_image_urls) and \
(href not in now_finder_image_urls):
now_finder_image_urls.append(href)
output = {}
output['finder_image_urls'] = finder_image_urls + now_finder_image_urls
return output |
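The finder functions above share one pipeline contract: they take the parsed soup plus the URLs collected so far and return a dict whose 'finder_image_urls' entry extends that list. A hedged sketch of wiring the pieces together by hand; the `fetcher` instance and the page URL are hypothetical:

# Hypothetical manual run of the steps shown above.
content_type, content = fetcher.retrieve_url('http://example.com/gallery.html')
soup = fetcher.parse_html(content)

urls = []
for finder in (img_src_finder, a_href_finder, background_image_finder):
    result = finder(pipeline_index=0, soup=soup, finder_image_urls=urls)
    urls = result['finder_image_urls']
# `urls` now holds the image URLs found in src, href and background-image, in order.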
def _getnodenamefor(self, name):
"Return the node name where the ``name`` would land to"
return 'node_' + str(
(abs(binascii.crc32(b(name)) & 0xffffffff) % self.no_servers) + 1) |
def getnodefor(self, name):
"Return the node where the ``name`` would land to"
node = self._getnodenamefor(name)
return {node: self.cluster['nodes'][node]} |
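To make the key-to-node hashing concrete, a stand-alone sketch of the same computation; the two-node layout is hypothetical, and name.encode() stands in for the b() compatibility helper used above:

import binascii

def node_name_for(name, no_servers=2):
    # same CRC32-modulo mapping as _getnodenamefor()
    return 'node_' + str((abs(binascii.crc32(name.encode()) & 0xffffffff) % no_servers) + 1)

print(node_name_for('user:1001'))   # stable for a given key, e.g. 'node_1' or 'node_2'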
def object(self, infotype, key):
"Return the encoding, idletime, or refcount about the key"
redisent = self.redises[self._getnodenamefor(key) + '_slave']
return getattr(redisent, 'object')(infotype, key) |
def _rc_brpoplpush(self, src, dst, timeout=0):
"""
Pop a value off the tail of ``src``, push it on the head of ``dst``
and then return it.
This command blocks until a value is in ``src`` or until ``timeout``
seconds elapse, whichever is first. A ``timeout`` value of 0 blocks
forever.
Not atomic
"""
rpop = self.brpop(src, timeout)
if rpop is not None:
self.lpush(dst, rpop[1])
return rpop[1]
return None |
def _rc_rpoplpush(self, src, dst):
"""
RPOP a value off of the ``src`` list and LPUSH it
on to the ``dst`` list. Returns the value.
"""
rpop = self.rpop(src)
if rpop is not None:
self.lpush(dst, rpop)
return rpop
return None |
def _rc_sdiff(self, src, *args):
"""
Returns the members of the set resulting from the difference between
the first set and all the successive sets.
"""
args = list_or_args(src, args)
src_set = self.smembers(args.pop(0))
if src_set != set([]):
for key in args:
src_set.difference_update(self.smembers(key))
return src_set |
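These _rc_* helpers emulate Redis multi-key commands on the client side because the keys may live on different nodes. A hedged sketch of what the SDIFF emulation computes; `rc` is a hypothetical cluster client exposing the methods shown here, and the byte-string result assumes redis-py's default (non-decoded) responses:

# Hypothetical client-side SDIFF across keys that may sit on different nodes.
rc.sadd('letters:a', 'x', 'y', 'z')
rc.sadd('letters:b', 'y')
print(rc._rc_sdiff('letters:a', 'letters:b'))   # {b'x', b'z'}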
def _rc_sdiffstore(self, dst, src, *args):
"""
Store the difference of sets ``src``, ``args`` into a new
set named ``dst``. Returns the number of keys in the new set.
"""
args = list_or_args(src, args)
result = self.sdiff(*args)
if result != set([]):
return self.sadd(dst, *list(result))
return 0 |
def _rc_sinter(self, src, *args):
"""
Returns the members of the set resulting from the intersection of
the first set and all the successive sets.
"""
args = list_or_args(src, args)
src_set = self.smembers(args.pop(0))
if src_set != set([]):
for key in args:
src_set.intersection_update(self.smembers(key))
return src_set |
def _rc_sinterstore(self, dst, src, *args):
"""
Store the difference of sets ``src``, ``args`` into a new
set named ``dest``. Returns the number of keys in the new set.
"""
args = list_or_args(src, args)
result = self.sinter(*args)
if result != set([]):
return self.sadd(dst, *list(result))
return 0 |
def _rc_smove(self, src, dst, value):
"""
Move ``value`` from set ``src`` to set ``dst``
not atomic
"""
if self.type(src) != b("set"):
return self.smove(src + "{" + src + "}", dst, value)
if self.type(dst) != b("set"):
return self.smove(dst + "{" + dst + "}", src, value)
if self.srem(src, value):
return 1 if self.sadd(dst, value) else 0
return 0 |
def _rc_sunion(self, src, *args):
"""
Returns the members of the set resulting from the union between
the first set and all the successive sets.
"""
args = list_or_args(src, args)
src_set = self.smembers(args.pop(0))
if src_set is not set([]):
for key in args:
src_set.update(self.smembers(key))
return src_set |
def _rc_sunionstore(self, dst, src, *args):
"""
Store the union of sets ``src``, ``args`` into a new
set named ``dst``. Returns the number of keys in the new set.
"""
args = list_or_args(src, args)
result = self.sunion(*args)
if result != set([]):
return self.sadd(dst, *list(result))
return 0 |
def _rc_mset(self, mapping):
"Sets each key in the ``mapping`` dict to its corresponding value"
result = True
for k, v in iteritems(mapping):
result = result and self.set(k, v)
return result |
def _rc_msetnx(self, mapping):
"""
Sets each key in the ``mapping`` dict to its corresponding value if
none of the keys are already set
"""
for k in iterkeys(mapping):
if self.exists(k):
return False
return self._rc_mset(mapping) |
def _rc_mget(self, keys, *args):
"""
Returns a list of values ordered identically to ``*args``
"""
args = list_or_args(keys, args)
result = []
for key in args:
result.append(self.get(key))
return result |
def _rc_rename(self, src, dst):
"""
Rename key ``src`` to ``dst``
"""
if src == dst:
return self.rename(src + "{" + src + "}", src)
if not self.exists(src):
return self.rename(src + "{" + src + "}", src)
self.delete(dst)
ktype = self.type(src)
kttl = self.ttl(src)
if ktype == b('none'):
return False
if ktype == b('string'):
self.set(dst, self.get(src))
elif ktype == b('hash'):
self.hmset(dst, self.hgetall(src))
elif ktype == b('list'):
for k in self.lrange(src, 0, -1):
self.rpush(dst, k)
elif ktype == b('set'):
for k in self.smembers(src):
self.sadd(dst, k)
elif ktype == b('zset'):
for k, v in self.zrange(src, 0, -1, withscores=True):
self.zadd(dst, v, k)
# Handle keys with an expire time set
kttl = -1 if kttl is None or kttl < 0 else int(kttl)
if kttl != -1:
self.expire(dst, kttl)
return self.delete(src) |
def _rc_renamenx(self, src, dst):
"Rename key ``src`` to ``dst`` if ``dst`` doesn't already exist"
if self.exists(dst):
return False
return self._rc_rename(src, dst) |
def _rc_keys(self, pattern='*'):
"Returns a list of keys matching ``pattern``"
result = []
for alias, redisent in iteritems(self.redises):
if alias.find('_slave') == -1:
continue
result.extend(redisent.keys(pattern))
return result |
def _rc_dbsize(self):
"Returns the number of keys in the current database"
result = 0
for alias, redisent in iteritems(self.redises):
if alias.find('_slave') == -1:
continue
result += redisent.dbsize()
return result |
def prepare(self):
"""Prepare the date in the instance state for serialization.
"""
# Create a collection for the attributes and elements of
# this instance.
attributes, elements = OrderedDict(), []
# Initialize the namespace map.
nsmap = dict([self.meta.namespace])
# Iterate through all declared items.
for name, item in self._items.items():
if isinstance(item, Attribute):
# Prepare the item as an attribute.
attributes[name] = item.prepare(self)
elif isinstance(item, Element):
# Update the nsmap.
nsmap.update([item.namespace])
# Prepare the item as an element.
elements.append(item)
# Return the collected attributes and elements
return attributes, elements, nsmap |
def sign(xml, stream, password=None):
"""
Sign an XML document with the given private key file. This will add a
<Signature> element to the document.
:param lxml.etree._Element xml: The document to sign
:param file stream: The private key to sign the document with
:param str password: The password used to access the private key
:rtype: None
Example usage:
::
from saml import schema
from lxml import etree
document = schema.AuthenticationRequest()
xml_document = document.serialize()
with open('my_key_file.pem', 'r+') as stream:
sign(xml_document, stream)
print etree.tostring(xml_document)
Produces the following XML document:
.. code-block:: xml
<samlp:AuthnRequest
xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol"
xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion"
Version="2.0" ID="_6087de0b111b44349a70ff40191a4c0c"
IssueInstant="2015-03-16T21:06:39Z">
<Signature xmlns="http://www.w3.org/2000/09/xmldsig#">
<SignedInfo>
<CanonicalizationMethod
Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#"/>
<SignatureMethod
Algorithm="http://www.w3.org/2000/
09/xmldsig#rsa-sha1"/>
<Reference>
<Transforms>
<Transform
Algorithm="http://www.w3.org/2000/
09/xmldsig#enveloped-signature"/>
</Transforms>
<DigestMethod
Algorithm="http://www.w3.org/2000/
09/xmldsig#sha1"/>
<DigestValue>
94O1FOjRE4JQYVDqStkYzne9StQ=
</DigestValue>
</Reference>
</SignedInfo>
<SignatureValue>
aFYRRjtB3bDyLLJzLZmsn0K4SXmOpFYJ+8R8D31VojgiF37FOElbE56UFbm8BAjn
l2AixrUGXP4djxoxxnfBD/reYw5yVuIVXlMxKec784nF2V4GyrfwJOKaNmlVPkq5
c8SI+EkKJ02mwiail0Zvjb9FzwvlYD+osMSXvJXVqnGHQDVFlhwbBRRVB6t44/M3
TzC4mLSVhuvcpsm4GTQSpGkHP7HvweKN/OTc0aTy8Kh/YUrImwnUCii+J0EW4nGg
71eZyq/IiSPnTD09WDHsWe3g29kpicZXqrQCWeLE2zfVKtyxxs7PyEmodH19jXyz
wh9hQ8t6PFO47Ros5aV0bw==
</SignatureValue>
</Signature>
</samlp:AuthnRequest>
"""
# Import xmlsec here to delay initializing the C library in
# case we don't need it.
import xmlsec
# Resolve the SAML/2.0 element in question.
from saml.schema.base import _element_registry
element = _element_registry.get(xml.tag)
# Create a signature template for RSA-SHA1 enveloped signature.
signature_node = xmlsec.template.create(
xml,
xmlsec.Transform.EXCL_C14N,
xmlsec.Transform.RSA_SHA1)
# Add the <ds:Signature/> node to the document.
xml.insert(element.meta.signature_index, signature_node)
# Add the <ds:Reference/> node to the signature template.
ref = xmlsec.template.add_reference(
signature_node, xmlsec.Transform.SHA1)
# Add the enveloped transform descriptor.
xmlsec.template.add_transform(ref, xmlsec.Transform.ENVELOPED)
# Create a digital signature context (no key manager is needed).
ctx = xmlsec.SignatureContext()
# Load private key.
key = xmlsec.Key.from_memory(stream, xmlsec.KeyFormat.PEM, password)
# Set the key on the context.
ctx.key = key
# Sign the template.
ctx.sign(signature_node) |
def verify(xml, stream):
"""
Verify the signature of an XML document with the given certificate.
Returns `True` if the document is signed with a valid signature.
Returns `False` if the document is not signed or if the signature is
invalid.
:param lxml.etree._Element xml: The document to verify
:param file stream: The certificate (public key) used to verify the document
:rtype: Boolean
"""
# Import xmlsec here to delay initializing the C library in
# case we don't need it.
import xmlsec
# Find the <Signature/> node.
signature_node = xmlsec.tree.find_node(xml, xmlsec.Node.SIGNATURE)
if signature_node is None:
# No `signature` node found; we cannot verify
return False
# Create a digital signature context (no key manager is needed).
ctx = xmlsec.SignatureContext()
# Register <Response/> and <Assertion/>
ctx.register_id(xml)
for assertion in xml.xpath("//*[local-name()='Assertion']"):
ctx.register_id(assertion)
# Load the public key.
key = None
for fmt in [
xmlsec.KeyFormat.PEM,
xmlsec.KeyFormat.CERT_PEM]:
stream.seek(0)
try:
key = xmlsec.Key.from_memory(stream, fmt)
break
except ValueError:
# xmlsec now throws when it can't load the key
pass
# Set the key on the context.
ctx.key = key
# Verify the signature.
try:
ctx.verify(signature_node)
return True
except Exception:
return False |
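A hedged usage sketch for verify; the file names are hypothetical, and xml_document is any parsed lxml element such as a SAML response:

# Hypothetical: verify a parsed SAML response against an IdP certificate.
from lxml import etree

xml_document = etree.parse('saml_response.xml').getroot()
with open('idp_certificate.pem', 'rb') as stream:
    if verify(xml_document, stream):
        print('signature is valid')
    else:
        print('document is unsigned or the signature is invalid')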
def get_queryset(self, request):
"""
Add number of photos to each gallery.
"""
qs = super(GalleryAdmin, self).get_queryset(request)
return qs.annotate(photo_count=Count('photos')) |
def save_model(self, request, obj, form, change):
"""
Set currently authenticated user as the author of the gallery.
"""
obj.author = request.user
obj.save() |
def save_formset(self, request, form, formset, change):
"""
For each photo, set its author to the currently authenticated user.
"""
instances = formset.save(commit=False)
for instance in instances:
if isinstance(instance, Photo):
instance.author = request.user
instance.save() |
def parse_byteranges(cls, environ):
"""
Outputs a list of tuples with ranges or the empty list
According to the rfc, start or end values can be omitted
"""
r = []
s = environ.get(cls.header_range, '').replace(' ','').lower()
if s:
l = s.split('=')
if len(l) == 2:
unit, vals = tuple(l)
if unit == 'bytes' and vals:
gen_rng = ( tuple(rng.split('-')) for rng in vals.split(',') if '-' in rng )
for start, end in gen_rng:
if start or end:
r.append( (int(start) if start else None, int(end) if end else None) )
return r |
def check_ranges(cls, ranges, length):
"""Removes errored ranges"""
result = []
for start, end in ranges:
if isinstance(start, int) or isinstance(end, int):
if isinstance(start, int) and not (0 <= start < length):
continue
elif isinstance(start, int) and isinstance(end, int) and not (start <= end):
continue
elif start is None and end == 0:
continue
result.append( (start,end) )
return result |
def convert_ranges(cls, ranges, length):
"""Converts to valid byte ranges"""
result = []
for start, end in ranges:
if end is None:
result.append( (start, length-1) )
elif start is None:
s = length - end
result.append( (0 if s < 0 else s, length-1) )
else:
result.append( (start, end if end < length else length-1) )
return result |
def condense_ranges(cls, ranges):
"""Sorts and removes overlaps"""
result = []
if ranges:
ranges.sort(key=lambda tup: tup[0])
result.append(ranges[0])
for i in range(1, len(ranges)):
if result[-1][1] + 1 >= ranges[i][0]:
result[-1] = (result[-1][0], max(result[-1][1], ranges[i][1]))
else:
result.append(ranges[i])
return result |
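Taken together, the four classmethods above form a small pipeline for an HTTP Range header: parse the raw value, drop impossible ranges, resolve open-ended ranges against the resource length, then merge overlaps. A hedged walk-through, assuming the class is named RangeHandler, header_range is the WSGI key 'HTTP_RANGE', and the resource is 1000 bytes long:

# Hypothetical walk-through of the Range-header pipeline for a 1000-byte resource.
environ = {'HTTP_RANGE': 'bytes=0-99,50-149,900-,-100'}
length = 1000

ranges = RangeHandler.parse_byteranges(environ)       # [(0, 99), (50, 149), (900, None), (None, 100)]
ranges = RangeHandler.check_ranges(ranges, length)    # all four survive the sanity checks
ranges = RangeHandler.convert_ranges(ranges, length)  # [(0, 99), (50, 149), (900, 999), (900, 999)]
ranges = RangeHandler.condense_ranges(ranges)         # [(0, 149), (900, 999)]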
def social_widget_render(parser, token):
""" Renders the selected social widget. You can specify optional settings
that will be passed to widget template.
Sample usage:
{% social_widget_render widget_template key1=val1 key2=val2 %}
For example to render Twitter follow button you can use code like this:
{% social_widget_render 'twitter/follow_button.html' username="ev" %}
"""
bits = token.split_contents()
tag_name = bits[0]
if len(bits) < 2:
raise TemplateSyntaxError("'%s' takes at least one argument" %
tag_name)
args = []
kwargs = {}
bits = bits[1:]
if len(bits):
for bit in bits:
match = kwarg_re.match(bit)
if not match:
raise TemplateSyntaxError("Malformed arguments to %s tag" %
tag_name)
name, value = match.groups()
if name:
# Replacing hyphens with underscores because
# variable names cannot contain hyphens.
name = name.replace('-', '_')
kwargs[name] = parser.compile_filter(value)
else:
args.append(parser.compile_filter(value))
return SocialWidgetNode(args, kwargs) |
def add(self, addend_mat, axis=1):
"""
In-place addition
:param addend_mat: A matrix to be added on the Sparse3DMatrix object
:param axis: The dimension along the addend_mat is added
:return: Nothing (as it performs in-place operations)
"""
if self.finalized:
if axis == 0:
raise NotImplementedError('The method is not yet implemented for the axis.')
elif axis == 1:
for hid in xrange(self.shape[1]):
self.data[hid] = self.data[hid] + addend_mat
elif axis == 2:
raise NotImplementedError('The method is not yet implemented for the axis.')
else:
raise RuntimeError('The axis should be 0, 1, or 2.')
else:
raise RuntimeError('The original matrix must be finalized.') |
def multiply(self, multiplier, axis=None):
"""
In-place multiplication
:param multiplier: A matrix or vector to be multiplied
:param axis: The dim along which 'multiplier' is multiplied
:return: Nothing (as it performs in-place operations)
"""
if self.finalized:
if multiplier.ndim == 1:
if axis == 0: # multiplier is np.array of length |haplotypes|
raise NotImplementedError('The method is not yet implemented for the axis.')
elif axis == 1: # multiplier is np.array of length |loci|
sz = len(multiplier)
multiplier_mat = lil_matrix((sz, sz))
multiplier_mat.setdiag(multiplier)
for hid in xrange(self.shape[1]):
self.data[hid] = self.data[hid] * multiplier_mat
elif axis == 2: # multiplier is np.array of length |reads|
for hid in xrange(self.shape[1]):
self.data[hid].data *= multiplier[self.data[hid].indices]
else:
raise RuntimeError('The axis should be 0, 1, or 2.')
elif multiplier.ndim == 2:
if axis == 0: # multiplier is sp.sparse matrix of shape |reads| x |haplotypes|
for hid in xrange(self.shape[1]):
self.data[hid].data *= multiplier[self.data[hid].indices, hid]
elif axis == 1: # multiplier is sp.sparse matrix of shape |reads| x |loci|
for hid in xrange(self.shape[1]):
self.data[hid] = self.data[hid].multiply(multiplier)
elif axis == 2: # multiplier is np.matrix of shape |haplotypes| x |loci|
for hid in xrange(self.shape[1]):
multiplier_vec = multiplier[hid, :]
multiplier_vec = multiplier_vec.ravel()
self.data[hid].data *= multiplier_vec.repeat(np.diff(self.data[hid].indptr))
else:
raise RuntimeError('The axis should be 0, 1, or 2.')
elif isinstance(multiplier, Sparse3DMatrix): # multiplier is Sparse3DMatrix object
for hid in xrange(self.shape[1]):
self.data[hid] = self.data[hid].multiply(multiplier.data[hid])
else:
raise RuntimeError('The multiplier should be 1, 2 dimensional numpy array or a Sparse3DMatrix object.')
else:
raise RuntimeError('The original matrix must be finalized.') |
def prepare(self, pseudocount=0.0, lenfile=None, read_length=100):
"""
Initializes the probability of read origin according to the alignment profile
:param pseudocount: Uniform prior for allele specificity estimation
:param lenfile: Name of a tab-delimited file of transcript (or transcript_haplotype) lengths
:param read_length: Read length, used to convert transcript lengths into effective lengths
:return: Nothing (as it performs in-place operations)
"""
if self.probability.num_groups > 0:
self.grp_conv_mat = lil_matrix((self.probability.num_loci, self.probability.num_groups))
for i in xrange(self.probability.num_groups):
self.grp_conv_mat[self.probability.groups[i], i] = 1.0
self.grp_conv_mat = self.grp_conv_mat.tocsc()
self.t2t_mat = eye(self.probability.num_loci, self.probability.num_loci)
self.t2t_mat = self.t2t_mat.tolil()
for tid_list in self.probability.groups:
for ii in xrange(len(tid_list)):
for jj in xrange(ii):
i = tid_list[ii]
j = tid_list[jj]
self.t2t_mat[i, j] = 1
self.t2t_mat[j, i] = 1
self.t2t_mat = self.t2t_mat.tocsc()
if lenfile is not None:
hid = dict(zip(self.probability.hname, np.arange(len(self.probability.hname))))
self.target_lengths = np.zeros((self.probability.num_loci, self.probability.num_haplotypes))
if self.probability.num_haplotypes > 1:
with open(lenfile) as fh:
for curline in fh:
item = curline.rstrip().split("\t")
locus, hap = item[0].split("_")
self.target_lengths[self.probability.lid[locus], hid[hap]] = max(float(item[1]) - read_length + 1.0, 1.0)
elif self.probability.num_haplotypes > 0:
with open(lenfile) as fh:
for curline in fh:
item = curline.rstrip().split("\t")
self.target_lengths[self.probability.lid[item[0]], 0] = max(float(item[1]) - read_length + 1.0, 1.0)
else:
raise RuntimeError('There is something wrong with your emase-format alignment file.')
self.target_lengths = self.target_lengths.transpose()
#self.target_lengths = self.target_lengths.transpose() / read_length # lengths in terms of read counts
if not np.all(self.target_lengths > 0.0):
raise RuntimeError('There exist transcripts missing length information.')
self.probability.normalize_reads(axis=APM.Axis.READ) # Initialize alignment probability matrix
self.allelic_expression = self.probability.sum(axis=APM.Axis.READ)
if self.target_lengths is not None: # allelic_expression will be at depth-level
self.allelic_expression = np.divide(self.allelic_expression, self.target_lengths)
if pseudocount > 0.0: # pseudocount is at depth-level
orig_allelic_expression_sum = self.allelic_expression.sum()
nzloci = np.nonzero(self.allelic_expression)[1]
self.allelic_expression[:, nzloci] += pseudocount
self.allelic_expression *= (orig_allelic_expression_sum / self.allelic_expression.sum()) |
def reset(self, pseudocount=0.0):
"""
Initializes the probability of read origin according to the alignment profile
:param pseudocount: Uniform prior for allele specificity estimation
:return: Nothing (as it performs in-place operations)
"""
self.probability.reset()
self.probability.normalize_reads(axis=APM.Axis.READ) # Initialize alignment probability matrix
self.allelic_expression = self.probability.sum(axis=APM.Axis.READ)
if self.target_lengths is not None: # allelic_expression will be at depth-level
self.allelic_expression = np.divide(self.allelic_expression, self.target_lengths)
if pseudocount > 0.0: # pseudocount is at depth-level
orig_allelic_expression_sum = self.allelic_expression.sum()
nzloci = np.nonzero(self.allelic_expression)[1]
self.allelic_expression[:, nzloci] += pseudocount
self.allelic_expression *= (orig_allelic_expression_sum / self.allelic_expression.sum()) |
def update_probability_at_read_level(self, model=3):
"""
Updates the probability of read origin at read level
:param model: Normalization model (1: Gene->Allele->Isoform, 2: Gene->Isoform->Allele, 3: Gene->Isoform*Allele, 4: Gene*Isoform*Allele)
:return: Nothing (as it performs in-place operations)
"""
self.probability.reset() # reset to alignment incidence matrix
if model == 1:
self.probability.multiply(self.allelic_expression, axis=APM.Axis.READ)
self.probability.normalize_reads(axis=APM.Axis.HAPLOGROUP, grouping_mat=self.t2t_mat)
haplogroup_sum_mat = self.allelic_expression * self.t2t_mat
self.probability.multiply(haplogroup_sum_mat, axis=APM.Axis.READ)
self.probability.normalize_reads(axis=APM.Axis.GROUP, grouping_mat=self.t2t_mat)
self.probability.multiply(haplogroup_sum_mat.sum(axis=0), axis=APM.Axis.HAPLOTYPE)
self.probability.normalize_reads(axis=APM.Axis.READ)
elif model == 2:
self.probability.multiply(self.allelic_expression, axis=APM.Axis.READ)
self.probability.normalize_reads(axis=APM.Axis.LOCUS)
self.probability.multiply(self.allelic_expression.sum(axis=0), axis=APM.Axis.HAPLOTYPE)
self.probability.normalize_reads(axis=APM.Axis.GROUP, grouping_mat=self.t2t_mat)
self.probability.multiply((self.allelic_expression * self.t2t_mat).sum(axis=0), axis=APM.Axis.HAPLOTYPE)
self.probability.normalize_reads(axis=APM.Axis.READ)
elif model == 3:
self.probability.multiply(self.allelic_expression, axis=APM.Axis.READ)
self.probability.normalize_reads(axis=APM.Axis.GROUP, grouping_mat=self.t2t_mat)
self.probability.multiply((self.allelic_expression * self.t2t_mat).sum(axis=0), axis=APM.Axis.HAPLOTYPE)
self.probability.normalize_reads(axis=APM.Axis.READ)
elif model == 4:
self.probability.multiply(self.allelic_expression, axis=APM.Axis.READ)
self.probability.normalize_reads(axis=APM.Axis.READ)
else:
raise RuntimeError('The read normalization model should be 1, 2, 3, or 4.') |
def update_allelic_expression(self, model=3):
"""
A single EM step: Update probability at read level and then re-estimate allelic specific expression
:param model: Normalization model (1: Gene->Allele->Isoform, 2: Gene->Isoform->Allele, 3: Gene->Isoform*Allele, 4: Gene*Isoform*Allele)
:return: Nothing (as it performs in-place operations)
"""
self.update_probability_at_read_level(model)
self.allelic_expression = self.probability.sum(axis=APM.Axis.READ)
if self.target_lengths is not None:
self.allelic_expression = np.divide(self.allelic_expression, self.target_lengths) |
def run(self, model, tol=0.001, max_iters=999, verbose=True):
"""
Runs EM iterations
:param model: Normalization model (1: Gene->Allele->Isoform, 2: Gene->Isoform->Allele, 3: Gene->Isoform*Allele, 4: Gene*Isoform*Allele)
:param tol: Tolerance for termination
:param max_iters: Maximum number of iterations until termination
:param verbose: Display information on how EM is running
:return: Nothing (as it performs in-place operations)
"""
orig_err_states = np.seterr(all='raise')
np.seterr(under='ignore')
if verbose:
print
print "Iter No Time (hh:mm:ss) Total change (TPM) "
print "------- --------------- ----------------------"
num_iters = 0
err_sum = 1000000.0
time0 = time.time()
target_err = 1000000.0 * tol
while err_sum > target_err and num_iters < max_iters:
prev_isoform_expression = self.get_allelic_expression().sum(axis=0)
prev_isoform_expression *= (1000000.0 / prev_isoform_expression.sum())
self.update_allelic_expression(model=model)
curr_isoform_expression = self.get_allelic_expression().sum(axis=0)
curr_isoform_expression *= (1000000.0 / curr_isoform_expression.sum())
err = np.abs(curr_isoform_expression - prev_isoform_expression)
err_sum = err.sum()
num_iters += 1
if verbose:
time1 = time.time()
delmin, s = divmod(int(time1 - time0), 60)
h, m = divmod(delmin, 60)
print " %5d %4d:%02d:%02d %9.1f / 1000000" % (num_iters, h, m, s, err_sum) |
def report_read_counts(self, filename, grp_wise=False, reorder='as-is', notes=None):
"""
Exports expected read counts
:param filename: File name for output
:param grp_wise: whether the report is at isoform level or gene level
:param reorder: whether the report should be either 'decreasing' or 'increasing' order or just 'as-is'
:return: Nothing but the method writes a file
"""
expected_read_counts = self.probability.sum(axis=APM.Axis.READ)
if grp_wise:
lname = self.probability.gname
expected_read_counts = expected_read_counts * self.grp_conv_mat
else:
lname = self.probability.lname
total_read_counts = expected_read_counts.sum(axis=0)
if reorder == 'decreasing':
report_order = np.argsort(total_read_counts.flatten())
report_order = report_order[::-1]
elif reorder == 'increasing':
report_order = np.argsort(total_read_counts.flatten())
elif reorder == 'as-is':
report_order = np.arange(len(lname)) # report in the original locus order
cntdata = np.vstack((expected_read_counts, total_read_counts))
fhout = open(filename, 'w')
fhout.write("locus\t" + "\t".join(self.probability.hname) + "\ttotal")
if notes is not None:
fhout.write("\tnotes")
fhout.write("\n")
for locus_id in report_order:
lname_cur = lname[locus_id]
fhout.write("\t".join([lname_cur] + map(str, cntdata[:, locus_id].ravel())))
if notes is not None:
fhout.write("\t%s" % notes[lname_cur])
fhout.write("\n")
fhout.close() |
def report_depths(self, filename, tpm=True, grp_wise=False, reorder='as-is', notes=None):
"""
Exports expected depths
:param filename: File name for output
:param grp_wise: whether the report is at isoform level or gene level
:param reorder: whether the report should be either 'decreasing' or 'increasing' order or just 'as-is'
:return: Nothing but the method writes a file
"""
if grp_wise:
lname = self.probability.gname
depths = self.allelic_expression * self.grp_conv_mat
else:
lname = self.probability.lname
depths = self.allelic_expression
if tpm:
depths *= (1000000.0 / depths.sum())
total_depths = depths.sum(axis=0)
if reorder == 'decreasing':
report_order = np.argsort(total_depths.flatten())
report_order = report_order[::-1]
elif reorder == 'increasing':
report_order = np.argsort(total_depths.flatten())
elif reorder == 'as-is':
report_order = np.arange(len(lname)) # report in the original locus order
cntdata = np.vstack((depths, total_depths))
fhout = open(filename, 'w')
fhout.write("locus\t" + "\t".join(self.probability.hname) + "\ttotal")
if notes is not None:
fhout.write("\tnotes")
fhout.write("\n")
for locus_id in report_order:
lname_cur = lname[locus_id]
fhout.write("\t".join([lname_cur] + map(str, cntdata[:, locus_id].ravel())))
if notes is not None:
fhout.write("\t%s" % notes[lname_cur])
fhout.write("\n")
fhout.close() |
def export_posterior_probability(self, filename, title="Posterior Probability"):
"""
Writes the posterior probability of read origin
:param filename: File name for output
:param title: The title of the posterior probability matrix
:return: Nothing but the method writes a file in EMASE format (PyTables)
"""
self.probability.save(h5file=filename, title=title) |
def bundle(self, reset=False, shallow=False): # Copies the original matrix (Use lots of memory)
"""
Returns ``AlignmentPropertyMatrix`` object in which loci are bundled using grouping information.
:param reset: whether to reset the values at the loci
:param shallow: whether to copy all the meta data
"""
if self.finalized:
# if self.num_groups > 0:
if self.groups is not None and self.gname is not None:
grp_conv_mat = lil_matrix((self.num_loci, self.num_groups))
for i in xrange(self.num_groups):
grp_conv_mat[self.groups[i], i] = 1.0
grp_align = Sparse3DMatrix.__mul__(self, grp_conv_mat) # The core of the bundling
grp_align.num_loci = self.num_groups
grp_align.num_haplotypes = self.num_haplotypes
grp_align.num_reads = self.num_reads
grp_align.shape = (grp_align.num_loci, grp_align.num_haplotypes, grp_align.num_reads)
if not shallow:
grp_align.lname = copy.copy(self.gname)
grp_align.hname = self.hname
grp_align.rname = copy.copy(self.rname)
grp_align.lid = dict(zip(grp_align.lname, np.arange(grp_align.num_loci)))
grp_align.rid = copy.copy(self.rid)
if reset:
grp_align.reset()
return grp_align
else:
raise RuntimeError('No group information is available for bundling.')
else:
raise RuntimeError('The matrix is not finalized.') |
def normalize_reads(self, axis, grouping_mat=None):
"""
Read-wise normalization
:param axis: The dimension along which we want to normalize values
:param grouping_mat: An incidence matrix that specifies which isoforms are from a same gene
:return: Nothing (as the method performs in-place operations)
:rtype: None
"""
if self.finalized:
if axis == self.Axis.LOCUS: # Locus-wise normalization on each read
normalizer = self.sum(axis=self.Axis.HAPLOTYPE) # Sparse matrix of |reads| x |loci|
normalizer.eliminate_zeros()
for hid in xrange(self.num_haplotypes):
self.data[hid].eliminate_zeros() # Trying to avoid numerical problem (inf or nan)
self.data[hid] = np.divide(self.data[hid], normalizer) # element-wise division
elif axis == self.Axis.HAPLOTYPE: # haplotype-wise normalization on each read
for hid in xrange(self.num_haplotypes):
normalizer = self.data[hid].sum(axis=self.Axis.HAPLOTYPE) # 1-dim Sparse matrix of |reads| x 1
normalizer = normalizer.A.flatten()
self.data[hid].data /= normalizer[self.data[hid].indices]
elif axis == self.Axis.READ: # normalization each read as a whole
sum_mat = self.sum(axis=self.Axis.LOCUS)
normalizer = sum_mat.sum(axis=self.Axis.HAPLOTYPE)
normalizer = normalizer.ravel()
for hid in xrange(self.num_haplotypes):
self.data[hid].data /= normalizer[self.data[hid].indices]
elif axis == self.Axis.GROUP: # group-wise normalization on each read
if grouping_mat is None:
raise RuntimeError('Group information matrix is missing.')
normalizer = self.sum(axis=self.Axis.HAPLOTYPE) * grouping_mat
for hid in xrange(self.num_haplotypes):
self.data[hid].eliminate_zeros() # Trying to avoid numerical problem (inf or nan)
self.data[hid] = np.divide(self.data[hid], normalizer)
elif axis == self.Axis.HAPLOGROUP: # haplotype-wise & group-wise normalization on each read
if grouping_mat is None:
raise RuntimeError('Group information matrix is missing.')
for hid in xrange(self.num_haplotypes): # normalizer is different hap-by-hap
normalizer = self.data[hid] * grouping_mat # Sparse matrix of |reads| x |loci|
self.data[hid].eliminate_zeros() # Trying to avoid numerical problem (inf or nan)
self.data[hid] = np.divide(self.data[hid], normalizer)
else:
raise RuntimeError('The axis should be 0, 1, 2, or 3.')
else:
raise RuntimeError('The original matrix must be finalized.') |
def pull_alignments_from(self, reads_to_use, shallow=False):
"""
Pull out alignments of certain reads
:param reads_to_use: numpy array of dtype=bool specifying which reads to use
:param shallow: whether to copy sparse 3D matrix only or not
:return: a new AlignmentPropertyMatrix object that contains alignments of the specified reads only
"""
new_alnmat = self.copy(shallow=shallow)
for hid in xrange(self.num_haplotypes):
hdata = new_alnmat.data[hid]
hdata.data *= reads_to_use[hdata.indices]
hdata.eliminate_zeros()
if new_alnmat.count is not None:
new_alnmat.count[np.logical_not(reads_to_use)] = 0
return new_alnmat |
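# Hypothetical example for pull_alignments_from (not part of the original source): keep only
# the alignments of an arbitrary subset of reads, here the first 100. `alnmat` is an assumed
# instance of the class above.
import numpy as np
reads_to_use = np.zeros(alnmat.num_reads, dtype=bool)
reads_to_use[:100] = True
subset = alnmat.pull_alignments_from(reads_to_use) |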
def get_unique_reads(self, ignore_haplotype=False, shallow=False):
"""
Pull out alignments of uniquely-aligning reads
:param ignore_haplotype: whether to regard allelic multiread as uniquely-aligning read
:param shallow: whether to copy sparse 3D matrix only or not
        :return: a new AlignmentPropertyMatrix object that contains only the alignments of uniquely-aligning reads
"""
if self.finalized:
if ignore_haplotype:
summat = self.sum(axis=self.Axis.HAPLOTYPE)
nnz_per_read = np.diff(summat.tocsr().indptr)
unique_reads = np.logical_and(nnz_per_read > 0, nnz_per_read < 2)
else: # allelic multireads should be removed
alncnt_per_read = self.sum(axis=self.Axis.LOCUS).sum(axis=self.Axis.HAPLOTYPE)
unique_reads = np.logical_and(alncnt_per_read > 0, alncnt_per_read < 2)
return self.pull_alignments_from(unique_reads, shallow=shallow)
else:
raise RuntimeError('The matrix is not finalized.') |
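# Sketch of typical use of get_unique_reads (an assumption, not from the source): uniquely-aligning
# reads are often used to seed EM-style quantification before multireads are reassigned.
# `alnmat` is an assumed finalized instance as above.
unique_strict = alnmat.get_unique_reads()                         # allelic multireads are dropped too
unique_by_locus = alnmat.get_unique_reads(ignore_haplotype=True)  # unique locus, any allele |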
def print_read(self, rid):
"""
Prints nonzero rows of the read wanted
"""
if self.rname is not None:
print self.rname[rid]
print '--'
r = self.get_read_data(rid)
aligned_loci = np.unique(r.nonzero()[1])
for locus in aligned_loci:
nzvec = r[:, locus].todense().transpose()[0].A.flatten()
if self.lname is not None:
print self.lname[locus],
else:
print locus,
print nzvec |
def get_standard_form(self, data):
"""Roman schemes define multiple representations of the same devanAgarI character. This method gets a library-standard representation.
data : a text in the given scheme.
"""
if self.synonym_map is None:
return data
from indic_transliteration import sanscript
return sanscript.transliterate(data=sanscript.transliterate(_from=self.name, _to=sanscript.DEVANAGARI, data=data), _from=sanscript.DEVANAGARI, _to=self.name) |
def _roman(data, scheme_map, **kw):
"""Transliterate `data` with the given `scheme_map`. This function is used
when the source scheme is a Roman scheme.
:param data: the data to transliterate
:param scheme_map: a dict that maps between characters in the old scheme
and characters in the new scheme
"""
vowels = scheme_map.vowels
marks = scheme_map.marks
virama = scheme_map.virama
consonants = scheme_map.consonants
non_marks_viraama = scheme_map.non_marks_viraama
max_key_length_from_scheme = scheme_map.max_key_length_from_scheme
to_roman = scheme_map.to_scheme.is_roman
togglers = kw.pop('togglers', set())
suspend_on = kw.pop('suspend_on', set())
suspend_off = kw.pop('suspend_off', set())
if kw:
raise TypeError('Unexpected keyword argument %s' % list(kw.keys())[0])
buf = []
i = 0
had_consonant = found = False
len_data = len(data)
append = buf.append
# If true, don't transliterate. The toggle token is discarded.
toggled = False
# If true, don't transliterate. The suspend token is retained.
# `suspended` overrides `toggled`.
suspended = False
while i <= len_data:
# The longest token in the source scheme has length `max_key_length_from_scheme`. Iterate
        # over `data` while taking `max_key_length_from_scheme` characters at a time. If we don't
# find the character group in our scheme map, lop off a character and
# try again.
#
# If we've finished reading through `data`, then `token` will be empty
# and the loop below will be skipped.
token = data[i:i + max_key_length_from_scheme]
while token:
if token in togglers:
toggled = not toggled
i += 2 # skip over the token
found = True # force the token to fill up again
break
if token in suspend_on:
suspended = True
elif token in suspend_off:
suspended = False
if toggled or suspended:
token = token[:-1]
continue
# Catch the pattern CV, where C is a consonant and V is a vowel.
# V should be rendered as a vowel mark, a.k.a. a "dependent"
# vowel. But due to the nature of Brahmic scripts, 'a' is implicit
# and has no vowel mark. If we see 'a', add nothing.
if had_consonant and token in vowels:
mark = marks.get(token, '')
if mark:
append(mark)
elif to_roman:
append(vowels[token])
found = True
# Catch any non_marks_viraama character, including consonants, punctuation,
# and regular vowels. Due to the implicit 'a', we must explicitly
# end any lingering consonants before we can handle the current
# token.
elif token in non_marks_viraama:
if had_consonant:
append(virama[''])
append(non_marks_viraama[token])
found = True
if found:
had_consonant = token in consonants
i += len(token)
break
else:
token = token[:-1]
# We've exhausted the token; this must be some other character. Due to
# the implicit 'a', we must explicitly end any lingering consonants
# before we can handle the current token.
if not found:
if had_consonant:
append(virama[''])
if i < len_data:
append(data[i])
had_consonant = False
i += 1
found = False
return ''.join(buf) |
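# Illustration of the toggle/suspend handling implemented above. This is a sketch that assumes
# the public sanscript.transliterate() passes '##' as the toggler and '<'/'>' as the suspend
# markers, as its default options (shown later in this document) do.
from indic_transliteration import sanscript
# Text between '##' pairs is echoed untransliterated and the '##' tokens are dropped.
print(sanscript.transliterate('rAma ##rAma## rAma', sanscript.HK, sanscript.DEVANAGARI))
# expected: राम rAma राम
# Text between '<' and '>' is also left alone, but the markers themselves are kept.
print(sanscript.transliterate('rAma <rAma> rAma', sanscript.HK, sanscript.DEVANAGARI))
# expected: राम <rAma> राम |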
def get_approx_deduplicating_key(text, encoding_scheme=sanscript.DEVANAGARI):
"""
Given some devanAgarI sanskrit text, this function produces a "key" so
that
1] The key should be the same for different observed orthographical
forms of the same text. For example:
::
- "dharmma" vs "dharma"
- "rAmaM gacChati" vs "rAma~N gacChati" vs "rAma~N gacChati"
- "kurvan eva" vs "kurvanneva"
    2] The key should be different for different texts.
- "stamba" vs "stambha"
    This function attempts to succeed at [1] and [2] almost all the time.
    The longer the text, the lower the probability of failing at [2], and the
    (very slightly) higher the probability of failing at [1].
Sources of orthographically divergent forms:
- Phonetically sensible grammar rules
- Neglect of sandhi while writing
- Punctuation, spaces, avagraha-s.
- Regional-language-influenced mistakes (La instead of la.)
Some example applications of this function:
- Create a database of quotes or words with minimal duplication.
- Search a database of quotes or words while being robust to optional
forms.
Also see equivalent function in the scala indic-transliteration package.
"""
if encoding_scheme == sanscript.DEVANAGARI:
key = text
key = regex.sub("\\P{IsDevanagari}", "", key)
# Remove spaces
key = regex.sub("\\s", "", key)
# Remove punctuations
key = regex.sub("\\p{P}", "", key)
# Remove digits, abbreviation sign, svara-s, avagraha
key = regex.sub("[०-९।॥॰ऽ]|[॑-॔]", "", key)
# Collapse semi-vowel-anunAsika-s संलग्नम् सल्ँलग्नम् into m
key = regex.sub("[यरल]्ँ", "म्", key)
# Collapse all panchama-s into m
key = regex.sub("[ङञणन]", "म", key)
# Collapse anusvAra into m
key = regex.sub("ँ|ं", "म्", key)
key = regex.sub("ॐ", "ओम्", key)
key = regex.sub("[ळऴ]", "ल", key)
# Deal with optional forms where consonants are duplicated - like dharmma
# Details in https://docs.google.com/spreadsheets/d/1GP8Ps_hmgCGLZPWKIVBCfQB9ZmPQOaCwTrH9OybaWaQ/edit#gid=21
key = regex.sub("([क-हक़-य़])्\\1+", "\\1", key)
key = regex.sub("[कग]्ख्", "ख्", key)
key = regex.sub("[कग]्घ्", "घ्", key)
key = regex.sub("च्छ्", "छ्", key)
key = regex.sub("ज्झ्", "झ्", key)
key = regex.sub("त्थ्", "थ्", key)
key = regex.sub("द्ध्", "ध्", key)
key = regex.sub("ड्ढ्", "ढ्", key)
key = regex.sub("प्फ्", "फ्", key)
key = regex.sub("ब्भ्", "भ्", key)
return key
else:
logging.warning("got script {} for '{}'".format(encoding_scheme, text))
return regex.sub("\\s", "", text) |
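# A quick illustrative check (added; not from the original source). The two spellings below
# differ only in the doubled consonant that the key collapses, so their keys are expected to
# be identical.
key_plain = get_approx_deduplicating_key("धर्म")      # dharma
key_doubled = get_approx_deduplicating_key("धर्म्म")   # dharmma
assert key_plain == key_doubled |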
def _brahmic(data, scheme_map, **kw):
"""Transliterate `data` with the given `scheme_map`. This function is used
when the source scheme is a Brahmic scheme.
:param data: the data to transliterate
:param scheme_map: a dict that maps between characters in the old scheme
and characters in the new scheme
"""
if scheme_map.from_scheme.name == northern.GURMUKHI:
data = northern.GurmukhiScheme.replace_tippi(text=data)
marks = scheme_map.marks
virama = scheme_map.virama
consonants = scheme_map.consonants
non_marks_viraama = scheme_map.non_marks_viraama
to_roman = scheme_map.to_scheme.is_roman
max_key_length_from_scheme = scheme_map.max_key_length_from_scheme
buf = []
i = 0
to_roman_had_consonant = found = False
append = buf.append
# logging.debug(pprint.pformat(scheme_map.consonants))
    # We don't just translate each brAhmic character one after another, in order to prefer concise transliterations when possible - for example ज्ञ -> jn in optitrans rather than j~n.
while i <= len(data):
# The longest token in the source scheme has length `max_key_length_from_scheme`. Iterate
        # over `data` while taking `max_key_length_from_scheme` characters at a time. If we don't
# find the character group in our scheme map, lop off a character and
# try again.
#
# If we've finished reading through `data`, then `token` will be empty
# and the loop below will be skipped.
token = data[i:i + max_key_length_from_scheme]
while token:
if len(token) == 1:
if token in marks:
append(marks[token])
found = True
elif token in virama:
append(virama[token])
found = True
else:
if to_roman_had_consonant:
append('a')
append(non_marks_viraama.get(token, token))
found = True
else:
if token in non_marks_viraama:
if to_roman_had_consonant:
append('a')
append(non_marks_viraama.get(token))
found = True
if found:
to_roman_had_consonant = to_roman and token in consonants
i += len(token)
break
else:
token = token[:-1]
# Continuing the outer while loop.
# We've exhausted the token; this must be some other character. Due to
# the implicit 'a', we must explicitly end any lingering consonants
# before we can handle the current token.
if not found:
if to_roman_had_consonant:
append(next(iter(virama.values())))
if i < len(data):
append(data[i])
to_roman_had_consonant = False
i += 1
found = False
if to_roman_had_consonant:
append('a')
return ''.join(buf) |
def transliterate(data, _from=None, _to=None, scheme_map=None, **kw):
"""Transliterate `data` with the given parameters::
output = transliterate('idam adbhutam', HK, DEVANAGARI)
Each time the function is called, a new :class:`SchemeMap` is created
to map the input scheme to the output scheme. This operation is fast
enough for most use cases. But for higher performance, you can pass a
pre-computed :class:`SchemeMap` instead::
scheme_map = SchemeMap(SCHEMES[HK], SCHEMES[DEVANAGARI])
output = transliterate('idam adbhutam', scheme_map=scheme_map)
    :param data: the data to transliterate
    :param _from: the name of a source scheme
    :param _to: the name of a destination scheme
:param scheme_map: the :class:`SchemeMap` to use. If specified, ignore
`_from` and `_to`. If unspecified, create a
:class:`SchemeMap` from `_from` to `_to`.
"""
if scheme_map is None:
scheme_map = _get_scheme_map(_from, _to)
options = {
'togglers': {'##'},
'suspend_on': set('<'),
'suspend_off': set('>')
}
options.update(kw)
from indic_transliteration.sanscript.brahmic_mapper import _brahmic
from indic_transliteration.sanscript.roman_mapper import _roman
func = _roman if scheme_map.from_scheme.is_roman else _brahmic
return func(data, scheme_map, **options) |
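# Sketch of the higher-performance path the docstring describes: build the SchemeMap once and
# reuse it for many strings (names follow the sanscript API referenced above).
from indic_transliteration import sanscript
scheme_map = sanscript.SchemeMap(sanscript.SCHEMES[sanscript.HK],
                                 sanscript.SCHEMES[sanscript.DEVANAGARI])
for line in ['idam adbhutam', 'rAmo rAjamaNiH']:
    print(sanscript.transliterate(line, scheme_map=scheme_map)) |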
def detect(text):
"""Detect the input's transliteration scheme.
:param text: some text data, either a `unicode` or a `str` encoded
in UTF-8.
"""
if sys.version_info < (3, 0):
# Verify encoding
try:
text = text.decode('utf-8')
except UnicodeError:
pass
# Brahmic schemes are all within a specific range of code points.
for L in text:
code = ord(L)
if code >= BRAHMIC_FIRST_CODE_POINT:
for name, start_code in BLOCKS:
if start_code <= code <= BRAHMIC_LAST_CODE_POINT:
return name
# Romanizations
if Regex.IAST_OR_KOLKATA_ONLY.search(text):
if Regex.KOLKATA_ONLY.search(text):
return Scheme.Kolkata
else:
return Scheme.IAST
if Regex.ITRANS_ONLY.search(text):
return Scheme.ITRANS
if Regex.SLP1_ONLY.search(text):
return Scheme.SLP1
if Regex.VELTHUIS_ONLY.search(text):
return Scheme.Velthuis
if Regex.ITRANS_OR_VELTHUIS_ONLY.search(text):
return Scheme.ITRANS
return Scheme.HK |
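# Hypothetical calls to detect(); the exact return values depend on the module's Regex
# patterns and BLOCKS table, so treat the annotations as expected rather than guaranteed.
print(detect('idam adbhutam'))   # expected: Scheme.HK (plain ASCII falls through to the HK default)
print(detect('इदम् अद्भुतम्'))      # expected: the matching Brahmic block, e.g. Devanagari |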
def transliterate(data, _from=None, _to=None, scheme_map=None, **kw):
"""Transliterate `data` with the given parameters::
output = transliterate('idam adbhutam', HK, DEVANAGARI)
Each time the function is called, a new :class:`SchemeMap` is created
to map the input scheme to the output scheme. This operation is fast
enough for most use cases. But for higher performance, you can pass a
pre-computed :class:`SchemeMap` instead::
scheme_map = SchemeMap(SCHEMES[HK], SCHEMES[DEVANAGARI])
output = transliterate('idam adbhutam', scheme_map=scheme_map)
:param data: the data to transliterate
:param _from: the name of a source scheme
:param _to: the name of a destination scheme
:param scheme_map: the :class:`SchemeMap` to use. If specified, ignore
`_from` and `_to`. If unspecified, create a
:class:`SchemeMap` from `_from` to `_to`.
"""
if scheme_map is None:
from_scheme = SCHEMES[_from]
to_scheme = SCHEMES[_to]
scheme_map = sanscript.SchemeMap(from_scheme, to_scheme)
return sanscript.transliterate(data=data, scheme_map=scheme_map) |
def _setup():
"""Add a variety of default schemes."""
s = str.split
if sys.version_info < (3, 0):
# noinspection PyUnresolvedReferences
s = unicode.split
def pop_all(some_dict, some_list):
for scheme in some_list:
some_dict.pop(scheme)
global SCHEMES
SCHEMES = copy.deepcopy(sanscript.SCHEMES)
pop_all(SCHEMES, [sanscript.ORIYA, sanscript.BENGALI, sanscript.GUJARATI])
SCHEMES[HK].update({
'vowels': s("""a A i I u U R RR lR lRR E ai O au""") + s("""e o"""),
'marks': s("""A i I u U R RR lR lRR E ai O au""") + s("""e o"""),
'consonants': sanscript.SCHEMES[HK]['consonants'] + s("""n2 r2 zh""")
})
SCHEMES[ITRANS].update({
'vowels': s("""a A i I u U R RR LLi LLI E ai O au""") + s("""e o"""),
'marks': s("""A i I u U R RR LLi LLI E ai O au""") + s("""e o"""),
'consonants': sanscript.SCHEMES[ITRANS]['consonants'] + s("""n2 r2 zh""")
})
pop_all(SCHEMES[ITRANS].synonym_map, s("""e o"""))
SCHEMES[OPTITRANS].update({
'vowels': s("""a A i I u U R RR LLi LLI E ai O au""") + s("""e o"""),
'marks': s("""A i I u U R RR LLi LLI E ai O au""") + s("""e o"""),
'consonants': sanscript.SCHEMES[OPTITRANS]['consonants'] + s("""n2 r2 zh""")
})
pop_all(SCHEMES[OPTITRANS].synonym_map, s("""e o""")) |
def to_utf8(y):
"""
converts an array of integers to utf8 string
"""
out = []
for x in y:
if x < 0x080:
out.append(x)
elif x < 0x0800:
out.append((x >> 6) | 0xC0)
out.append((x & 0x3F) | 0x80)
elif x < 0x10000:
out.append((x >> 12) | 0xE0)
out.append(((x >> 6) & 0x3F) | 0x80)
out.append((x & 0x3F) | 0x80)
else:
out.append((x >> 18) | 0xF0)
            out.append(((x >> 12) & 0x3F) | 0x80)
out.append(((x >> 6) & 0x3F) | 0x80)
out.append((x & 0x3F) | 0x80)
return ''.join(map(chr, out)) |
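# A small sanity check (added for illustration, Python 3): compare to_utf8 against Python's
# own UTF-8 codec. to_utf8 returns a str whose characters carry raw byte values, so map them
# back to integers for the comparison.
expected = list('क'.encode('utf-8'))             # [0xE0, 0xA4, 0x95]
produced = [ord(c) for c in to_utf8([0x0915])]   # U+0915 DEVANAGARI LETTER KA
assert produced == expected |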
def set_script(self, i):
"""
set the value of delta to reflect the current codepage
"""
if i in range(1, 10):
n = i - 1
else:
raise IllegalInput("Invalid Value for ATR %s" % (hex(i)))
if n > -1: # n = -1 is the default script ..
self.curr_script = n
self.delta = n * DELTA
return |
def _unrecognised(chr):
""" Handle unrecognised characters. """
if options['handleUnrecognised'] == UNRECOGNISED_ECHO:
return chr
elif options['handleUnrecognised'] == UNRECOGNISED_SUBSTITUTE:
return options['substituteChar']
else:
        raise KeyError(chr)
def main(argv=None):
""" Call transliterator from a command line.
python transliterator.py text inputFormat outputFormat
... writes the transliterated text to stdout
text -- the text to be transliterated OR the name of a file containing the text
inputFormat -- the name of the character block or transliteration scheme that
the text is to be transliterated FROM, e.g. 'CYRILLIC', 'IAST'.
Not case-sensitive
outputFormat -- the name of the character block or transliteration scheme that
the text is to be transliterated TO, e.g. 'CYRILLIC', 'IAST'.
Not case-sensitive
"""
print (transliterate('jaya gaNeza! zrIrAmajayam', 'harvardkyoto', 'devanagari'))
if argv is None:
argv = sys.argv
try:
text, inFormat, outFormat = argv[1:4]
except ValueError:
print (main.__doc__)
return 2
inFormat = inFormat.upper()
outFormat = outFormat.upper()
# try assuming "text" is a filename
try:
f = open(text)
except IOError:
# it wasn't, so it must be the actual text
print (transliterate(text, inFormat, outFormat))
return 0
else:
i = 1
for text in f.readlines():
if len(text) > 0 and not text.startswith('#'):
print (transliterate(text, inFormat, outFormat).strip('\n'))
i = i + 1
f.close()
return 0 |
def _transliterate(self, text, outFormat):
""" Transliterate a devanagari text into the target format.
Transliterating a character to or from Devanagari is not a simple
lookup: it depends on the preceding and following characters.
"""
def getResult():
if curMatch.isspace():
result.append(curMatch)
return
if prevMatch in self:
prev = self[prevMatch]
else:
prev = None
if nextMatch in self:
next = self[nextMatch]
else:
next = None
try:
equiv = outFormat._equivalent(self[curMatch],
prev, #self.get(prevMatch, None),
next, #self.get(nextMatch, None),
self._implicitA)
except KeyError:
equiv = _unrecognised(curMatch)
for e in equiv:
result.append(e)
def incr(c):
if self._longestEntry == 1:
return 1
return len(c)
result = []
text = self._preprocess(text)
i = 0
prevMatch = None
nextMatch = None
curMatch = self._getNextChar(text, i)
i = i + len(curMatch)
while i < len(text):
nextMatch = self._getNextChar(text, i)
getResult()
i = i + len(nextMatch)
prevMatch = curMatch
curMatch = nextMatch
nextMatch = None
getResult()
return result |
def _equivalent(self, char, prev, next, implicitA):
""" Transliterate a Latin character equivalent to Devanagari.
Add VIRAMA for ligatures.
Convert standalone to dependent vowels.
"""
result = []
if char.isVowel == False:
result.append(char.chr)
if char.isConsonant \
and ((next is not None and next.isConsonant) \
or next is None):
result.append(DevanagariCharacter._VIRAMA)
else:
if prev is None or prev.isConsonant == False:
result.append(char.chr)
else:
if char._dependentVowel is not None:
result.append(char._dependentVowel)
return result |
def from_devanagari(self, data):
"""A convenience method"""
from indic_transliteration import sanscript
return sanscript.transliterate(data=data, _from=sanscript.DEVANAGARI, _to=self.name) |
def generate(grammar=None, num=1, output=sys.stdout, max_recursion=10, seed=None):
"""Load and generate ``num`` number of top-level rules from the specified grammar.
    :param str grammar: The path of the grammar file to load and generate data from
:param int num: The number of times to generate data
:param output: The output destination (an open, writable stream-type object. default=``sys.stdout``)
:param int max_recursion: The maximum reference-recursion when generating data (default=``10``)
:param int seed: The seed to initialize the PRNG with. If None, will not initialize it.
"""
if seed is not None:
gramfuzz.rand.seed(seed)
fuzzer = gramfuzz.GramFuzzer()
fuzzer.load_grammar(grammar)
cat_group = os.path.basename(grammar).replace(".py", "")
results = fuzzer.gen(cat_group=cat_group, num=num, max_recursion=max_recursion)
for res in results:
output.write(res) |
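# A minimal usage sketch, assuming a gramfuzz grammar module exists at the hypothetical path
# "grammars/names.py"; results are written to stdout by default.
generate(grammar="grammars/names.py", num=3, seed=42) |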
def build(self, pre=None, shortest=False):
"""Build the ``Quote`` instance
:param list pre: The prerequisites list
:param bool shortest: Whether or not the shortest reference-chain (most minimal) version of the field should be generated.
"""
res = super(Q, self).build(pre, shortest=shortest)
if self.escape:
return repr(res)
elif self.html_js_escape:
return ("'" + res.encode("string_escape").replace("<", "\\x3c").replace(">", "\\x3e") + "'")
else:
return "".join([self.quote, res, self.quote]) |
def make_present_participles(verbs):
"""Make the list of verbs into present participles
E.g.:
empower -> empowering
drive -> driving
"""
res = []
for verb in verbs:
parts = verb.split()
if parts[0].endswith("e"):
parts[0] = parts[0][:-1] + "ing"
else:
parts[0] = parts[0] + "ing"
res.append(" ".join(parts))
return res |
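# Example (follows directly from the rule above: drop a trailing 'e' from the first word if
# present, then append 'ing'; remaining words are left untouched).
print(make_present_participles(["empower", "drive", "think big"]))
# -> ['empowering', 'driving', 'thinking big'] |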
def clear_sent_messages(self, offset=None):
""" Deletes sent MailerMessage records """
if offset is None:
offset = getattr(settings, 'MAILQUEUE_CLEAR_OFFSET', defaults.MAILQUEUE_CLEAR_OFFSET)
if type(offset) is int:
offset = datetime.timedelta(hours=offset)
delete_before = timezone.now() - offset
self.filter(sent=True, last_attempt__lte=delete_before).delete() |
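# Usage sketch (an assumption, not from the source): if this manager is installed as
# MailerMessage.objects, sent messages older than 48 hours can be purged like so.
import datetime
MailerMessage.objects.clear_sent_messages(offset=datetime.timedelta(hours=48)) |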
def unicodevalues_asstring(values):
""" Return string with unicodenames (unless that is disabled) """
if not os.environ.get('DISABLE_UNAMES'):
return map(lambda x: '%s' % format(x).strip(), values)
return map(lambda x: u'U+%04x %s' % (x, unichr(x)), sorted(values)) |
def _loadNamelistIncludes(item, unique_glyphs, cache):
"""Load the includes of an encoding Namelist files.
This is an implementation detail of readNamelist.
"""
includes = item["includes"] = []
charset = item["charset"] = set() | item["ownCharset"]
noCharcode = item["noCharcode"] = set() | item["ownNoCharcode"]
dirname = os.path.dirname(item["fileName"])
for include in item["header"]["includes"]:
includeFile = os.path.join(dirname, include)
try:
includedItem = readNamelist(includeFile, unique_glyphs, cache)
except NamelistRecursionError:
continue
if includedItem in includes:
continue
includes.append(includedItem)
charset |= includedItem["charset"]
noCharcode |= includedItem["ownNoCharcode"]
return item |
def __readNamelist(cache, filename, unique_glyphs):
"""Return a dict with the data of an encoding Namelist file.
This is an implementation detail of readNamelist.
"""
if filename in cache:
item = cache[filename]
else:
cps, header, noncodes = parseNamelist(filename)
item = {
"fileName": filename
, "ownCharset": cps
, "header": header
, "ownNoCharcode": noncodes
, "includes": None # placeholder
, "charset": None # placeholder
, "noCharcode": None
}
cache[filename] = item
if unique_glyphs or item["charset"] is not None:
return item
# full-charset/includes are requested and not cached yet
_loadNamelistIncludes(item, unique_glyphs, cache)
return item |
def _readNamelist(currentlyIncluding, cache, namFilename, unique_glyphs):
""" Detect infinite recursion and prevent it.
This is an implementation detail of readNamelist.
Raises NamelistRecursionError if namFilename is in the process of being included
"""
# normalize
filename = os.path.abspath(os.path.normcase(namFilename))
if filename in currentlyIncluding:
raise NamelistRecursionError(filename)
currentlyIncluding.add(filename)
try:
result = __readNamelist(cache, filename, unique_glyphs)
finally:
currentlyIncluding.remove(filename)
return result |
def readNamelist(namFilename, unique_glyphs=False, cache=None):
"""
Args:
namFilename: The path to the Namelist file.
unique_glyphs: Optional, whether to only include glyphs unique to subset.
cache: Optional, a dict used to cache loaded Namelist files
Returns:
A dict with following keys:
"fileName": (string) absolut path to namFilename
"ownCharset": (set) the set of codepoints defined by the file itself
"header": (dict) the result of _parseNamelistHeader
"includes":
(set) if unique_glyphs=False, the resulting dicts of readNamelist
for each of the include files
(None) if unique_glyphs=True
"charset":
(set) if unique_glyphs=False, the union of "ownCharset" and all
"charset" items of each included file
(None) if unique_glyphs=True
If you are using unique_glyphs=True and an external cache, don't expect
the keys "includes" and "charset" to have a specific value.
Depending on the state of cache, if unique_glyphs=True the returned
dict may have None values for its "includes" and "charset" keys.
"""
currentlyIncluding = set()
if not cache:
cache = {}
return _readNamelist(currentlyIncluding, cache, namFilename, unique_glyphs) |
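# Sketch of sharing a cache across several Namelist files (the file names are placeholders)
# so that common "#$ include" targets are parsed only once.
cache = {}
core = readNamelist("GF-latin-core_unique-glyphs.nam", unique_glyphs=False, cache=cache)
plus = readNamelist("GF-latin-plus_unique-glyphs.nam", unique_glyphs=False, cache=cache)
print(len(core["charset"]), len(plus["charset"])) |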
def codepointsInNamelist(namFilename, unique_glyphs=False, cache=None):
"""Returns the set of codepoints contained in a given Namelist file.
    This is a replacement for CodepointsInSubset and implements the "#$ include"
header format.
Args:
namFilename: The path to the Namelist file.
unique_glyphs: Optional, whether to only include glyphs unique to subset.
Returns:
        A set containing the codepoints in the subset.
"""
key = 'charset' if not unique_glyphs else 'ownCharset'
internals_dir = os.path.dirname(os.path.abspath(__file__))
target = os.path.join(internals_dir, namFilename)
result = readNamelist(target, unique_glyphs, cache)
return result[key] |
def get_orthographies(self, _library=library):
''' Returns list of CharsetInfo about supported orthographies '''
results = []
for charset in _library.charsets:
if self._charsets:
cn = getattr(charset, 'common_name', False)
abbr = getattr(charset, 'abbreviation', False)
nn = getattr(charset, 'short_name', False)
naive = getattr(charset, 'native_name', False)
if cn and cn.lower() in self._charsets:
results.append(charset)
elif nn and nn.lower() in self._charsets:
results.append(charset)
elif naive and naive.lower() in self._charsets:
results.append(charset)
elif abbr and abbr.lower() in self._charsets:
results.append(charset)
else:
results.append(charset)
for result in results:
yield CharsetInfo(self, result) |
def get_codepoints():
""" Return all XML <scanning-codepoints> in received XML """
# response = requests.get(EXTENSIS_LANG_XML)
# if response.status_code != 200:
# return []
path = get_from_cache('languages.xml', EXTENSIS_LANG_XML)
try:
xml_content = open(path, 'r').read()
except IOError:
logging.error('Could not read languages.xml from cache')
xml_content = ''
content = re.sub('<!--.[^>]*-->', '', xml_content)
doc = etree.fromstring(content.lstrip('`'))
return doc.findall('.//scanning-codepoints') |