Dataset schema (field: type, observed value range):

repository_name: string, 5 to 67 chars
func_path_in_repository: string, 4 to 234 chars
func_name: string, 0 to 314 chars
whole_func_string: string, 52 to 3.87M chars
language: string, 6 distinct values
func_code_string: string, 52 to 3.87M chars
func_documentation_string: string, 1 to 47.2k chars
func_code_url: string, 85 to 339 chars
denisenkom/pytds
src/pytds/tds.py
_TdsSession.submit_rpc
def submit_rpc(self, rpc_name, params, flags=0):
    """ Sends an RPC request.

    This call will transition the session into pending state.
    If some operation is currently pending on the session, it will be
    cancelled before sending this request.

    Spec: http://msdn.microsoft.com/en-us/library/dd357576.aspx

    :param rpc_name: Name of the RPC to call, can be an instance of :class:`InternalProc`
    :param params: Stored proc parameters, should be a list of :class:`Column` instances.
    :param flags: See spec for possible flags.
    """
    logger.info('Sending RPC %s flags=%d', rpc_name, flags)
    self.messages = []
    self.output_params = {}
    self.cancel_if_pending()
    self.res_info = None
    w = self._writer
    with self.querying_context(tds_base.PacketType.RPC):
        if tds_base.IS_TDS72_PLUS(self):
            self._start_query()
        if tds_base.IS_TDS71_PLUS(self) and isinstance(rpc_name, tds_base.InternalProc):
            w.put_smallint(-1)
            w.put_smallint(rpc_name.proc_id)
        else:
            if isinstance(rpc_name, tds_base.InternalProc):
                rpc_name = rpc_name.name
            w.put_smallint(len(rpc_name))
            w.write_ucs2(rpc_name)
        #
        # TODO support flags
        # bit 0 (1 as flag) in TDS7/TDS5 is "recompile"
        # bit 1 (2 as flag) in TDS7+ is the "no metadata" bit; it prevents sending of column infos
        #
        w.put_usmallint(flags)
        self._out_params_indexes = []
        for i, param in enumerate(params):
            if param.flags & tds_base.fByRefValue:
                self._out_params_indexes.append(i)
            w.put_byte(len(param.column_name))
            w.write_ucs2(param.column_name)
            #
            # TODO support other flags (use default null/no metadata)
            # bit 1 (2 as flag) in TDS7+ is the "default value" bit
            # (what's the meaning of "default value"?)
            #
            w.put_byte(param.flags)
            # TYPE_INFO structure: https://msdn.microsoft.com/en-us/library/dd358284.aspx
            serializer = param.choose_serializer(
                type_factory=self._tds.type_factory,
                collation=self._tds.collation or raw_collation
            )
            type_id = serializer.type
            w.put_byte(type_id)
            serializer.write_info(w)
            serializer.write(w, param.value)
python
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/tds.py#L974-L1032
denisenkom/pytds
src/pytds/tds.py
_TdsSession.submit_plain_query
def submit_plain_query(self, operation):
    """ Sends a plain query to the server.

    This call will transition the session into pending state.
    If some operation is currently pending on the session, it will be
    cancelled before sending this request.

    Spec: http://msdn.microsoft.com/en-us/library/dd358575.aspx

    :param operation: A string representing the SQL statement.
    """
    self.messages = []
    self.cancel_if_pending()
    self.res_info = None
    logger.info("Sending query %s", operation[:100])
    w = self._writer
    with self.querying_context(tds_base.PacketType.QUERY):
        if tds_base.IS_TDS72_PLUS(self):
            self._start_query()
        w.write_ucs2(operation)
python
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/tds.py#L1034-L1053
denisenkom/pytds
src/pytds/tds.py
_TdsSession.submit_bulk
def submit_bulk(self, metadata, rows):
    """ Sends an INSERT BULK command.

    Spec: http://msdn.microsoft.com/en-us/library/dd358082.aspx

    :param metadata: A list of :class:`Column` instances.
    :param rows: A collection of rows, each row is a collection of values.
    """
    logger.info('Sending INSERT BULK')
    num_cols = len(metadata)
    w = self._writer
    serializers = []
    with self.querying_context(tds_base.PacketType.BULK):
        w.put_byte(tds_base.TDS7_RESULT_TOKEN)
        w.put_usmallint(num_cols)
        for col in metadata:
            if tds_base.IS_TDS72_PLUS(self):
                w.put_uint(col.column_usertype)
            else:
                w.put_usmallint(col.column_usertype)
            w.put_usmallint(col.flags)
            serializer = col.choose_serializer(
                type_factory=self._tds.type_factory,
                collation=self._tds.collation,
            )
            type_id = serializer.type
            w.put_byte(type_id)
            serializers.append(serializer)
            serializer.write_info(w)
            w.put_byte(len(col.column_name))
            w.write_ucs2(col.column_name)
        for row in rows:
            w.put_byte(tds_base.TDS_ROW_TOKEN)
            for i, col in enumerate(metadata):
                serializers[i].write(w, row[i])

        # https://msdn.microsoft.com/en-us/library/dd340421.aspx
        w.put_byte(tds_base.TDS_DONE_TOKEN)
        w.put_usmallint(tds_base.TDS_DONE_FINAL)
        w.put_usmallint(0)  # curcmd
        # row count
        if tds_base.IS_TDS72_PLUS(self):
            w.put_int8(0)
        else:
            w.put_int(0)
python
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/tds.py#L1055-L1100
denisenkom/pytds
src/pytds/tds.py
_TdsSession.put_cancel
def put_cancel(self):
    """ Sends a cancel request to the server.

    Switches connection to IN_CANCEL state.
    """
    logger.info('Sending CANCEL')
    self._writer.begin_packet(tds_base.PacketType.CANCEL)
    self._writer.flush()
    self.in_cancel = 1
python
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/tds.py#L1102-L1110
denisenkom/pytds
src/pytds/tds_base.py
iterdecode
def iterdecode(iterable, codec):
    """ Uses an incremental decoder to decode each chunk in iterable.
    This function is a generator.

    :param iterable: Iterable object which yields raw data to be decoded
    :param codec: An instance of codec
    """
    decoder = codec.incrementaldecoder()
    for chunk in iterable:
        yield decoder.decode(chunk)
    yield decoder.decode(b'', True)
python
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/tds_base.py#L317-L327
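A quick illustration of the generator above (a minimal sketch; the input chunks are made up). A codec obtained via codecs.lookup exposes the incrementaldecoder attribute the function expects, and the incremental decoder correctly handles a UTF-8 sequence split across chunk boundaries:

    import codecs

    # The two-byte UTF-8 sequence for 'é' is split across the chunk
    # boundary; the incremental decoder buffers the partial byte.
    chunks = [b'caf\xc3', b'\xa9 au lait']
    codec = codecs.lookup('utf-8')
    print(''.join(iterdecode(chunks, codec)))  # -> 'café au lait'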
denisenkom/pytds
src/pytds/tds_base.py
skipall
def skipall(stm, size):
    """ Skips exactly size bytes in stm.

    If EOF is reached before size bytes are skipped,
    will raise :class:`ClosedConnectionError`.

    :param stm: Stream to skip bytes in, should have a recv method
                which may return fewer bytes than requested.
    :param size: Number of bytes to skip.
    """
    res = stm.recv(size)
    if len(res) == size:
        return
    elif len(res) == 0:
        raise ClosedConnectionError()
    left = size - len(res)
    while left:
        buf = stm.recv(left)
        if len(buf) == 0:
            raise ClosedConnectionError()
        left -= len(buf)
python
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/tds_base.py#L496-L517
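A minimal sketch of skipall against a hypothetical stream whose recv returns at most three bytes at a time, which is exactly the short-read case the while-loop handles:

    class ChunkyStream:
        # hypothetical stand-in for the socket-like object skipall expects
        def __init__(self, data, chunk=3):
            self._data, self._chunk = data, chunk

        def recv(self, size):
            out = self._data[:min(size, self._chunk)]
            self._data = self._data[len(out):]
            return out

    stm = ChunkyStream(b'0123456789')
    skipall(stm, 7)      # consumes 7 bytes across three recv calls
    print(stm.recv(3))   # -> b'789'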
denisenkom/pytds
src/pytds/tds_base.py
read_chunks
def read_chunks(stm, size):
    """ Reads exactly size bytes from stm and produces chunks.

    May call stm.recv multiple times until the required number of bytes
    is read. If EOF is reached before size bytes are read,
    will raise :class:`ClosedConnectionError`.

    :param stm: Stream to read bytes from, should have a recv method
                which may return fewer bytes than requested.
    :param size: Number of bytes to read.
    """
    if size == 0:
        yield b''
        return

    res = stm.recv(size)
    if len(res) == 0:
        raise ClosedConnectionError()
    yield res
    left = size - len(res)
    while left:
        buf = stm.recv(left)
        if len(buf) == 0:
            raise ClosedConnectionError()
        yield buf
        left -= len(buf)
python
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/tds_base.py#L520-L547
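Using the same hypothetical ChunkyStream from the skipall example above, read_chunks yields each partial recv result until exactly the requested byte count has been produced:

    stm = ChunkyStream(b'abcdefgh')
    print(list(read_chunks(stm, 8)))  # -> [b'abc', b'def', b'gh']
    print(b''.join(read_chunks(ChunkyStream(b'abcdefgh'), 8)))  # -> b'abcdefgh'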
denisenkom/pytds
src/pytds/tds_base.py
readall_fast
def readall_fast(stm, size):
    """ Slightly faster version of readall, it reads no more than two chunks.

    Meaning that it can only be used to read small data that doesn't span
    more than two packets.

    :param stm: Stream to read from, should have read_fast and recv methods.
    :param size: Number of bytes to read.
    """
    buf, offset = stm.read_fast(size)
    if len(buf) - offset < size:
        # slow case
        buf = buf[offset:]
        buf += stm.recv(size - len(buf))
        return buf, 0
    return buf, offset
python
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/tds_base.py#L567-L583
denisenkom/pytds
src/pytds/tds_types.py
_decode_num
def _decode_num(buf):
    """ Decodes a little-endian integer from buffer.

    Buffer can be of any size.
    """
    return functools.reduce(lambda acc, val: acc * 256 + tds_base.my_ord(val), reversed(buf), 0)
python
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/tds_types.py#L29-L34
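A quick check of the little-endian folding (my_ord is pytds's helper for getting a byte's integer value): b'\x01\x02' encodes 0x0201.

    assert _decode_num(b'\x01\x02') == 513            # 0x0201
    assert _decode_num(b'\xff\xff\xff\xff') == 2 ** 32 - 1
    assert _decode_num(b'') == 0                      # empty buffer folds to 0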
denisenkom/pytds
src/pytds/tds_types.py
PlpReader.chunks
def chunks(self):
    """ Generates chunks from stream, each chunk is an instance of bytes. """
    if self.is_null():
        return
    total = 0
    while True:
        chunk_len = self._rdr.get_uint()
        if chunk_len == 0:
            if not self.is_unknown_len() and total != self._size:
                msg = "PLP actual length (%d) doesn't match reported length (%d)" % (total, self._size)
                self._rdr.session.bad_stream(msg)
            return
        total += chunk_len
        left = chunk_len
        while left:
            buf = self._rdr.recv(left)
            yield buf
            left -= len(buf)
python
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/tds_types.py#L68-L88
denisenkom/pytds
src/pytds/tds_types.py
Date.from_pydate
@classmethod
def from_pydate(cls, pydate):
    """ Creates sql date object from Python date object.

    @param pydate: Python date
    @return: sql date
    """
    return cls(days=(datetime.datetime.combine(pydate, datetime.time(0, 0, 0)) - _datetime2_base_date).days)
python
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/tds_types.py#L1484-L1490
denisenkom/pytds
src/pytds/tds_types.py
Time.to_pytime
def to_pytime(self):
    """ Converts sql time object into Python's time object.

    This will truncate nanoseconds to microseconds.

    @return: naive time
    """
    nanoseconds = self._nsec
    hours = nanoseconds // 1000000000 // 60 // 60
    nanoseconds -= hours * 60 * 60 * 1000000000
    minutes = nanoseconds // 1000000000 // 60
    nanoseconds -= minutes * 60 * 1000000000
    seconds = nanoseconds // 1000000000
    nanoseconds -= seconds * 1000000000
    return datetime.time(hours, minutes, seconds, nanoseconds // 1000)
python
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/tds_types.py#L1521-L1534
denisenkom/pytds
src/pytds/tds_types.py
Time.from_pytime
@classmethod
def from_pytime(cls, pytime):
    """ Converts Python time object to sql time object, ignoring timezone.

    @param pytime: Python time object
    @return: sql time object
    """
    secs = pytime.hour * 60 * 60 + pytime.minute * 60 + pytime.second
    nsec = secs * 10 ** 9 + pytime.microsecond * 1000
    return cls(nsec=nsec)
python
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/tds_types.py#L1537-L1546
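A round-trip sketch pairing from_pytime with the to_pytime method above; the intermediate value is a plain nanosecond count (the _nsec attribute is internal, shown only for illustration and assumed to be where the constructor stores nsec):

    import datetime

    t = Time.from_pytime(datetime.time(12, 34, 56, 789000))
    print(t._nsec)        # -> 45296789000000
    print(t.to_pytime())  # -> 12:34:56.789000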
denisenkom/pytds
src/pytds/tds_types.py
DateTime2.to_pydatetime
def to_pydatetime(self):
    """ Converts datetime2 object into Python's datetime.datetime object.

    @return: naive datetime.datetime
    """
    return datetime.datetime.combine(self._date.to_pydate(), self._time.to_pytime())
python
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/tds_types.py#L1583-L1588
denisenkom/pytds
src/pytds/tds_types.py
DateTime2.from_pydatetime
@classmethod
def from_pydatetime(cls, pydatetime):
    """ Creates sql datetime2 object from Python datetime object, ignoring timezone.

    @param pydatetime: Python datetime object
    @return: sql datetime2 object
    """
    # date() and time() must be called; passing the bound methods
    # themselves would make Date.from_pydate/Time.from_pytime fail
    return cls(date=Date.from_pydate(pydatetime.date()),
               time=Time.from_pytime(pydatetime.time()))
python
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/tds_types.py#L1591-L1599
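A round-trip sketch (assuming Date.to_pydate inverts from_pydate, and given the date()/time() call fix above); any tzinfo on the input is ignored, so the result is naive:

    import datetime

    dt = datetime.datetime(2019, 5, 1, 12, 34, 56, 789000)
    print(DateTime2.from_pydatetime(dt).to_pydatetime() == dt)  # -> True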
denisenkom/pytds
src/pytds/tds_types.py
DateTimeOffset.to_pydatetime
def to_pydatetime(self):
    """ Converts datetimeoffset object into Python's datetime.datetime object.

    @return: time zone aware datetime.datetime
    """
    dt = datetime.datetime.combine(self._date.to_pydate(), self._time.to_pytime())
    from .tz import FixedOffsetTimezone
    return dt.replace(tzinfo=_utc).astimezone(FixedOffsetTimezone(self._offset))
python
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/tds_types.py#L1628-L1635
denisenkom/pytds
src/pytds/tds_types.py
TableSerializer.write_info
def write_info(self, w):
    """ Writes TVP_TYPENAME structure.

    Spec: https://msdn.microsoft.com/en-us/library/dd302994.aspx

    @param w: TdsWriter
    """
    w.write_b_varchar("")  # db_name, should be empty
    w.write_b_varchar(self._table_type.typ_schema)
    w.write_b_varchar(self._table_type.typ_name)
python
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/tds_types.py#L2245-L2255
denisenkom/pytds
src/pytds/tds_types.py
TableSerializer.write
def write(self, w, val):
    """ Writes remaining part of TVP_TYPE_INFO structure, resuming from TVP_COLMETADATA.

    Specs:
    https://msdn.microsoft.com/en-us/library/dd302994.aspx
    https://msdn.microsoft.com/en-us/library/dd305261.aspx
    https://msdn.microsoft.com/en-us/library/dd303230.aspx

    @param w: TdsWriter
    @param val: TableValuedParam or None
    """
    if val.is_null():
        w.put_usmallint(tds_base.TVP_NULL_TOKEN)
    else:
        columns = self._table_type.columns
        w.put_usmallint(len(columns))
        for i, column in enumerate(columns):
            w.put_uint(column.column_usertype)
            w.put_usmallint(column.flags)

            # TYPE_INFO structure: https://msdn.microsoft.com/en-us/library/dd358284.aspx
            serializer = self._columns_serializers[i]
            type_id = serializer.type
            w.put_byte(type_id)
            serializer.write_info(w)

            w.write_b_varchar('')  # ColName, must be empty in TVP according to spec

    # here we can optionally send TVP_ORDER_UNIQUE and TVP_COLUMN_ORDERING
    # https://msdn.microsoft.com/en-us/library/dd305261.aspx

    # terminating optional metadata
    w.put_byte(tds_base.TVP_END_TOKEN)

    # now sending rows using TVP_ROW
    # https://msdn.microsoft.com/en-us/library/dd305261.aspx
    if val.rows:
        for row in val.rows:
            w.put_byte(tds_base.TVP_ROW_TOKEN)
            for i, col in enumerate(self._table_type.columns):
                if not col.flags & tds_base.TVP_COLUMN_DEFAULT_FLAG:
                    self._columns_serializers[i].write(w, row[i])

    # terminating rows
    w.put_byte(tds_base.TVP_END_TOKEN)
python
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/tds_types.py#L2257-L2305
denisenkom/pytds
src/pytds/tds_types.py
DeclarationsParser.parse
def parse(self, declaration):
    """ Parse sql type declaration, e.g. varchar(10) and return instance of
    corresponding type class, e.g. VarCharType(10).

    @param declaration: Sql declaration to parse, e.g. varchar(10)
    @return: instance of SqlTypeMetaclass
    """
    declaration = declaration.strip()
    for regex, constructor in self._compiled:
        m = regex.match(declaration)
        if m:
            return constructor(*m.groups())
    raise ValueError('Unable to parse type declaration', declaration)
python
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/tds_types.py#L2575-L2588
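A minimal self-contained sketch of the same regex-dispatch pattern; the real table of regexes and the type classes live in pytds.tds_types, so the two entries below are made-up stand-ins:

    import re

    _compiled = [
        (re.compile(r'varchar\((\d+)\)$', re.IGNORECASE),
         lambda size: ('VarCharType', int(size))),
        (re.compile(r'bit$', re.IGNORECASE),
         lambda: ('BitType',)),
    ]

    def parse_declaration(declaration):
        declaration = declaration.strip()
        for regex, constructor in _compiled:
            m = regex.match(declaration)
            if m:
                return constructor(*m.groups())
        raise ValueError('Unable to parse type declaration', declaration)

    print(parse_declaration('varchar(10)'))  # -> ('VarCharType', 10)
    print(parse_declaration('  BIT '))       # -> ('BitType',)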
denisenkom/pytds
src/pytds/tds_types.py
TdsTypeInferrer.from_value
def from_value(self, value):
    """ Function infers TDS type from Python value.

    :param value: value from which to infer TDS type
    :return: An instance of subclass of :class:`BaseType`
    """
    if value is None:
        sql_type = NVarCharType(size=1)
    else:
        sql_type = self._from_class_value(value, type(value))
    return sql_type
python
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/tds_types.py#L2609-L2619
denisenkom/pytds
src/pytds/__init__.py
dict_row_strategy
def dict_row_strategy(column_names):
    """ Dict row strategy, rows returned as dictionaries. """
    # replace empty column names with indices
    column_names = [(name or idx) for idx, name in enumerate(column_names)]

    def row_factory(row):
        return dict(zip(column_names, row))

    return row_factory
python
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/__init__.py#L88-L97
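A usage sketch: the strategy takes the column names once and returns a factory applied to every row; note how the empty name falls back to its index:

    factory = dict_row_strategy(['id', 'name', ''])
    print(factory((1, 'alice', 'x')))  # -> {'id': 1, 'name': 'alice', 2: 'x'}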
denisenkom/pytds
src/pytds/__init__.py
namedtuple_row_strategy
def namedtuple_row_strategy(column_names):
    """ Namedtuple row strategy, rows returned as named tuples.

    Column names that are not valid Python identifiers will be replaced
    with col<number>_
    """
    import collections
    # replace empty column names with placeholders
    column_names = [name if is_valid_identifier(name) else 'col%s_' % idx
                    for idx, name in enumerate(column_names)]
    row_class = collections.namedtuple('Row', column_names)

    def row_factory(row):
        return row_class(*row)

    return row_factory
python
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/__init__.py#L104-L118
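A usage sketch, with a simple stand-in for pytds's is_valid_identifier helper (needed only if you lift the strategy function out of pytds, which defines the real helper in the same module):

    def is_valid_identifier(name):
        # stand-in; the real helper lives in pytds
        return bool(name) and name.isidentifier()

    factory = namedtuple_row_strategy(['id', 'select count', ''])
    print(factory((1, 2, 3)))  # -> Row(id=1, col1_=2, col2_=3)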
denisenkom/pytds
src/pytds/__init__.py
recordtype_row_strategy
def recordtype_row_strategy(column_names):
    """ Recordtype row strategy, rows returned as recordtypes.

    Column names that are not valid Python identifiers will be replaced
    with col<number>_
    """
    try:
        from namedlist import namedlist as recordtype  # optional dependency
    except ImportError:
        from recordtype import recordtype  # optional dependency
    # replace empty column names with placeholders
    column_names = [name if is_valid_identifier(name) else 'col%s_' % idx
                    for idx, name in enumerate(column_names)]
    recordtype_row_class = recordtype('Row', column_names)

    # custom extension class that supports indexing
    class Row(recordtype_row_class):
        def __getitem__(self, index):
            if isinstance(index, slice):
                return tuple(getattr(self, x) for x in self.__slots__[index])
            return getattr(self, self.__slots__[index])

        def __setitem__(self, index, value):
            setattr(self, self.__slots__[index], value)

    def row_factory(row):
        return Row(*row)

    return row_factory
python
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/__init__.py#L121-L148
denisenkom/pytds
src/pytds/__init__.py
_get_servers_deque
def _get_servers_deque(servers, database):
    """ Returns deque of servers for given tuple of servers and database name.

    This deque has the active server at the beginning; if the first server
    is not accessible at the moment, the deque will be rotated: the second
    server is moved to the first position, the third to the second position,
    and so on, while the previously first server is moved to the last
    position. This allows the last successful server to be remembered
    between calls to the connect function.
    """
    key = (servers, database)
    if key not in _servers_deques:
        _servers_deques[key] = deque(servers)
    return _servers_deques[key]
python
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/__init__.py#L1107-L1121
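A sketch of the rotation described in the docstring; because the deque is cached per (servers, database) key, the rotation survives across connect calls, so the next call tries the previously working server first:

    from collections import deque

    servers = deque([('primary', 1433), ('secondary', 1433), ('tertiary', 1433)])
    servers.rotate(-1)  # first server unreachable: move it to the back
    print(servers[0])   # -> ('secondary', 1433)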
denisenkom/pytds
src/pytds/__init__.py
_parse_connection_string
def _parse_connection_string(connstr):
    """ MSSQL style connection string parser.

    Returns normalized dictionary of connection string parameters.
    """
    res = {}
    for item in connstr.split(';'):
        item = item.strip()
        if not item:
            continue
        key, value = item.split('=', 1)
        key = key.strip().lower().replace(' ', '_')
        value = value.strip()
        res[key] = value
    return res
python
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/__init__.py#L1124-L1139
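A usage sketch showing the normalization: keys are lowercased and embedded spaces become underscores, while empty segments are skipped:

    print(_parse_connection_string('Server=myhost;Initial Catalog=mydb; User ID=sa;'))
    # -> {'server': 'myhost', 'initial_catalog': 'mydb', 'user_id': 'sa'}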
denisenkom/pytds
src/pytds/__init__.py
connect
def connect(dsn=None, database=None, user=None, password=None, timeout=None,
            login_timeout=15, as_dict=None, appname=None, port=None,
            tds_version=tds_base.TDS74, autocommit=False, blocksize=4096,
            use_mars=False, auth=None, readonly=False, load_balancer=None,
            use_tz=None, bytes_to_unicode=True, row_strategy=None,
            failover_partner=None, server=None, cafile=None, validate_host=True,
            enc_login_only=False, disable_connect_retry=False, pooling=False,
            ):
    """ Opens connection to the database.

    :keyword dsn: SQL server host and instance: <host>[\<instance>]
    :type dsn: string
    :keyword failover_partner: secondary database host, used if primary is not accessible
    :type failover_partner: string
    :keyword database: the database to initially connect to
    :type database: string
    :keyword user: database user to connect as
    :type user: string
    :keyword password: user's password
    :type password: string
    :keyword timeout: query timeout in seconds, default 0 (no timeout)
    :type timeout: int
    :keyword login_timeout: timeout for connection and login in seconds, default 15
    :type login_timeout: int
    :keyword as_dict: whether rows should be returned as dictionaries instead of tuples
    :type as_dict: boolean
    :keyword appname: Set the application name to use for the connection
    :type appname: string
    :keyword port: the TCP port to use to connect to the server
    :type port: int
    :keyword tds_version: Maximum TDS version to use, should only be used for testing
    :type tds_version: int
    :keyword autocommit: Enable or disable database level autocommit
    :type autocommit: bool
    :keyword blocksize: Size of block for the TDS protocol, usually should not be used
    :type blocksize: int
    :keyword use_mars: Enable or disable MARS
    :type use_mars: bool
    :keyword auth: An instance of authentication method class, e.g. Ntlm or Sspi
    :keyword readonly: Allows enabling read-only mode for the connection, only supported
      by MSSQL 2012; earlier versions will ignore this parameter
    :type readonly: bool
    :keyword load_balancer: An instance of load balancer class to use; if not provided,
      will not use load balancer
    :keyword use_tz: Provides timezone for naive database times; if not provided,
      date and time will be returned in naive format
    :keyword bytes_to_unicode: If true, single-byte database strings will be converted
      to unicode Python strings, otherwise will return strings as ``bytes`` without conversion
    :type bytes_to_unicode: bool
    :keyword row_strategy: strategy used to create rows, determines type of returned rows,
      can be custom or one of: :func:`tuple_row_strategy`, :func:`list_row_strategy`,
      :func:`dict_row_strategy`, :func:`namedtuple_row_strategy`, :func:`recordtype_row_strategy`
    :type row_strategy: function of list of column names returning row factory
    :keyword cafile: Name of the file containing trusted CAs in PEM format;
      if provided, will enable TLS
    :type cafile: str
    :keyword validate_host: Host name validation during TLS connection is enabled by default;
      if you disable it you will be vulnerable to MitM type of attack
    :type validate_host: bool
    :keyword enc_login_only: Allows you to scope TLS encryption only to the authentication
      portion. This means that anyone who can observe traffic on your network will be able
      to see all your SQL requests and potentially modify them.
    :type enc_login_only: bool
    :returns: An instance of :class:`Connection`
    """
    login = _TdsLogin()
    login.client_host_name = socket.gethostname()[:128]
    login.library = "Python TDS Library"
    login.user_name = user or ''
    login.password = password or ''
    login.app_name = appname or 'pytds'
    login.port = port
    login.language = ''  # use database default
    login.attach_db_file = ''
    login.tds_version = tds_version
    if tds_version < tds_base.TDS70:
        raise ValueError('This TDS version is not supported')
    login.database = database or ''
    login.bulk_copy = False
    login.client_lcid = lcid.LANGID_ENGLISH_US
    login.use_mars = use_mars
    login.pid = os.getpid()
    login.change_password = ''
    login.client_id = uuid.getnode()  # client mac address
    login.cafile = cafile
    login.validate_host = validate_host
    login.enc_login_only = enc_login_only
    if cafile:
        if not tls.OPENSSL_AVAILABLE:
            raise ValueError("You are trying to use encryption but pyOpenSSL does not work, you probably "
                             "need to install it first")
        login.tls_ctx = tls.create_context(cafile)
        if login.enc_login_only:
            login.enc_flag = PreLoginEnc.ENCRYPT_OFF
        else:
            login.enc_flag = PreLoginEnc.ENCRYPT_ON
    else:
        login.tls_ctx = None
        login.enc_flag = PreLoginEnc.ENCRYPT_NOT_SUP

    if use_tz:
        login.client_tz = use_tz
    else:
        login.client_tz = pytds.tz.local

    # that will set:
    # ANSI_DEFAULTS to ON,
    # IMPLICIT_TRANSACTIONS to OFF,
    # TEXTSIZE to 0x7FFFFFFF (2GB) (TDS 7.2 and below), TEXTSIZE to infinite (introduced in TDS 7.3),
    # and ROWCOUNT to infinite
    login.option_flag2 = tds_base.TDS_ODBC_ON

    login.connect_timeout = login_timeout
    login.query_timeout = timeout
    login.blocksize = blocksize
    login.auth = auth
    login.readonly = readonly
    login.load_balancer = load_balancer
    login.bytes_to_unicode = bytes_to_unicode

    if server and dsn:
        raise ValueError("Both server and dsn shouldn't be specified")

    if server:
        warnings.warn("server parameter is deprecated, use dsn instead", DeprecationWarning)
        dsn = server

    if load_balancer and failover_partner:
        raise ValueError("Both load_balancer and failover_partner shouldn't be specified")
    if load_balancer:
        servers = [(srv, None) for srv in load_balancer.choose()]
    else:
        servers = [(dsn or 'localhost', port)]
        if failover_partner:
            servers.append((failover_partner, port))

    parsed_servers = []
    for srv, port in servers:
        host, instance = _parse_server(srv)
        if instance and port:
            raise ValueError("Both instance and port shouldn't be specified")
        parsed_servers.append((host, port, instance))

    login.servers = _get_servers_deque(tuple(parsed_servers), database)

    # unique connection identifier used to pool connection
    key = (
        dsn,
        login.user_name,
        login.app_name,
        login.tds_version,
        login.database,
        login.client_lcid,
        login.use_mars,
        login.cafile,
        login.blocksize,
        login.readonly,
        login.bytes_to_unicode,
        login.auth,
        login.client_tz,
        autocommit,
    )

    conn = Connection()
    conn._use_tz = use_tz
    conn._autocommit = autocommit
    conn._login = login
    conn._pooling = pooling
    conn._key = key

    assert row_strategy is None or as_dict is None, \
        'Both row_strategy and as_dict were specified, you should use either one or another'
    if as_dict is not None:
        conn.as_dict = as_dict
    elif row_strategy is not None:
        conn._row_strategy = row_strategy
    else:
        conn._row_strategy = tuple_row_strategy  # default row strategy

    conn._isolation_level = 0
    conn._dirty = False
    from .tz import FixedOffsetTimezone
    conn._tzinfo_factory = None if use_tz is None else FixedOffsetTimezone
    if disable_connect_retry:
        conn._try_open(timeout=login.connect_timeout)
    else:
        conn._open()
    return conn
python
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/__init__.py#L1142-L1331
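A usage sketch (host, instance and credentials are placeholders), combining connect with the dict row strategy documented above; Connection and Cursor are assumed to support the context-manager protocol:

    import pytds

    with pytds.connect(dsn=r'localhost\SQLEXPRESS', database='master',
                       user='sa', password='secret',
                       row_strategy=pytds.dict_row_strategy) as conn:
        with conn.cursor() as cur:
            cur.execute('select 1 as value')
            print(cur.fetchall())  # -> [{'value': 1}]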
denisenkom/pytds
src/pytds/__init__.py
Connection.commit
def commit(self):
    """ Commit transaction which is currently in progress. """
    self._assert_open()
    if self._autocommit:
        return
    if not self._conn.tds72_transaction:
        return
    self._main_cursor._commit(cont=True, isolation_level=self._isolation_level)
python
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/__init__.py#L410-L419
denisenkom/pytds
src/pytds/__init__.py
Connection.cursor
def cursor(self):
    """ Return cursor object that can be used to make queries and fetch
    results from the database.
    """
    self._assert_open()
    if self.mars_enabled:
        in_tran = self._conn.tds72_transaction
        if in_tran and self._dirty:
            try:
                return _MarsCursor(self,
                                   self._conn.create_session(self._tzinfo_factory),
                                   self._tzinfo_factory)
            except (socket.error, OSError):
                self._conn.close()
                raise
        else:
            try:
                return _MarsCursor(self,
                                   self._conn.create_session(self._tzinfo_factory),
                                   self._tzinfo_factory)
            except (socket.error, OSError) as e:
                if e.errno not in (errno.EPIPE, errno.ECONNRESET):
                    raise
                self._conn.close()
            except ClosedConnectionError:
                pass
            self._assert_open()
            return _MarsCursor(self,
                               self._conn.create_session(self._tzinfo_factory),
                               self._tzinfo_factory)
    else:
        return Cursor(self, self._conn.main_session, self._tzinfo_factory)
python
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/__init__.py#L421-L455
denisenkom/pytds
src/pytds/__init__.py
Connection.rollback
def rollback(self):
    """ Roll back transaction which is currently in progress. """
    try:
        if self._autocommit:
            return
        if not self._conn or not self._conn.is_connected():
            return
        if not self._conn.tds72_transaction:
            return
        self._main_cursor._rollback(cont=True,
                                    isolation_level=self._isolation_level)
    except socket.error as e:
        if e.errno in (errno.ENETRESET, errno.ECONNRESET, errno.EPIPE):
            return
        self._conn.close()
        raise
    except ClosedConnectionError:
        pass
python
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/__init__.py#L457-L479
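The usual pairing of commit and rollback in application code, sketched with placeholder statements. Note that rollback() above deliberately swallows dead- and closed-connection errors, which makes it safe to call from cleanup paths.

try:
    cur.execute("UPDATE accounts SET balance = balance - %s WHERE id = %s", (100, 1))
    cur.execute("UPDATE accounts SET balance = balance + %s WHERE id = %s", (100, 2))
    conn.commit()
except Exception:
    conn.rollback()  # safe even if the connection has already died
    raise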
denisenkom/pytds
src/pytds/__init__.py
Connection.close
def close(self):
    """
    Close connection to an MS SQL Server.

    This function tries to close the connection and free all memory used.
    It can be called more than once in a row. No exception is raised in
    this case.
    """
    if self._conn:
        if self._pooling:
            _connection_pool.add(self._key, (self._conn, self._main_cursor._session))
        else:
            self._conn.close()
        self._active_cursor = None
        self._main_cursor = None
        self._conn = None
    self._closed = True
python
Close connection to an MS SQL Server. This function tries to close the connection and free all memory used. It can be called more than once in a row. No exception is raised in this case.
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/__init__.py#L481-L496
denisenkom/pytds
src/pytds/__init__.py
Cursor.get_proc_outputs
def get_proc_outputs(self):
    """
    If stored procedure has result sets and OUTPUT parameters use this method
    after you processed all result sets to get values of OUTPUT parameters.

    :return: A list of output parameter values.
    """
    self._session.complete_rpc()
    # output_params is keyed by the positional index of each OUTPUT parameter
    results = [None] * len(self._session.output_params.items())
    for key, param in self._session.output_params.items():
        results[key] = param.value
    return results
python
If stored procedure has result sets and OUTPUT parameters use this method after you processed all result sets to get values of OUTPUT parameters. :return: A list of output parameter values.
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/__init__.py#L561-L572
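A sketch of the intended call order for procedures that return both result sets and OUTPUT parameters; 'report_proc' is hypothetical, and pytds.output is assumed here to be the marker for OUTPUT parameters.

import pytds

cur.callproc('report_proc', (2024, pytds.output(value=None, param_type=int)))
rows = cur.fetchall()                 # drain the result set(s) first
out_values = cur.get_proc_outputs()   # OUTPUT values only become available afterwards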
denisenkom/pytds
src/pytds/__init__.py
Cursor.callproc
def callproc(self, procname, parameters=()):
    """
    Call a stored procedure with the given name.

    :param procname: The name of the procedure to call
    :type procname: str
    :keyword parameters: The optional parameters for the procedure
    :type parameters: sequence

    Note: If stored procedure has OUTPUT parameters and result sets this
    method will not return values for OUTPUT parameters, you should call
    get_proc_outputs to get values for OUTPUT parameters.
    """
    conn = self._assert_open()
    conn._try_activate_cursor(self)
    return self._callproc(procname, parameters)
python
Call a stored procedure with the given name. :param procname: The name of the procedure to call :type procname: str :keyword parameters: The optional parameters for the procedure :type parameters: sequence Note: If stored procedure has OUTPUT parameters and result sets this method will not return values for OUTPUT parameters, you should call get_proc_outputs to get values for OUTPUT parameters.
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/__init__.py#L574-L589
denisenkom/pytds
src/pytds/__init__.py
Cursor.get_proc_return_status
def get_proc_return_status(self):
    """ Last stored proc result """
    if self._session is None:
        return None
    if not self._session.has_status:
        self._session.find_return_status()
    return self._session.ret_status if self._session.has_status else None
python
Last stored proc result
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/__init__.py#L617-L624
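Reading a procedure's RETURN value with this method; 'status_proc' is a placeholder name.

cur.callproc('status_proc', ())
status = cur.get_proc_return_status()  # scans ahead for the status token if needed
if status not in (None, 0):
    raise RuntimeError('procedure signalled failure: %r' % status)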
denisenkom/pytds
src/pytds/__init__.py
Cursor.cancel
def cancel(self):
    """ Cancel current statement """
    conn = self._assert_open()
    conn._try_activate_cursor(self)
    self._session.cancel_if_pending()
python
Cancel current statement
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/__init__.py#L626-L631
denisenkom/pytds
src/pytds/__init__.py
Cursor.close
def close(self):
    """
    Closes the cursor. The cursor is unusable from this point.
    """
    conn = self._conn
    if conn is not None:
        conn = conn()  # dereference the weak reference to the parent connection
        if conn is not None:
            if self is conn._active_cursor:
                conn._active_cursor = conn._main_cursor
                self._session = None
        self._conn = None
python
Closes the cursor. The cursor is unusable from this point.
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/__init__.py#L633-L644
denisenkom/pytds
src/pytds/__init__.py
Cursor.execute
def execute(self, operation, params=()):
    """ Execute the query

    :param operation: SQL statement
    :type operation: str
    """
    conn = self._assert_open()
    conn._try_activate_cursor(self)
    self._execute(operation, params)
    # for compatibility with pyodbc
    return self
python
Execute the query :param operation: SQL statement :type operation: str
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/__init__.py#L723-L733
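Placeholders follow the DB-API format/pyformat style, and because execute() returns the cursor itself (pyodbc-style), calls can be chained; the table here is a placeholder and iteration assumes the cursor's usual DB-API iteration extension.

for row in cur.execute('SELECT id, name FROM users WHERE age > %s', (21,)):
    print(row)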
denisenkom/pytds
src/pytds/__init__.py
Cursor.execute_scalar
def execute_scalar(self, query_string, params=None):
    """
    This method sends a query to the MS SQL Server to which this object
    instance is connected, then returns first column of first row from
    result. An exception is raised on failure. If there are pending
    results or rows prior to executing this command, they are silently
    discarded.

    This method accepts Python formatting. Please see execute_query()
    for details.

    This method is useful if you want just a single value, as in:

        ``conn.execute_scalar('SELECT COUNT(*) FROM employees')``

    This method works in the same way as ``iter(conn).next()[0]``.
    Remaining rows, if any, can still be iterated after calling this
    method.
    """
    self.execute(query_string, params)
    row = self.fetchone()
    if not row:
        return None
    return row[0]
python
This method sends a query to the MS SQL Server to which this object instance is connected, then returns first column of first row from result. An exception is raised on failure. If there are pending results or rows prior to executing this command, they are silently discarded. This method accepts Python formatting. Please see execute_query() for details. This method is useful if you want just a single value, as in: ``conn.execute_scalar('SELECT COUNT(*) FROM employees')`` This method works in the same way as ``iter(conn).next()[0]``. Remaining rows, if any, can still be iterated after calling this method.
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/__init__.py#L761-L785
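The docstring's own example plus a parameterized variant; table and column names are placeholders.

count = cur.execute_scalar('SELECT COUNT(*) FROM employees')
newest = cur.execute_scalar(
    'SELECT MAX(hired_on) FROM employees WHERE dept = %s', ('sales',))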
denisenkom/pytds
src/pytds/__init__.py
Cursor.description
def description(self):
    """ Cursor description, see http://legacy.python.org/dev/peps/pep-0249/#description """
    if self._session is None:
        return None
    res = self._session.res_info
    if res:
        return res.description
    else:
        return None
python
Cursor description, see http://legacy.python.org/dev/peps/pep-0249/#description
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/__init__.py#L808-L817
denisenkom/pytds
src/pytds/__init__.py
Cursor.messages
def messages(self):
    """ Messages generated by server, see http://legacy.python.org/dev/peps/pep-0249/#cursor-messages """
    if self._session:
        result = []
        for msg in self._session.messages:
            ex = _create_exception_by_message(msg)
            result.append((type(ex), ex))
        return result
    else:
        return None
python
Messages generated by server, see http://legacy.python.org/dev/peps/pep-0249/#cursor-messages
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/__init__.py#L825-L835
denisenkom/pytds
src/pytds/__init__.py
Cursor.native_description
def native_description(self):
    """ todo document """
    if self._session is None:
        return None
    res = self._session.res_info
    if res:
        return res.native_descr
    else:
        return None
python
todo document
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/__init__.py#L838-L847
denisenkom/pytds
src/pytds/__init__.py
Cursor.fetchone
def fetchone(self):
    """ Fetches next row, or ``None`` if there are no more rows """
    row = self._session.fetchone()
    if row:
        return self._row_factory(row)
python
Fetches next row, or ``None`` if there are no more rows
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/__init__.py#L849-L854
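The classic fetch loop built on this method; the query and the process() callback are purely illustrative.

cur.execute('SELECT id FROM jobs')
while True:
    row = cur.fetchone()
    if row is None:   # no more rows
        break
    process(row)      # process() is a hypothetical per-row callback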
denisenkom/pytds
src/pytds/__init__.py
Cursor.copy_to
def copy_to(self, file=None, table_or_view=None, sep='\t', columns=None,
            check_constraints=False, fire_triggers=False, keep_nulls=False,
            kb_per_batch=None, rows_per_batch=None, order=None, tablock=False,
            schema=None, null_string=None, data=None):
    """ *Experimental*. Efficiently load data to database from file using
    ``BULK INSERT`` operation

    :param file: Source file-like object, should be in csv format. Specify
        either this or data, not both.
    :param table_or_view: Destination table or view in the database
    :type table_or_view: str

    Optional parameters:

    :keyword sep: Separator used in csv file
    :type sep: str
    :keyword columns: List of Column objects or column names in target
        table to insert to. SQL Server will do some conversions, so these
        may not have to match the actual table definition exactly. If not
        provided will insert into all columns assuming nvarchar(4000) NULL
        for all columns. If only the column name is provided, the type is
        assumed to be nvarchar(4000) NULL. If rows are given with file, you
        cannot specify non-string data types. If rows are given with data,
        the values must be a type supported by the serializer for the
        column in tds_types.
    :type columns: list
    :keyword check_constraints: Check table constraints for incoming data
    :type check_constraints: bool
    :keyword fire_triggers: Enable or disable triggers for table
    :type fire_triggers: bool
    :keyword keep_nulls: If enabled null values inserted as-is, instead of
        inserting default value for column
    :type keep_nulls: bool
    :keyword kb_per_batch: Kilobytes per batch can be used to optimize
        performance, see MSSQL server documentation for details
    :type kb_per_batch: int
    :keyword rows_per_batch: Rows per batch can be used to optimize
        performance, see MSSQL server documentation for details
    :type rows_per_batch: int
    :keyword order: The ordering of the data in source table. List of
        columns with ASC or DESC suffix. E.g. ``['order_id ASC', 'name DESC']``
        Can be used to optimize performance, see MSSQL server documentation
        for details
    :type order: list
    :keyword tablock: Enable or disable table lock for the duration of bulk load
    :keyword schema: Name of schema for table or view, if not specified
        default schema will be used
    :keyword null_string: String that should be interpreted as a NULL when
        reading the CSV file. Has no meaning if using data instead of file.
    :keyword data: The data to insert as an iterable of rows, which are
        iterables of values. Specify either this or file, not both.
    """
    conn = self._conn()
    rows = None
    if data is None:
        import csv
        reader = csv.reader(file, delimiter=sep)
        if null_string is not None:
            def _convert_null_strings(csv_reader):
                for row in csv_reader:
                    yield [r if r != null_string else None for r in row]
            reader = _convert_null_strings(reader)
        rows = reader
    else:
        rows = data

    obj_name = tds_base.tds_quote_id(table_or_view)
    if schema:
        obj_name = '{0}.{1}'.format(tds_base.tds_quote_id(schema), obj_name)
    if columns:
        metadata = []
        for column in columns:
            if isinstance(column, Column):
                metadata.append(column)
            else:
                metadata.append(Column(name=column, type=NVarCharType(size=4000),
                                       flags=Column.fNullable))
    else:
        self.execute('select top 1 * from {} where 1<>1'.format(obj_name))
        metadata = [Column(name=col[0], type=NVarCharType(size=4000),
                           flags=Column.fNullable if col[6] else 0)
                    for col in self.description]
    col_defs = ','.join('{0} {1}'.format(tds_base.tds_quote_id(col.column_name),
                                         col.type.get_declaration())
                        for col in metadata)
    with_opts = []
    if check_constraints:
        with_opts.append('CHECK_CONSTRAINTS')
    if fire_triggers:
        with_opts.append('FIRE_TRIGGERS')
    if keep_nulls:
        with_opts.append('KEEP_NULLS')
    if kb_per_batch:
        with_opts.append('KILOBYTES_PER_BATCH = {0}'.format(kb_per_batch))
    if rows_per_batch:
        with_opts.append('ROWS_PER_BATCH = {0}'.format(rows_per_batch))
    if order:
        with_opts.append('ORDER({0})'.format(','.join(order)))
    if tablock:
        with_opts.append('TABLOCK')
    with_part = ''
    if with_opts:
        with_part = 'WITH ({0})'.format(','.join(with_opts))
    operation = 'INSERT BULK {0}({1}) {2}'.format(obj_name, col_defs, with_part)
    self.execute(operation)
    self._session.submit_bulk(metadata, rows)
    self._session.process_simple_request()
python
*Experimental*. Efficiently load data to database from file using ``BULK INSERT`` operation :param file: Source file-like object, should be in csv format. Specify either this or data, not both. :param table_or_view: Destination table or view in the database :type table_or_view: str Optional parameters: :keyword sep: Separator used in csv file :type sep: str :keyword columns: List of Column objects or column names in target table to insert to. SQL Server will do some conversions, so these may not have to match the actual table definition exactly. If not provided will insert into all columns assuming nvarchar(4000) NULL for all columns. If only the column name is provided, the type is assumed to be nvarchar(4000) NULL. If rows are given with file, you cannot specify non-string data types. If rows are given with data, the values must be a type supported by the serializer for the column in tds_types. :type columns: list :keyword check_constraints: Check table constraints for incoming data :type check_constraints: bool :keyword fire_triggers: Enable or disable triggers for table :type fire_triggers: bool :keyword keep_nulls: If enabled null values inserted as-is, instead of inserting default value for column :type keep_nulls: bool :keyword kb_per_batch: Kilobytes per batch can be used to optimize performance, see MSSQL server documentation for details :type kb_per_batch: int :keyword rows_per_batch: Rows per batch can be used to optimize performance, see MSSQL server documentation for details :type rows_per_batch: int :keyword order: The ordering of the data in source table. List of columns with ASC or DESC suffix. E.g. ``['order_id ASC', 'name DESC']`` Can be used to optimize performance, see MSSQL server documentation for details :type order: list :keyword tablock: Enable or disable table lock for the duration of bulk load :keyword schema: Name of schema for table or view, if not specified default schema will be used :keyword null_string: String that should be interpreted as a NULL when reading the CSV file. Has no meaning if using data instead of file. :keyword data: The data to insert as an iterable of rows, which are iterables of values. Specify either this or file, not both.
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/__init__.py#L898-L1003
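A small end-to-end sketch of the bulk loader; the table name and CSV content are placeholders.

import io

f = io.StringIO('1,Pulsar\n2,Galaxy\n')    # two CSV rows
cur.copy_to(file=f, table_or_view='things', sep=',',
            columns=['id', 'name'])        # names only, so nvarchar(4000) NULL is assumed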
denisenkom/pytds
src/pytds/__init__.py
_MarsCursor.close
def close(self):
    """
    Closes the cursor. The cursor is unusable from this point.
    """
    if self._session is not None:
        try:
            self._session.close()
            self._session = None
        except socket.error as e:
            if e.errno != errno.ECONNRESET:
                raise
python
Closes the cursor. The cursor is unusable from this point.
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/__init__.py#L1031-L1041
denisenkom/pytds
src/pytds/__init__.py
_MarsCursor.callproc
def callproc(self, procname, parameters=()):
    """
    Call a stored procedure with the given name.

    :param procname: The name of the procedure to call
    :type procname: str
    :keyword parameters: The optional parameters for the procedure
    :type parameters: sequence
    """
    self._assert_open()
    return self._callproc(procname, parameters)
python
Call a stored procedure with the given name. :param procname: The name of the procedure to call :type procname: str :keyword parameters: The optional parameters for the procedure :type parameters: sequence
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/__init__.py#L1049-L1059
galaxyproject/pulsar
pulsar/managers/base/__init__.py
__posix_to_local_path
def __posix_to_local_path(path, local_path_module=os.path):
    """
    Converts a posix path (coming from Galaxy), to a local path (be it posix or Windows).

    >>> import ntpath
    >>> __posix_to_local_path('dataset_1_files/moo/cow', local_path_module=ntpath)
    'dataset_1_files\\\\moo\\\\cow'
    >>> import posixpath
    >>> __posix_to_local_path('dataset_1_files/moo/cow', local_path_module=posixpath)
    'dataset_1_files/moo/cow'
    """
    partial_path = deque()
    while True:
        if not path or path == '/':
            break
        (path, base) = posixpath.split(path)
        partial_path.appendleft(base)
    return local_path_module.join(*partial_path)
python
Converts a posix path (coming from Galaxy), to a local path (be it posix or Windows). >>> import ntpath >>> __posix_to_local_path('dataset_1_files/moo/cow', local_path_module=ntpath) 'dataset_1_files\\\\moo\\\\cow' >>> import posixpath >>> __posix_to_local_path('dataset_1_files/moo/cow', local_path_module=posixpath) 'dataset_1_files/moo/cow'
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/managers/base/__init__.py#L373-L390
galaxyproject/pulsar
pulsar/managers/base/__init__.py
JobDirectory.calculate_path
def calculate_path(self, remote_path, input_type):
    """ Verify remote_path is in directory for input_type inputs
    and create directory if needed.
    """
    directory, allow_nested_files = self._directory_for_file_type(input_type)
    path = get_mapped_file(directory, remote_path, allow_nested_files=allow_nested_files)
    return path
python
Verify remote_path is in directory for input_type inputs and create directory if needed.
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/managers/base/__init__.py#L231-L237
galaxyproject/pulsar
pulsar/client/client.py
BaseJobClient.setup
def setup(self, tool_id=None, tool_version=None, preserve_galaxy_python_environment=None):
    """
    Set up remote Pulsar server to run this job.
    """
    setup_args = {"job_id": self.job_id}
    if tool_id:
        setup_args["tool_id"] = tool_id
    if tool_version:
        setup_args["tool_version"] = tool_version
    if preserve_galaxy_python_environment:
        setup_args["preserve_galaxy_python_environment"] = preserve_galaxy_python_environment
    return self.setup_handler.setup(**setup_args)
python
Set up remote Pulsar server to run this job.
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/client/client.py#L66-L77
galaxyproject/pulsar
pulsar/client/client.py
JobClient.launch
def launch(self, command_line, dependencies_description=None, env=[],
           remote_staging=[], job_config=None):
    """
    Queue up the execution of the supplied `command_line` on the remote
    server. Called launch for historical reasons, should be renamed to
    enqueue or something like that.

    **Parameters**

    command_line : str
        Command to execute.
    """
    launch_params = dict(command_line=command_line, job_id=self.job_id)
    submit_params_dict = submit_params(self.destination_params)
    if submit_params_dict:
        launch_params['params'] = json_dumps(submit_params_dict)
    if dependencies_description:
        launch_params['dependencies_description'] = json_dumps(dependencies_description.to_dict())
    if env:
        launch_params['env'] = json_dumps(env)
    if remote_staging:
        launch_params['remote_staging'] = json_dumps(remote_staging)
    if job_config and 'touch_outputs' in job_config:
        # message clients pass the entire job config
        launch_params['submit_extras'] = json_dumps({'touch_outputs': job_config['touch_outputs']})
    if job_config and self.setup_handler.local:
        # Setup not yet called, job properties were inferred from
        # destination arguments. Hence, must have Pulsar setup job
        # before queueing.
        setup_params = _setup_params_from_job_config(job_config)
        launch_params['setup_params'] = json_dumps(setup_params)
    return self._raw_execute("submit", launch_params)
python
Queue up the execution of the supplied `command_line` on the remote server. Called launch for historical reasons, should be renamed to enqueue or something like that. **Parameters** command_line : str Command to execute.
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/client/client.py#L102-L133
galaxyproject/pulsar
pulsar/client/client.py
JobClient.fetch_output
def fetch_output(self, path, name, working_directory, action_type, output_type):
    """
    Fetch (transfer, copy, etc...) an output from the remote Pulsar server.

    **Parameters**

    path : str
        Local path of the dataset.
    name : str
        Remote name of file (i.e. path relative to remote staging output
        or working directory).
    working_directory : str
        Local working_directory for the job.
    action_type : str
        Where to find file on Pulsar (output_workdir or output). legacy is
        also an option; in this case Pulsar is asked for location - this
        will only be used if targeting an older Pulsar server that didn't
        return statuses allowing this to be inferred.
    """
    if output_type in ['output_workdir', 'output_metadata']:
        self._populate_output_path(name, path, action_type, output_type)
    elif output_type == 'output':
        self._fetch_output(path=path, name=name, action_type=action_type)
    else:
        raise Exception("Unknown output_type %s" % output_type)
python
Fetch (transfer, copy, etc...) an output from the remote Pulsar server. **Parameters** path : str Local path of the dataset. name : str Remote name of file (i.e. path relative to remote staging output or working directory). working_directory : str Local working_directory for the job. action_type : str Where to find file on Pulsar (output_workdir or output). legacy is also an option; in this case Pulsar is asked for location - this will only be used if targeting an older Pulsar server that didn't return statuses allowing this to be inferred.
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/client/client.py#L196-L220
galaxyproject/pulsar
pulsar/managers/queued_drmaa_xsede.py
check_output
def check_output(args):
    """Pipe-safe (and 2.6 compatible) version of subprocess.check_output """
    proc = Popen(args, stdout=PIPE)
    out = proc.communicate()[0]
    if proc.returncode:
        raise CalledProcessError(proc.returncode, args, output=out)
    return out
python
Pipe-safe (and 2.6 compatible) version of subprocess.check_output
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/managers/queued_drmaa_xsede.py#L42-L49
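Usage mirrors the stdlib function this replaces; the argv list is a placeholder submission command.

out = check_output(['sbatch', 'job.sh'])  # hypothetical scheduler invocation
print(out)                                # raw bytes of the command's stdout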
galaxyproject/pulsar
pulsar/client/action_mapper.py
FileActionMapper.unstructured_mappers
def unstructured_mappers(self):
    """ Return mappers that will map 'unstructured' files (i.e. go beyond
    mapping inputs, outputs, and config files).
    """
    return filter(lambda m: path_type.UNSTRUCTURED in m.path_types, self.mappers)
python
Return mappers that will map 'unstructured' files (i.e. go beyond mapping inputs, outputs, and config files).
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/client/action_mapper.py#L177-L181
galaxyproject/pulsar
pulsar/client/action_mapper.py
FileActionMapper.__process_action
def __process_action(self, action, file_type):
    """ Extension point to populate extra action information after an
    action has been created.
    """
    if getattr(action, "inject_url", False):
        self.__inject_url(action, file_type)
    if getattr(action, "inject_ssh_properties", False):
        self.__inject_ssh_properties(action)
python
Extension point to populate extra action information after an action has been created.
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/client/action_mapper.py#L235-L242
galaxyproject/pulsar
pulsar/managers/util/kill.py
_psutil_kill_pid
def _psutil_kill_pid(pid):
    """
    http://stackoverflow.com/questions/1230669/subprocess-deleting-child-processes-in-windows
    """
    try:
        parent = Process(pid)
        for child in parent.children(recursive=True):
            child.kill()
        parent.kill()
    except NoSuchProcess:
        return
python
http://stackoverflow.com/questions/1230669/subprocess-deleting-child-processes-in-windows
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/managers/util/kill.py#L20-L30
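A sketch of killing a whole process tree with this helper; 'sleep' stands in for a real job process.

import subprocess

proc = subprocess.Popen(['sleep', '60'])  # placeholder long-running child
_psutil_kill_pid(proc.pid)                # kills the child and any grandchildren
proc.wait()                               # reap the terminated process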
galaxyproject/pulsar
pulsar/managers/util/external.py
parse_external_id
def parse_external_id(output, type=EXTERNAL_ID_TYPE_ANY):
    """
    Attempt to parse the output of job submission commands for an external id.

    >>> parse_external_id("12345.pbsmanager")
    '12345.pbsmanager'
    >>> parse_external_id('Submitted batch job 185')
    '185'
    >>> parse_external_id('Submitted batch job 185', type='torque')
    'Submitted batch job 185'
    >>> parse_external_id('submitted to cluster 125.')
    '125'
    >>> parse_external_id('submitted to cluster 125.', type='slurm')
    >>>
    """
    external_id = None
    for pattern_type, pattern in EXTERNAL_ID_PATTERNS:
        if type != EXTERNAL_ID_TYPE_ANY and type != pattern_type:
            continue
        match = search(pattern, output)
        if match:
            external_id = match.group(1)
            break
    return external_id
python
Attempt to parse the output of job submission commands for an external id. >>> parse_external_id("12345.pbsmanager") '12345.pbsmanager' >>> parse_external_id('Submitted batch job 185') '185' >>> parse_external_id('Submitted batch job 185', type='torque') 'Submitted batch job 185' >>> parse_external_id('submitted to cluster 125.') '125' >>> parse_external_id('submitted to cluster 125.', type='slurm') >>>
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/managers/util/external.py#L12-L37
galaxyproject/pulsar
pulsar/managers/queued_external_drmaa.py
_handle_default
def _handle_default(value, script_name):
    """
    There are two potential variants of these scripts, the Bash scripts
    that are meant to be run within PULSAR_ROOT for older-style installs
    and the binaries created by setup.py as part of a proper pulsar
    installation.

    This method first looks for the newer style variant of these scripts
    and returns the full path to them if needed and falls back to the
    bash scripts if these cannot be found.
    """
    if value:
        return value

    installed_script = which("pulsar-%s" % script_name.replace("_", "-"))
    if installed_script:
        return installed_script
    else:
        return "scripts/%s.bash" % script_name
python
There are two potential variants of these scripts, the Bash scripts that are meant to be run within PULSAR_ROOT for older-style installs and the binaries created by setup.py as part of a proper pulsar installation. This method first looks for the newer style variant of these scripts and returns the full path to them if needed and falls back to the bash scripts if these cannot be found.
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/managers/queued_external_drmaa.py#L91-L108
galaxyproject/pulsar
pulsar/client/staging/up.py
_read
def _read(path):
    """
    Utility method to quickly read small files (config files and tool
    wrappers) into memory as text.
    """
    input = open(path, "r", encoding="utf-8")  # text mode, so read() returns str
    try:
        return input.read()
    finally:
        input.close()
python
Utility method to quickly read small files (config files and tool wrappers) into memory as text.
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/client/staging/up.py#L487-L496
galaxyproject/pulsar
pulsar/client/staging/up.py
JobInputs.find_referenced_subfiles
def find_referenced_subfiles(self, directory):
    """
    Return list of files below specified `directory` in job inputs. Could
    use more sophisticated logic (match quotes to handle spaces, handle
    subdirectories, etc...).

    **Parameters**

    directory : str
        Full path to directory to search.
    """
    if directory is None:
        return []
    pattern = r'''[\'\"]?(%s%s[^\s\'\"]+)[\'\"]?''' % (escape(directory), escape(sep))
    return self.find_pattern_references(pattern)
python
Return list of files below specified `directory` in job inputs. Could use more sophisticated logic (match quotes to handle spaces, handle subdirectories, etc...). **Parameters** directory : str Full path to directory to search.
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/client/staging/up.py#L360-L376
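What the pattern actually matches, demonstrated standalone by rebuilding the same regex outside the class; the directory and command line are placeholders.

import re
from os import sep
from re import escape

directory = '/data/inputs'
pattern = r'''[\'\"]?(%s%s[^\s\'\"]+)[\'\"]?''' % (escape(directory), escape(sep))
command = 'cat "/data/inputs/a.txt" /data/inputs/b.txt > out.txt'
print(re.findall(pattern, command))
# ['/data/inputs/a.txt', '/data/inputs/b.txt']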
galaxyproject/pulsar
pulsar/client/staging/up.py
JobInputs.rewrite_paths
def rewrite_paths(self, local_path, remote_path):
    """
    Rewrite references to `local_path` with `remote_path` in job inputs.
    """
    self.__rewrite_command_line(local_path, remote_path)
    self.__rewrite_config_files(local_path, remote_path)
python
Rewrite references to `local_path` with `remote_path` in job inputs.
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/client/staging/up.py#L387-L392
galaxyproject/pulsar
pulsar/client/staging/up.py
TransferTracker.rewrite_input_paths
def rewrite_input_paths(self):
    """
    For each file that has been transferred and renamed, update
    command_line and config files to reflect that rewrite.
    """
    for local_path, remote_path in self.file_renames.items():
        self.job_inputs.rewrite_paths(local_path, remote_path)
python
For each file that has been transferred and renamed, update command_line and config files to reflect that rewrite.
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/client/staging/up.py#L475-L481
galaxyproject/pulsar
pulsar/client/amqp_exchange.py
PulsarExchange.__get_payload
def __get_payload(self, uuid, failed):
    """Retry reading a message from the publish_uuid_store once, delete on
    the second failure."""
    # Caller should have the publish_uuid_store lock
    try:
        return self.publish_uuid_store[uuid]
    except Exception as exc:
        msg = "Failed to load payload from publish store for UUID %s, %s: %s"
        if uuid in failed:
            log.error(msg, uuid, "discarding", str(exc))
            self.__discard_publish_uuid(uuid, failed)
        else:
            log.error(msg, uuid, "will try again", str(exc))
            failed.add(uuid)
    return None
python
Retry reading a message from the publish_uuid_store once, delete on the second failure.
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/client/amqp_exchange.py#L239-L252
galaxyproject/pulsar
pulsar/manager_endpoint_util.py
__job_complete_dict
def __job_complete_dict(complete_status, manager, job_id):
    """
    Build final dictionary describing completed job for consumption by
    Pulsar client.
    """
    return_code = manager.return_code(job_id)
    if return_code == PULSAR_UNKNOWN_RETURN_CODE:
        return_code = None
    stdout_contents = manager.stdout_contents(job_id).decode("utf-8")
    stderr_contents = manager.stderr_contents(job_id).decode("utf-8")
    job_directory = manager.job_directory(job_id)
    as_dict = dict(
        job_id=job_id,
        complete="true",  # Is this still used or is it legacy.
        status=complete_status,
        returncode=return_code,
        stdout=stdout_contents,
        stderr=stderr_contents,
        working_directory=job_directory.working_directory(),
        metadata_directory=job_directory.metadata_directory(),
        working_directory_contents=job_directory.working_directory_contents(),
        metadata_directory_contents=job_directory.metadata_directory_contents(),
        outputs_directory_contents=job_directory.outputs_directory_contents(),
        system_properties=manager.system_properties(),
        pulsar_version=pulsar_version,
    )
    return as_dict
python
Build final dictionary describing completed job for consumption by Pulsar client.
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/manager_endpoint_util.py#L29-L54
galaxyproject/pulsar
pulsar/manager_endpoint_util.py
submit_job
def submit_job(manager, job_config):
    """ Launch new job from specified config. May have been previously 'setup'
    if 'setup_params' in job_config is empty.
    """
    # job_config is raw dictionary from JSON (from MQ or HTTP endpoint).
    job_id = job_config.get('job_id')
    try:
        command_line = job_config.get('command_line')
        setup_params = job_config.get('setup_params', {})
        force_setup = job_config.get('setup')
        remote_staging = job_config.get('remote_staging', {})
        dependencies_description = job_config.get('dependencies_description', None)
        env = job_config.get('env', [])
        submit_params = job_config.get('submit_params', {})
        touch_outputs = job_config.get('touch_outputs', [])

        job_config = None
        if setup_params or force_setup:
            input_job_id = setup_params.get("job_id", job_id)
            tool_id = setup_params.get("tool_id", None)
            tool_version = setup_params.get("tool_version", None)
            use_metadata = setup_params.get("use_metadata", False)
            job_config = setup_job(
                manager,
                input_job_id,
                tool_id,
                tool_version,
                use_metadata,
            )

        if job_config is not None:
            job_directory = job_config["job_directory"]
            jobs_directory = os.path.abspath(os.path.join(job_directory, os.pardir))
            command_line = command_line.replace('__PULSAR_JOBS_DIRECTORY__', jobs_directory)
            # TODO: Handle __PULSAR_JOB_DIRECTORY__ config files, metadata files, etc...

        manager.touch_outputs(job_id, touch_outputs)
        launch_config = {
            "remote_staging": remote_staging,
            "command_line": command_line,
            "dependencies_description": dependencies_description,
            "submit_params": submit_params,
            "env": env,
            "setup_params": setup_params,
        }
        manager.preprocess_and_launch(job_id, launch_config)
    except Exception:
        manager.handle_failure_before_launch(job_id)
        raise
python
Launch new job from specified config. May have been previously 'setup' if 'setup_params' in job_config is empty.
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/manager_endpoint_util.py#L57-L105
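A hypothetical job_config using only keys the function reads; 'manager' stands for a configured Pulsar manager instance built elsewhere.

job_config = {
    'job_id': '123',
    'command_line': 'echo Hello',
    'setup_params': {'tool_id': 'cat1', 'tool_version': '1.0.0'},
    'remote_staging': {},
    'env': [],
    'submit_params': {},
    'touch_outputs': [],
}
submit_job(manager, job_config)  # 'manager' is an assumption, not shown here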
galaxyproject/pulsar
pulsar/manager_endpoint_util.py
setup_job
def setup_job(manager, job_id, tool_id, tool_version, use_metadata=False):
    """ Set up new job from these inputs and return dict summarizing state
    (used to configure command line).
    """
    job_id = manager.setup_job(job_id, tool_id, tool_version)
    if use_metadata:
        manager.enable_metadata_directory(job_id)
    return build_job_config(
        job_id=job_id,
        job_directory=manager.job_directory(job_id),
        system_properties=manager.system_properties(),
        tool_id=tool_id,
        tool_version=tool_version
    )
python
Set up new job from these inputs and return dict summarizing state (used to configure command line).
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/manager_endpoint_util.py#L108-L121
galaxyproject/pulsar
pulsar/core.py
PulsarApp.__setup_tool_config
def __setup_tool_config(self, conf):
    """ Set up toolbox object and authorization mechanism based
    on supplied toolbox_path.
    """
    tool_config_files = conf.get("tool_config_files", None)
    if not tool_config_files:
        # For compatibility with Galaxy, allow tool_config_file
        # option name.
        tool_config_files = conf.get("tool_config_file", None)
    toolbox = None
    if tool_config_files:
        toolbox = ToolBox(tool_config_files)
    else:
        log.info(NOT_WHITELIST_WARNING)
    self.toolbox = toolbox
    self.authorizer = get_authorizer(toolbox)
python
Set up toolbox object and authorization mechanism based on supplied toolbox_path.
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/core.py#L67-L83
galaxyproject/pulsar
pulsar/core.py
PulsarApp.only_manager
def only_manager(self):
    """Convenience accessor for tests and contexts with sole manager."""
    assert len(self.managers) == 1, MULTIPLE_MANAGERS_MESSAGE
    return list(self.managers.values())[0]
python
Convenience accessor for tests and contexts with sole manager.
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/core.py#L138-L141
galaxyproject/pulsar
pulsar/web/wsgi.py
app_factory
def app_factory(global_conf, **local_conf):
    """
    Returns the Pulsar WSGI application.
    """
    configuration_file = global_conf.get("__file__", None)
    webapp = init_webapp(ini_path=configuration_file, local_conf=local_conf)
    return webapp
python
Returns the Pulsar WSGI application.
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/web/wsgi.py#L14-L20
galaxyproject/pulsar
pulsar/managers/queued.py
QueueManager.run_next
def run_next(self):
    """
    Run the next item in the queue (a job waiting to run).
    """
    while 1:
        (op, obj) = self.work_queue.get()
        if op is STOP_SIGNAL:
            return
        try:
            (job_id, command_line) = obj
            try:
                os.remove(self._job_file(job_id, JOB_FILE_COMMAND_LINE))
            except Exception:
                log.exception("Running command but failed to delete - command may rerun on Pulsar boot.")
            # _run will not do anything if job has been cancelled.
            self._run(job_id, command_line, background=False)
        except Exception:
            log.warn("Uncaught exception running job with job_id %s" % job_id)
            traceback.print_exc()
python
Run the next item in the queue (a job waiting to run).
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/managers/queued.py#L77-L95
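Editor's note: run_next consumes (op, obj) tuples and exits when op is the STOP_SIGNAL sentinel. Below is a minimal, self-contained sketch of that protocol; STOP_SIGNAL and the queued payloads here are illustrative stand-ins, not Pulsar's actual objects.

from queue import Queue

STOP_SIGNAL = object()  # unique sentinel, compared with `is`
RUN = "run"

work_queue = Queue()
work_queue.put((RUN, ("job-1", "echo hello")))
work_queue.put((STOP_SIGNAL, None))

while True:
    op, obj = work_queue.get()
    if op is STOP_SIGNAL:
        break
    job_id, command_line = obj
    print("would run %s: %s" % (job_id, command_line))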
galaxyproject/pulsar
tools/install_venv.py
check_dependencies
def check_dependencies():
    """Make sure virtualenv is in the path."""
    print 'Checking dependencies...'
    if not HAS_VIRTUALENV:
        print 'Virtual environment not found.'
        # Try installing it via easy_install...
        if HAS_EASY_INSTALL:
            print 'Installing virtualenv via easy_install...',
            run_command(['easy_install', 'virtualenv'],
                        die_message='easy_install failed to install virtualenv'
                                    '\ndevelopment requires virtualenv, please'
                                    ' install it using your favorite tool')
            if not run_command(['which', 'virtualenv']):
                die('ERROR: virtualenv not found in path.\n\ndevelopment '
                    ' requires virtualenv, please install it using your'
                    ' favorite package management tool and ensure'
                    ' virtualenv is in your path')
            print 'virtualenv installation done.'
        else:
            die('easy_install not found.\n\nInstall easy_install'
                ' (python-setuptools in ubuntu) or virtualenv by hand,'
                ' then rerun.')
    print 'dependency check done.'
python
def check_dependencies():
    """Make sure virtualenv is in the path."""
    print 'Checking dependencies...'
    if not HAS_VIRTUALENV:
        print 'Virtual environment not found.'
        # Try installing it via easy_install...
        if HAS_EASY_INSTALL:
            print 'Installing virtualenv via easy_install...',
            run_command(['easy_install', 'virtualenv'],
                        die_message='easy_install failed to install virtualenv'
                                    '\ndevelopment requires virtualenv, please'
                                    ' install it using your favorite tool')
            if not run_command(['which', 'virtualenv']):
                die('ERROR: virtualenv not found in path.\n\ndevelopment '
                    ' requires virtualenv, please install it using your'
                    ' favorite package management tool and ensure'
                    ' virtualenv is in your path')
            print 'virtualenv installation done.'
        else:
            die('easy_install not found.\n\nInstall easy_install'
                ' (python-setuptools in ubuntu) or virtualenv by hand,'
                ' then rerun.')
    print 'dependency check done.'
Make sure virtualenv is in the path.
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/tools/install_venv.py#L66-L89
galaxyproject/pulsar
tools/install_venv.py
create_virtualenv
def create_virtualenv(venv=VENV):
    """Creates the virtual environment and installs PIP only into the
    virtual environment
    """
    print 'Creating venv...',
    run_command(['virtualenv', '-q', '--no-site-packages', VENV])
    print 'done.'
    print 'Installing pip in virtualenv...',
    if not run_command([WITH_VENV, 'easy_install', 'pip']).strip():
        die("Failed to install pip.")
    print 'done.'
    print 'Installing distribute in virtualenv...'
    pip_install('distribute>=0.6.24')
    print 'done.'
python
def create_virtualenv(venv=VENV):
    """Creates the virtual environment and installs PIP only into the
    virtual environment
    """
    print 'Creating venv...',
    run_command(['virtualenv', '-q', '--no-site-packages', VENV])
    print 'done.'
    print 'Installing pip in virtualenv...',
    if not run_command([WITH_VENV, 'easy_install', 'pip']).strip():
        die("Failed to install pip.")
    print 'done.'
    print 'Installing distribute in virtualenv...'
    pip_install('distribute>=0.6.24')
    print 'done.'
Creates the virtual environment and installs PIP only into the virtual environment
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/tools/install_venv.py#L92-L105
galaxyproject/pulsar
pulsar/client/path_mapper.py
PathMapper.__remote_path_rewrite
def __remote_path_rewrite(self, dataset_path, dataset_path_type, name=None):
    """ Return remote path of this file (if staging is required) else None.
    """
    path = str(dataset_path)  # Use false_path if needed.
    action = self.action_mapper.action(path, dataset_path_type)
    if action.staging_needed:
        if name is None:
            name = os.path.basename(path)
        remote_directory = self.__remote_directory(dataset_path_type)
        remote_path_rewrite = self.path_helper.remote_join(remote_directory, name)
    else:
        # Actions which don't require staging MUST define a path_rewrite
        # method.
        remote_path_rewrite = action.path_rewrite(self.path_helper)
    return remote_path_rewrite
python
def __remote_path_rewrite(self, dataset_path, dataset_path_type, name=None):
    """ Return remote path of this file (if staging is required) else None.
    """
    path = str(dataset_path)  # Use false_path if needed.
    action = self.action_mapper.action(path, dataset_path_type)
    if action.staging_needed:
        if name is None:
            name = os.path.basename(path)
        remote_directory = self.__remote_directory(dataset_path_type)
        remote_path_rewrite = self.path_helper.remote_join(remote_directory, name)
    else:
        # Actions which don't require staging MUST define a path_rewrite
        # method.
        remote_path_rewrite = action.path_rewrite(self.path_helper)
    return remote_path_rewrite
Return remote path of this file (if staging is required) else None.
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/client/path_mapper.py#L67-L82
galaxyproject/pulsar
pulsar/managers/util/retry.py
_retry_over_time
def _retry_over_time(fun, catch, args=[], kwargs={}, errback=None,
                     max_retries=None, interval_start=2, interval_step=2,
                     interval_max=30):
    """Retry the function over and over until max retries is exceeded.

    For each retry we sleep for a while before we try again, this interval
    is increased for every retry until the max seconds is reached.

    :param fun: The function to try
    :param catch: Exceptions to catch, can be either tuple or a single
        exception class.
    :keyword args: Positional arguments passed on to the function.
    :keyword kwargs: Keyword arguments passed on to the function.
    :keyword max_retries: Maximum number of retries before we give up.
        If this is not set, we will retry forever.
    :keyword interval_start: How long (in seconds) we start sleeping between
        retries.
    :keyword interval_step: By how much the interval is increased for each
        retry.
    :keyword interval_max: Maximum number of seconds to sleep between retries.
    """
    retries = 0
    interval_range = __fxrange(interval_start,
                               interval_max + interval_start,
                               interval_step, repeatlast=True)
    for retries in count():
        try:
            return fun(*args, **kwargs)
        except catch as exc:
            if max_retries and retries >= max_retries:
                raise
            tts = float(errback(exc, interval_range, retries)
                        if errback else next(interval_range))
            if tts:
                sleep(tts)
python
def _retry_over_time(fun, catch, args=[], kwargs={}, errback=None,
                     max_retries=None, interval_start=2, interval_step=2,
                     interval_max=30):
    """Retry the function over and over until max retries is exceeded.

    For each retry we sleep for a while before we try again, this interval
    is increased for every retry until the max seconds is reached.

    :param fun: The function to try
    :param catch: Exceptions to catch, can be either tuple or a single
        exception class.
    :keyword args: Positional arguments passed on to the function.
    :keyword kwargs: Keyword arguments passed on to the function.
    :keyword max_retries: Maximum number of retries before we give up.
        If this is not set, we will retry forever.
    :keyword interval_start: How long (in seconds) we start sleeping between
        retries.
    :keyword interval_step: By how much the interval is increased for each
        retry.
    :keyword interval_max: Maximum number of seconds to sleep between retries.
    """
    retries = 0
    interval_range = __fxrange(interval_start,
                               interval_max + interval_start,
                               interval_step, repeatlast=True)
    for retries in count():
        try:
            return fun(*args, **kwargs)
        except catch as exc:
            if max_retries and retries >= max_retries:
                raise
            tts = float(errback(exc, interval_range, retries)
                        if errback else next(interval_range))
            if tts:
                sleep(tts)
Retry the function over and over until max retries is exceeded.

For each retry we sleep for a while before we try again, this interval is increased for every retry until the max seconds is reached.

:param fun: The function to try
:param catch: Exceptions to catch, can be either tuple or a single exception class.
:keyword args: Positional arguments passed on to the function.
:keyword kwargs: Keyword arguments passed on to the function.
:keyword max_retries: Maximum number of retries before we give up. If this is not set, we will retry forever.
:keyword interval_start: How long (in seconds) we start sleeping between retries.
:keyword interval_step: By how much the interval is increased for each retry.
:keyword interval_max: Maximum number of seconds to sleep between retries.
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/managers/util/retry.py#L65-L100
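Editor's note: _retry_over_time is module-private (and __fxrange is name-mangled), so here is a self-contained sketch of the same retry-with-increasing-interval pattern; `flaky` is a hypothetical function used only for illustration.

import itertools
import time

def retry_over_time(fun, catch, max_retries=3, interval_start=0.1,
                    interval_step=0.1, interval_max=1.0):
    interval = interval_start
    for retries in itertools.count():
        try:
            return fun()
        except catch:
            if max_retries and retries >= max_retries:
                raise
            time.sleep(interval)
            # grow the sleep interval up to the configured maximum
            interval = min(interval + interval_step, interval_max)

attempts = {"n": 0}

def flaky():
    attempts["n"] += 1
    if attempts["n"] < 3:
        raise IOError("transient failure")
    return "ok"

assert retry_over_time(flaky, IOError) == "ok"  # succeeds on the third call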
galaxyproject/pulsar
pulsar/client/manager.py
ClientManager.get_client
def get_client(self, destination_params, job_id, **kwargs):
    """Build a client given specific destination parameters and job_id."""
    destination_params = _parse_destination_params(destination_params)
    destination_params.update(**kwargs)
    job_manager_interface_class = self.job_manager_interface_class
    job_manager_interface_args = dict(destination_params=destination_params,
                                      **self.job_manager_interface_args)
    job_manager_interface = job_manager_interface_class(**job_manager_interface_args)
    return self.client_class(destination_params, job_id, job_manager_interface,
                             **self.extra_client_kwds)
python
def get_client(self, destination_params, job_id, **kwargs):
    """Build a client given specific destination parameters and job_id."""
    destination_params = _parse_destination_params(destination_params)
    destination_params.update(**kwargs)
    job_manager_interface_class = self.job_manager_interface_class
    job_manager_interface_args = dict(destination_params=destination_params,
                                      **self.job_manager_interface_args)
    job_manager_interface = job_manager_interface_class(**job_manager_interface_args)
    return self.client_class(destination_params, job_id, job_manager_interface,
                             **self.extra_client_kwds)
Build a client given specific destination parameters and job_id.
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/client/manager.py#L83-L90
galaxyproject/pulsar
pulsar/managers/util/cli/__init__.py
CliInterface.get_plugins
def get_plugins(self, shell_params, job_params):
    """ Return shell and job interface defined by and configured via
    specified params.
    """
    shell = self.get_shell_plugin(shell_params)
    job_interface = self.get_job_interface(job_params)
    return shell, job_interface
python
def get_plugins(self, shell_params, job_params):
    """ Return shell and job interface defined by and configured via
    specified params.
    """
    shell = self.get_shell_plugin(shell_params)
    job_interface = self.get_job_interface(job_params)
    return shell, job_interface
Return shell and job interface defined by and configured via specified params.
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/managers/util/cli/__init__.py#L49-L56
galaxyproject/pulsar
pulsar/cache/util.py
atomicish_move
def atomicish_move(source, destination, tmp_suffix="_TMP"):
    """Move source to destination without risk of partial moves.

    > from tempfile import mkdtemp
    > from os.path import join, exists
    > temp_dir = mkdtemp()
    > source = join(temp_dir, "the_source")
    > destination = join(temp_dir, "the_dest")
    > open(source, "wb").write(b"Hello World!")
    > assert exists(source)
    > assert not exists(destination)
    > atomicish_move(source, destination)
    > assert not exists(source)
    > assert exists(destination)
    """
    destination_dir = os.path.dirname(destination)
    destination_name = os.path.basename(destination)
    temp_destination = os.path.join(destination_dir,
                                    "%s%s" % (destination_name, tmp_suffix))
    shutil.move(source, temp_destination)
    os.rename(temp_destination, destination)
python
def atomicish_move(source, destination, tmp_suffix="_TMP"):
    """Move source to destination without risk of partial moves.

    > from tempfile import mkdtemp
    > from os.path import join, exists
    > temp_dir = mkdtemp()
    > source = join(temp_dir, "the_source")
    > destination = join(temp_dir, "the_dest")
    > open(source, "wb").write(b"Hello World!")
    > assert exists(source)
    > assert not exists(destination)
    > atomicish_move(source, destination)
    > assert not exists(source)
    > assert exists(destination)
    """
    destination_dir = os.path.dirname(destination)
    destination_name = os.path.basename(destination)
    temp_destination = os.path.join(destination_dir,
                                    "%s%s" % (destination_name, tmp_suffix))
    shutil.move(source, temp_destination)
    os.rename(temp_destination, destination)
Move source to destination without risk of partial moves.

> from tempfile import mkdtemp
> from os.path import join, exists
> temp_dir = mkdtemp()
> source = join(temp_dir, "the_source")
> destination = join(temp_dir, "the_dest")
> open(source, "wb").write(b"Hello World!")
> assert exists(source)
> assert not exists(destination)
> atomicish_move(source, destination)
> assert not exists(source)
> assert exists(destination)
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/cache/util.py#L8-L27
galaxyproject/pulsar
pulsar/managers/util/env.py
env_to_statement
def env_to_statement(env):
    '''Convert the abstract description of an environment variable definition
    into a statement for a shell script.

    >>> env_to_statement(dict(name='X', value='Y'))
    'X="Y"; export X'
    >>> env_to_statement(dict(name='X', value='Y', raw=True))
    'X=Y; export X'
    >>> env_to_statement(dict(name='X', value='"A","B","C"'))
    'X="\\\\"A\\\\",\\\\"B\\\\",\\\\"C\\\\""; export X'
    >>> env_to_statement(dict(file="Y"))
    '. "Y"'
    >>> env_to_statement(dict(file="'RAW $FILE'", raw=True))
    ". 'RAW $FILE'"
    >>> # Source file takes precedence
    >>> env_to_statement(dict(name='X', value='"A","B","C"', file="S"))
    '. "S"'
    >>> env_to_statement(dict(execute="module load java/1.5.1"))
    'module load java/1.5.1'
    '''
    source_file = env.get('file', None)
    if source_file:
        return '. %s' % __escape(source_file, env)
    execute = env.get('execute', None)
    if execute:
        return execute
    name = env['name']
    value = __escape(env['value'], env)
    return '%s=%s; export %s' % (name, value, name)
python
def env_to_statement(env):
    '''Convert the abstract description of an environment variable definition
    into a statement for a shell script.

    >>> env_to_statement(dict(name='X', value='Y'))
    'X="Y"; export X'
    >>> env_to_statement(dict(name='X', value='Y', raw=True))
    'X=Y; export X'
    >>> env_to_statement(dict(name='X', value='"A","B","C"'))
    'X="\\\\"A\\\\",\\\\"B\\\\",\\\\"C\\\\""; export X'
    >>> env_to_statement(dict(file="Y"))
    '. "Y"'
    >>> env_to_statement(dict(file="'RAW $FILE'", raw=True))
    ". 'RAW $FILE'"
    >>> # Source file takes precedence
    >>> env_to_statement(dict(name='X', value='"A","B","C"', file="S"))
    '. "S"'
    >>> env_to_statement(dict(execute="module load java/1.5.1"))
    'module load java/1.5.1'
    '''
    source_file = env.get('file', None)
    if source_file:
        return '. %s' % __escape(source_file, env)
    execute = env.get('execute', None)
    if execute:
        return execute
    name = env['name']
    value = __escape(env['value'], env)
    return '%s=%s; export %s' % (name, value, name)
Convert the abstract description of an environment variable definition into a statement for a shell script.

>>> env_to_statement(dict(name='X', value='Y'))
'X="Y"; export X'
>>> env_to_statement(dict(name='X', value='Y', raw=True))
'X=Y; export X'
>>> env_to_statement(dict(name='X', value='"A","B","C"'))
'X="\\\\"A\\\\",\\\\"B\\\\",\\\\"C\\\\""; export X'
>>> env_to_statement(dict(file="Y"))
'. "Y"'
>>> env_to_statement(dict(file="'RAW $FILE'", raw=True))
". 'RAW $FILE'"
>>> # Source file takes precedence
>>> env_to_statement(dict(name='X', value='"A","B","C"', file="S"))
'. "S"'
>>> env_to_statement(dict(execute="module load java/1.5.1"))
'module load java/1.5.1'
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/managers/util/env.py#L5-L33
galaxyproject/pulsar
pulsar/util/__init__.py
copy_to_temp
def copy_to_temp(object):
    """
    Copy file-like object to temp file and return path.
    """
    temp_file = NamedTemporaryFile(delete=False)
    _copy_and_close(object, temp_file)
    return temp_file.name
python
def copy_to_temp(object):
    """
    Copy file-like object to temp file and return path.
    """
    temp_file = NamedTemporaryFile(delete=False)
    _copy_and_close(object, temp_file)
    return temp_file.name
Copy file-like object to temp file and return path.
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/util/__init__.py#L27-L34
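Editor's note: a runnable sketch of the copy-to-temp pattern above; shutil.copyfileobj stands in for Pulsar's private _copy_and_close helper, whose copy-then-close behavior is assumed from its name.

import os
import shutil
from io import BytesIO
from tempfile import NamedTemporaryFile

def copy_to_temp(fileobj):
    temp_file = NamedTemporaryFile(delete=False)  # path survives close()
    try:
        shutil.copyfileobj(fileobj, temp_file)
    finally:
        temp_file.close()
    return temp_file.name

path = copy_to_temp(BytesIO(b"hello"))
with open(path, "rb") as f:
    assert f.read() == b"hello"
os.remove(path)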
galaxyproject/pulsar
pulsar/managers/util/condor/__init__.py
build_submit_description
def build_submit_description(executable, output, error, user_log, query_params):
    """
    Build up the contents of a condor submit description file.

    >>> submit_args = dict(executable='/path/to/script', output='o', error='e', user_log='ul')
    >>> submit_args['query_params'] = dict()
    >>> default_description = build_submit_description(**submit_args)
    >>> assert 'executable = /path/to/script' in default_description
    >>> assert 'output = o' in default_description
    >>> assert 'error = e' in default_description
    >>> assert 'queue' in default_description
    >>> assert 'universe = vanilla' in default_description
    >>> assert 'universe = standard' not in default_description
    >>> submit_args['query_params'] = dict(universe='standard')
    >>> std_description = build_submit_description(**submit_args)
    >>> assert 'universe = vanilla' not in std_description
    >>> assert 'universe = standard' in std_description
    """
    all_query_params = DEFAULT_QUERY_CLASSAD.copy()
    all_query_params.update(query_params)

    submit_description = []
    for key, value in all_query_params.items():
        submit_description.append('%s = %s' % (key, value))
    submit_description.append('executable = ' + executable)
    submit_description.append('output = ' + output)
    submit_description.append('error = ' + error)
    submit_description.append('log = ' + user_log)
    submit_description.append('queue')
    return '\n'.join(submit_description)
python
def build_submit_description(executable, output, error, user_log, query_params):
    """
    Build up the contents of a condor submit description file.

    >>> submit_args = dict(executable='/path/to/script', output='o', error='e', user_log='ul')
    >>> submit_args['query_params'] = dict()
    >>> default_description = build_submit_description(**submit_args)
    >>> assert 'executable = /path/to/script' in default_description
    >>> assert 'output = o' in default_description
    >>> assert 'error = e' in default_description
    >>> assert 'queue' in default_description
    >>> assert 'universe = vanilla' in default_description
    >>> assert 'universe = standard' not in default_description
    >>> submit_args['query_params'] = dict(universe='standard')
    >>> std_description = build_submit_description(**submit_args)
    >>> assert 'universe = vanilla' not in std_description
    >>> assert 'universe = standard' in std_description
    """
    all_query_params = DEFAULT_QUERY_CLASSAD.copy()
    all_query_params.update(query_params)

    submit_description = []
    for key, value in all_query_params.items():
        submit_description.append('%s = %s' % (key, value))
    submit_description.append('executable = ' + executable)
    submit_description.append('output = ' + output)
    submit_description.append('error = ' + error)
    submit_description.append('log = ' + user_log)
    submit_description.append('queue')
    return '\n'.join(submit_description)
Build up the contents of a condor submit description file.

>>> submit_args = dict(executable='/path/to/script', output='o', error='e', user_log='ul')
>>> submit_args['query_params'] = dict()
>>> default_description = build_submit_description(**submit_args)
>>> assert 'executable = /path/to/script' in default_description
>>> assert 'output = o' in default_description
>>> assert 'error = e' in default_description
>>> assert 'queue' in default_description
>>> assert 'universe = vanilla' in default_description
>>> assert 'universe = standard' not in default_description
>>> submit_args['query_params'] = dict(universe='standard')
>>> std_description = build_submit_description(**submit_args)
>>> assert 'universe = vanilla' not in std_description
>>> assert 'universe = standard' in std_description
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/managers/util/condor/__init__.py#L39-L68
galaxyproject/pulsar
pulsar/managers/util/condor/__init__.py
condor_submit
def condor_submit(submit_file):
    """
    Submit a condor job described by the given file. Parse an external id for
    the submission or return None and a reason for the failure.
    """
    external_id = None
    try:
        submit = Popen(('condor_submit', submit_file), stdout=PIPE, stderr=STDOUT)
        message, _ = submit.communicate()
        if submit.returncode == 0:
            external_id = parse_external_id(message, type='condor')
        else:
            message = PROBLEM_PARSING_EXTERNAL_ID
    except Exception as e:
        message = str(e)
    return external_id, message
python
def condor_submit(submit_file):
    """
    Submit a condor job described by the given file. Parse an external id for
    the submission or return None and a reason for the failure.
    """
    external_id = None
    try:
        submit = Popen(('condor_submit', submit_file), stdout=PIPE, stderr=STDOUT)
        message, _ = submit.communicate()
        if submit.returncode == 0:
            external_id = parse_external_id(message, type='condor')
        else:
            message = PROBLEM_PARSING_EXTERNAL_ID
    except Exception as e:
        message = str(e)
    return external_id, message
Submit a condor job described by the given file. Parse an external id for the submission or return None and a reason for the failure.
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/managers/util/condor/__init__.py#L71-L86
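Editor's note: a hedged sketch of how build_submit_description and condor_submit compose. It assumes an HTCondor installation with condor_submit on PATH and that pulsar.managers.util.condor is importable, so treat it as illustrative rather than something to run as-is; the paths are hypothetical.

from tempfile import NamedTemporaryFile

from pulsar.managers.util.condor import build_submit_description, condor_submit

description = build_submit_description(
    executable="/path/to/script",
    output="job.out",
    error="job.err",
    user_log="job.log",
    query_params={},
)
with NamedTemporaryFile("w", suffix=".condor", delete=False) as f:
    f.write(description)
    submit_file = f.name

external_id, message = condor_submit(submit_file)
if external_id is None:
    print("submission failed: %s" % message)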
galaxyproject/pulsar
pulsar/managers/util/condor/__init__.py
condor_stop
def condor_stop(external_id):
    """
    Stop running condor job and return a failure_message if this fails.
    """
    failure_message = None
    try:
        check_call(('condor_rm', external_id))
    except CalledProcessError:
        failure_message = "condor_rm failed"
    except Exception as e:
        # Capture the error so callers actually see why condor_rm failed.
        failure_message = "error encountered calling condor_rm: %s" % e
    return failure_message
python
def condor_stop(external_id):
    """
    Stop running condor job and return a failure_message if this fails.
    """
    failure_message = None
    try:
        check_call(('condor_rm', external_id))
    except CalledProcessError:
        failure_message = "condor_rm failed"
    except Exception as e:
        # Capture the error so callers actually see why condor_rm failed.
        failure_message = "error encountered calling condor_rm: %s" % e
    return failure_message
Stop running condor job and return a failure_message if this fails.
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/managers/util/condor/__init__.py#L89-L101
galaxyproject/pulsar
pulsar/locks.py
LockManager.get_lock
def get_lock(self, path):
    """ Get a job lock corresponding to the path - assumes parent
    directory exists but the file itself does not.
    """
    if self.lockfile:
        return self.lockfile.LockFile(path)
    else:
        with self.job_locks_lock:
            if path not in self.job_locks:
                lock = threading.Lock()
                self.job_locks[path] = lock
            else:
                lock = self.job_locks[path]
            return lock
python
def get_lock(self, path):
    """ Get a job lock corresponding to the path - assumes parent
    directory exists but the file itself does not.
    """
    if self.lockfile:
        return self.lockfile.LockFile(path)
    else:
        with self.job_locks_lock:
            if path not in self.job_locks:
                lock = threading.Lock()
                self.job_locks[path] = lock
            else:
                lock = self.job_locks[path]
            return lock
Get a job lock corresponding to the path - assumes parent directory exists but the file itself does not.
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/locks.py#L23-L36
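Editor's note: a self-contained sketch of get_lock's in-process fallback branch (when no lockfile library is configured): one threading.Lock per path, with the dictionary itself guarded by its own lock so concurrent callers get the same lock object.

import threading

class SimpleLockManager(object):
    def __init__(self):
        self.job_locks = {}
        self.job_locks_lock = threading.Lock()

    def get_lock(self, path):
        with self.job_locks_lock:
            if path not in self.job_locks:
                self.job_locks[path] = threading.Lock()
            return self.job_locks[path]

manager = SimpleLockManager()
with manager.get_lock("/jobs/1/status"):
    pass  # mutate job state safely here
assert manager.get_lock("/jobs/1/status") is manager.get_lock("/jobs/1/status")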
galaxyproject/pulsar
pulsar/managers/__init__.py
ManagerProxy.shutdown
def shutdown(self, timeout=None):
    """ Optional. """
    try:
        shutdown_method = self._proxied_manager.shutdown
    except AttributeError:
        return
    shutdown_method(timeout)
python
def shutdown(self, timeout=None):
    """ Optional. """
    try:
        shutdown_method = self._proxied_manager.shutdown
    except AttributeError:
        return
    shutdown_method(timeout)
Optional.
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/managers/__init__.py#L113-L119
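Editor's note: a minimal sketch of the "optional method" delegation idiom above. The method is looked up before it is called so an AttributeError from the lookup (method missing) is not confused with one raised inside the method itself. The class names here are hypothetical.

class Proxied(object):
    def shutdown(self, timeout=None):
        print("shutting down (timeout=%r)" % timeout)

class Proxy(object):
    def __init__(self, proxied):
        self._proxied_manager = proxied

    def shutdown(self, timeout=None):
        try:
            shutdown_method = self._proxied_manager.shutdown
        except AttributeError:
            return  # proxied object has no shutdown; treat as a no-op
        shutdown_method(timeout)

Proxy(Proxied()).shutdown(5)   # delegates
Proxy(object()).shutdown(5)    # silently does nothing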
galaxyproject/pulsar
pulsar/client/setup_handler.py
build
def build(client, destination_args):
    """ Build a SetupHandler object for client from destination parameters.
    """
    # Have defined a remote job directory, let's do the setup locally.
    if client.job_directory:
        handler = LocalSetupHandler(client, destination_args)
    else:
        handler = RemoteSetupHandler(client)
    return handler
python
def build(client, destination_args):
    """ Build a SetupHandler object for client from destination parameters.
    """
    # Have defined a remote job directory, let's do the setup locally.
    if client.job_directory:
        handler = LocalSetupHandler(client, destination_args)
    else:
        handler = RemoteSetupHandler(client)
    return handler
Build a SetupHandler object for client from destination parameters.
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/client/setup_handler.py#L10-L18
galaxyproject/pulsar
pulsar/managers/util/drmaa/__init__.py
DrmaaSession.run_job
def run_job(self, **kwds):
    """ Create a DRMAA job template, populate with specified properties,
    run the job, and return the external_job_id.
    """
    template = DrmaaSession.session.createJobTemplate()
    try:
        for key in kwds:
            setattr(template, key, kwds[key])
        with DrmaaSession.session_lock:
            return DrmaaSession.session.runJob(template)
    finally:
        DrmaaSession.session.deleteJobTemplate(template)
python
def run_job(self, **kwds):
    """ Create a DRMAA job template, populate with specified properties,
    run the job, and return the external_job_id.
    """
    template = DrmaaSession.session.createJobTemplate()
    try:
        for key in kwds:
            setattr(template, key, kwds[key])
        with DrmaaSession.session_lock:
            return DrmaaSession.session.runJob(template)
    finally:
        DrmaaSession.session.deleteJobTemplate(template)
Create a DRMAA job template, populate with specified properties, run the job, and return the external_job_id.
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/managers/util/drmaa/__init__.py#L57-L69
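Editor's note: a hedged sketch of the drmaa-python calls run_job wraps. It requires the drmaa package and a DRMAA-enabled cluster, so it is illustrative only; the command and arguments are hypothetical.

import drmaa

session = drmaa.Session()
session.initialize()
template = session.createJobTemplate()
try:
    template.remoteCommand = "/bin/sleep"
    template.args = ["60"]
    external_job_id = session.runJob(template)  # what run_job returns
finally:
    # Always release the template, as run_job does in its finally block.
    session.deleteJobTemplate(template)
    session.exit()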
galaxyproject/pulsar
pulsar/client/destination.py
url_to_destination_params
def url_to_destination_params(url):
    """Convert a legacy runner URL to a job destination

    >>> params_simple = url_to_destination_params("http://localhost:8913/")
    >>> params_simple["url"]
    'http://localhost:8913/'
    >>> params_simple["private_token"] is None
    True
    >>> advanced_url = "https://1234x@example.com:8914/managers/longqueue"
    >>> params_advanced = url_to_destination_params(advanced_url)
    >>> params_advanced["url"]
    'https://example.com:8914/managers/longqueue/'
    >>> params_advanced["private_token"]
    '1234x'
    >>> runner_url = "pulsar://http://localhost:8913/"
    >>> runner_params = url_to_destination_params(runner_url)
    >>> runner_params['url']
    'http://localhost:8913/'
    """
    if url.startswith("pulsar://"):
        url = url[len("pulsar://"):]

    if not url.endswith("/"):
        url += "/"

    # Check for private token embedded in the URL. A URL of the form
    # https://moo@cow:8913 will try to contact https://cow:8913
    # with a private key of moo
    private_token_format = "https?://(.*)@.*/?"
    private_token_match = match(private_token_format, url)
    private_token = None
    if private_token_match:
        private_token = private_token_match.group(1)
        url = url.replace("%s@" % private_token, '', 1)

    destination_args = {"url": url,
                        "private_token": private_token}

    return destination_args
python
def url_to_destination_params(url):
    """Convert a legacy runner URL to a job destination

    >>> params_simple = url_to_destination_params("http://localhost:8913/")
    >>> params_simple["url"]
    'http://localhost:8913/'
    >>> params_simple["private_token"] is None
    True
    >>> advanced_url = "https://1234x@example.com:8914/managers/longqueue"
    >>> params_advanced = url_to_destination_params(advanced_url)
    >>> params_advanced["url"]
    'https://example.com:8914/managers/longqueue/'
    >>> params_advanced["private_token"]
    '1234x'
    >>> runner_url = "pulsar://http://localhost:8913/"
    >>> runner_params = url_to_destination_params(runner_url)
    >>> runner_params['url']
    'http://localhost:8913/'
    """
    if url.startswith("pulsar://"):
        url = url[len("pulsar://"):]

    if not url.endswith("/"):
        url += "/"

    # Check for private token embedded in the URL. A URL of the form
    # https://moo@cow:8913 will try to contact https://cow:8913
    # with a private key of moo
    private_token_format = "https?://(.*)@.*/?"
    private_token_match = match(private_token_format, url)
    private_token = None
    if private_token_match:
        private_token = private_token_match.group(1)
        url = url.replace("%s@" % private_token, '', 1)

    destination_args = {"url": url,
                        "private_token": private_token}

    return destination_args
Convert a legacy runner URL to a job destination

>>> params_simple = url_to_destination_params("http://localhost:8913/")
>>> params_simple["url"]
'http://localhost:8913/'
>>> params_simple["private_token"] is None
True
>>> advanced_url = "https://1234x@example.com:8914/managers/longqueue"
>>> params_advanced = url_to_destination_params(advanced_url)
>>> params_advanced["url"]
'https://example.com:8914/managers/longqueue/'
>>> params_advanced["private_token"]
'1234x'
>>> runner_url = "pulsar://http://localhost:8913/"
>>> runner_params = url_to_destination_params(runner_url)
>>> runner_params['url']
'http://localhost:8913/'
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/client/destination.py#L9-L48
galaxyproject/pulsar
pulsar/util/pastescript/serve.py
ensure_port_cleanup
def ensure_port_cleanup(bound_addresses, maxtries=30, sleeptime=2):
    """
    This makes sure any open ports are closed.

    Does this by connecting to them until they give connection
    refused.  Servers should call like::

        import paste.script
        ensure_port_cleanup([80, 443])
    """
    atexit.register(_cleanup_ports, bound_addresses, maxtries=maxtries,
                    sleeptime=sleeptime)
python
def ensure_port_cleanup(bound_addresses, maxtries=30, sleeptime=2):
    """
    This makes sure any open ports are closed.

    Does this by connecting to them until they give connection
    refused.  Servers should call like::

        import paste.script
        ensure_port_cleanup([80, 443])
    """
    atexit.register(_cleanup_ports, bound_addresses, maxtries=maxtries,
                    sleeptime=sleeptime)
This makes sure any open ports are closed.

Does this by connecting to them until they give connection refused. Servers should call like::

    import paste.script
    ensure_port_cleanup([80, 443])
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/util/pastescript/serve.py#L963-L974
galaxyproject/pulsar
pulsar/util/pastescript/serve.py
Command.standard_parser
def standard_parser(cls, verbose=True,
                    interactive=False,
                    no_interactive=False,
                    simulate=False,
                    quiet=False,
                    overwrite=False):
    """
    Create a standard ``OptionParser`` instance.

    Typically used like::

        class MyCommand(Command):
            parser = Command.standard_parser()

    Subclasses may redefine ``standard_parser``, so use the
    nearest superclass's class method.
    """
    parser = BoolOptionParser()
    if verbose:
        parser.add_option('-v', '--verbose',
                          action='count',
                          dest='verbose',
                          default=0)
    if quiet:
        parser.add_option('-q', '--quiet',
                          action='count',
                          dest='quiet',
                          default=0)
    if no_interactive:
        parser.add_option('--no-interactive',
                          action="count",
                          dest="no_interactive",
                          default=0)
    if interactive:
        parser.add_option('-i', '--interactive',
                          action='count',
                          dest='interactive',
                          default=0)
    if simulate:
        parser.add_option('-n', '--simulate',
                          action='store_true',
                          dest='simulate',
                          default=False)
    if overwrite:
        parser.add_option('-f', '--overwrite',
                          dest="overwrite",
                          action="store_true",
                          help="Overwrite files (warnings will be emitted for non-matching files otherwise)")
    return parser
python
def standard_parser(cls, verbose=True,
                    interactive=False,
                    no_interactive=False,
                    simulate=False,
                    quiet=False,
                    overwrite=False):
    """
    Create a standard ``OptionParser`` instance.

    Typically used like::

        class MyCommand(Command):
            parser = Command.standard_parser()

    Subclasses may redefine ``standard_parser``, so use the
    nearest superclass's class method.
    """
    parser = BoolOptionParser()
    if verbose:
        parser.add_option('-v', '--verbose',
                          action='count',
                          dest='verbose',
                          default=0)
    if quiet:
        parser.add_option('-q', '--quiet',
                          action='count',
                          dest='quiet',
                          default=0)
    if no_interactive:
        parser.add_option('--no-interactive',
                          action="count",
                          dest="no_interactive",
                          default=0)
    if interactive:
        parser.add_option('-i', '--interactive',
                          action='count',
                          dest='interactive',
                          default=0)
    if simulate:
        parser.add_option('-n', '--simulate',
                          action='store_true',
                          dest='simulate',
                          default=False)
    if overwrite:
        parser.add_option('-f', '--overwrite',
                          dest="overwrite",
                          action="store_true",
                          help="Overwrite files (warnings will be emitted for non-matching files otherwise)")
    return parser
Create a standard ``OptionParser`` instance.

Typically used like::

    class MyCommand(Command):
        parser = Command.standard_parser()

Subclasses may redefine ``standard_parser``, so use the nearest superclass's class method.
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/util/pastescript/serve.py#L256-L304
galaxyproject/pulsar
pulsar/util/pastescript/serve.py
Command.quote_first_command_arg
def quote_first_command_arg(self, arg):
    """
    There's a bug in Windows when running an executable that's
    located inside a path with a space in it.  This method handles
    that case, or on non-Windows systems or an executable with no
    spaces, it just leaves well enough alone.
    """
    if (sys.platform != 'win32' or ' ' not in arg):
        # Problem does not apply:
        return arg
    try:
        import win32api
    except ImportError:
        raise ValueError(
            "The executable %r contains a space, and in order to "
            "handle this issue you must have the win32api module "
            "installed" % arg)
    arg = win32api.GetShortPathName(arg)
    return arg
python
def quote_first_command_arg(self, arg):
    """
    There's a bug in Windows when running an executable that's
    located inside a path with a space in it.  This method handles
    that case, or on non-Windows systems or an executable with no
    spaces, it just leaves well enough alone.
    """
    if (sys.platform != 'win32' or ' ' not in arg):
        # Problem does not apply:
        return arg
    try:
        import win32api
    except ImportError:
        raise ValueError(
            "The executable %r contains a space, and in order to "
            "handle this issue you must have the win32api module "
            "installed" % arg)
    arg = win32api.GetShortPathName(arg)
    return arg
There's a bug in Windows when running an executable that's located inside a path with a space in it. This method handles that case, or on non-Windows systems or an executable with no spaces, it just leaves well enough alone.
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/util/pastescript/serve.py#L308-L327
galaxyproject/pulsar
pulsar/util/pastescript/serve.py
Command.logging_file_config
def logging_file_config(self, config_file):
    """
    Set up logging via the logging module's fileConfig function with the
    specified ``config_file``, if applicable.

    ConfigParser defaults are specified for the special ``__file__``
    and ``here`` variables, similar to PasteDeploy config loading.
    """
    parser = ConfigParser.ConfigParser()
    parser.read([config_file])
    if parser.has_section('loggers'):
        config_file = os.path.abspath(config_file)
        fileConfig(config_file, dict(__file__=config_file,
                                     here=os.path.dirname(config_file)))
python
def logging_file_config(self, config_file):
    """
    Set up logging via the logging module's fileConfig function with the
    specified ``config_file``, if applicable.

    ConfigParser defaults are specified for the special ``__file__``
    and ``here`` variables, similar to PasteDeploy config loading.
    """
    parser = ConfigParser.ConfigParser()
    parser.read([config_file])
    if parser.has_section('loggers'):
        config_file = os.path.abspath(config_file)
        fileConfig(config_file, dict(__file__=config_file,
                                     here=os.path.dirname(config_file)))
Set up logging via the logging module's fileConfig function with the specified ``config_file``, if applicable. ConfigParser defaults are specified for the special ``__file__`` and ``here`` variables, similar to PasteDeploy config loading.
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/util/pastescript/serve.py#L345-L358
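Editor's note: a runnable sketch of the fileConfig pattern above, written against Python 3's logging.config. The ini content is a minimal example, and the here/__file__ defaults mirror what the method passes.

import logging
import os
from logging.config import fileConfig
from tempfile import NamedTemporaryFile

CONFIG = """
[loggers]
keys = root

[handlers]
keys = console

[formatters]
keys = simple

[logger_root]
level = INFO
handlers = console

[handler_console]
class = StreamHandler
args = (sys.stderr,)
formatter = simple

[formatter_simple]
format = %(levelname)s %(message)s
"""

with NamedTemporaryFile("w", suffix=".ini", delete=False) as f:
    f.write(CONFIG)
    config_file = os.path.abspath(f.name)

# Pass the same defaults the method above supplies to fileConfig.
fileConfig(config_file, dict(__file__=config_file,
                             here=os.path.dirname(config_file)))
logging.getLogger(__name__).info("logging configured from %s", config_file)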
galaxyproject/pulsar
pulsar/client/staging/down.py
finish_job
def finish_job(client, cleanup_job, job_completed_normally, client_outputs, pulsar_outputs):
    """Process for "un-staging" a complete Pulsar job.

    This function is responsible for downloading results from remote
    server and cleaning up Pulsar staging directory (if needed.)
    """
    collection_failure_exceptions = []
    if job_completed_normally:
        output_collector = ClientOutputCollector(client)
        action_mapper = FileActionMapper(client)
        results_stager = ResultsCollector(output_collector, action_mapper, client_outputs, pulsar_outputs)
        collection_failure_exceptions = results_stager.collect()
    _clean(collection_failure_exceptions, cleanup_job, client)
    return collection_failure_exceptions
python
def finish_job(client, cleanup_job, job_completed_normally, client_outputs, pulsar_outputs):
    """Process for "un-staging" a complete Pulsar job.

    This function is responsible for downloading results from remote
    server and cleaning up Pulsar staging directory (if needed.)
    """
    collection_failure_exceptions = []
    if job_completed_normally:
        output_collector = ClientOutputCollector(client)
        action_mapper = FileActionMapper(client)
        results_stager = ResultsCollector(output_collector, action_mapper, client_outputs, pulsar_outputs)
        collection_failure_exceptions = results_stager.collect()
    _clean(collection_failure_exceptions, cleanup_job, client)
    return collection_failure_exceptions
Process for "un-staging" a complete Pulsar job. This function is responsible for downloading results from remote server and cleaning up Pulsar staging directory (if needed.)
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/client/staging/down.py#L13-L26
galaxyproject/pulsar
pulsar/client/util.py
copy
def copy(source, destination):
    """ Copy file from source to destination if needed (skip if source
    is destination).
    """
    source = os.path.abspath(source)
    destination = os.path.abspath(destination)
    if source != destination:
        if not os.path.exists(os.path.dirname(destination)):
            os.makedirs(os.path.dirname(destination))
        shutil.copyfile(source, destination)
python
def copy(source, destination):
    """ Copy file from source to destination if needed (skip if source
    is destination).
    """
    source = os.path.abspath(source)
    destination = os.path.abspath(destination)
    if source != destination:
        if not os.path.exists(os.path.dirname(destination)):
            os.makedirs(os.path.dirname(destination))
        shutil.copyfile(source, destination)
Copy file from source to destination if needed (skip if source is destination).
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/client/util.py#L76-L85
galaxyproject/pulsar
pulsar/managers/base/base_drmaa.py
BaseDrmaaManager.shutdown
def shutdown(self, timeout=None):
    """Cleanup DRMAA session and call shutdown of parent."""
    try:
        super(BaseDrmaaManager, self).shutdown(timeout)
    except Exception:
        pass
    self.drmaa_session.close()
python
def shutdown(self, timeout=None):
    """Cleanup DRMAA session and call shutdown of parent."""
    try:
        super(BaseDrmaaManager, self).shutdown(timeout)
    except Exception:
        pass
    self.drmaa_session.close()
Cleanup DRMAA session and call shutdown of parent.
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/managers/base/base_drmaa.py#L31-L37
galaxyproject/pulsar
pulsar/cache/__init__.py
Cache.cache_file
def cache_file(self, local_path, ip, path):
    """ Move a file from a temporary staging area into the cache. """
    destination = self.__destination(ip, path)
    atomicish_move(local_path, destination)
python
def cache_file(self, local_path, ip, path):
    """ Move a file from a temporary staging area into the cache. """
    destination = self.__destination(ip, path)
    atomicish_move(local_path, destination)
Move a file from a temporary staging area into the cache.
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/cache/__init__.py#L41-L46
galaxyproject/pulsar
pulsar/manager_factory.py
build_managers
def build_managers(app, conf):
    """
    Takes in a config file as outlined in job_managers.ini.sample and builds
    a dictionary of job manager objects from them.
    """
    # Load default options from config file that apply to all
    # managers.
    default_options = _get_default_options(conf)

    manager_descriptions = ManagerDescriptions()
    if "job_managers_config" in conf:
        job_managers_config = conf.get("job_managers_config", None)
        _populate_manager_descriptions_from_ini(manager_descriptions, job_managers_config)
    elif "managers" in conf:
        for manager_name, manager_options in conf["managers"].items():
            manager_description = ManagerDescription.from_dict(manager_options, manager_name)
            manager_descriptions.add(manager_description)
    elif "manager" in conf:
        manager_description = ManagerDescription.from_dict(conf["manager"])
        manager_descriptions.add(manager_description)
    else:
        manager_descriptions.add(ManagerDescription())

    manager_classes = _get_managers_dict()
    managers = {}
    for manager_name, manager_description in manager_descriptions.descriptions.items():
        manager_options = dict(default_options)
        manager_options.update(manager_description.manager_options)

        manager_class = manager_classes[manager_description.manager_type]
        manager = _build_manager(manager_class, app, manager_name, manager_options)
        managers[manager_name] = manager

    return managers
python
def build_managers(app, conf):
    """
    Takes in a config file as outlined in job_managers.ini.sample and builds
    a dictionary of job manager objects from them.
    """
    # Load default options from config file that apply to all
    # managers.
    default_options = _get_default_options(conf)

    manager_descriptions = ManagerDescriptions()
    if "job_managers_config" in conf:
        job_managers_config = conf.get("job_managers_config", None)
        _populate_manager_descriptions_from_ini(manager_descriptions, job_managers_config)
    elif "managers" in conf:
        for manager_name, manager_options in conf["managers"].items():
            manager_description = ManagerDescription.from_dict(manager_options, manager_name)
            manager_descriptions.add(manager_description)
    elif "manager" in conf:
        manager_description = ManagerDescription.from_dict(conf["manager"])
        manager_descriptions.add(manager_description)
    else:
        manager_descriptions.add(ManagerDescription())

    manager_classes = _get_managers_dict()
    managers = {}
    for manager_name, manager_description in manager_descriptions.descriptions.items():
        manager_options = dict(default_options)
        manager_options.update(manager_description.manager_options)

        manager_class = manager_classes[manager_description.manager_type]
        manager = _build_manager(manager_class, app, manager_name, manager_options)
        managers[manager_name] = manager

    return managers
Takes in a config file as outlined in job_managers.ini.sample and builds a dictionary of job manager objects from them.
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/manager_factory.py#L17-L50
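Editor's note: illustrative conf shapes for the three branches above. The manager names and the "type" values are assumptions for illustration, not a definitive Pulsar schema.

conf_ini_style = {
    "job_managers_config": "job_managers.ini",  # descriptions parsed from an ini file
}
conf_multi_manager = {
    "managers": {
        "_default_": {"type": "queued_python"},
        "long_jobs": {"type": "queued_drmaa"},
    },
}
conf_single_manager = {
    "manager": {"type": "queued_python"},
}
# An empty conf falls through to a single default ManagerDescription().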
galaxyproject/pulsar
pulsar/util/pastescript/loadwsgi.py
fix_type_error
def fix_type_error(exc_info, callable, varargs, kwargs):
    """
    Given an exception, this will test if the exception was due to a
    signature error, and annotate the error with better information if
    so.

    Usage::

      try:
          val = callable(*args, **kw)
      except TypeError:
          exc_info = fix_type_error(None, callable, args, kw)
          raise exc_info[0], exc_info[1], exc_info[2]
    """
    if exc_info is None:
        exc_info = sys.exc_info()
    if (exc_info[0] != TypeError
            or str(exc_info[1]).find('arguments') == -1
            or getattr(exc_info[1], '_type_error_fixed', False)):
        return exc_info
    exc_info[1]._type_error_fixed = True
    argspec = inspect.formatargspec(*inspect.getargspec(callable))
    args = ', '.join(map(_short_repr, varargs))
    if kwargs and args:
        args += ', '
    if kwargs:
        kwargs = kwargs.items()
        kwargs.sort()
        args += ', '.join(['%s=...' % n for n, v in kwargs])
    gotspec = '(%s)' % args
    msg = '%s; got %s, wanted %s' % (exc_info[1], gotspec, argspec)
    exc_info[1].args = (msg,)
    return exc_info
python
def fix_type_error(exc_info, callable, varargs, kwargs):
    """
    Given an exception, this will test if the exception was due to a
    signature error, and annotate the error with better information if
    so.

    Usage::

      try:
          val = callable(*args, **kw)
      except TypeError:
          exc_info = fix_type_error(None, callable, args, kw)
          raise exc_info[0], exc_info[1], exc_info[2]
    """
    if exc_info is None:
        exc_info = sys.exc_info()
    if (exc_info[0] != TypeError
            or str(exc_info[1]).find('arguments') == -1
            or getattr(exc_info[1], '_type_error_fixed', False)):
        return exc_info
    exc_info[1]._type_error_fixed = True
    argspec = inspect.formatargspec(*inspect.getargspec(callable))
    args = ', '.join(map(_short_repr, varargs))
    if kwargs and args:
        args += ', '
    if kwargs:
        kwargs = kwargs.items()
        kwargs.sort()
        args += ', '.join(['%s=...' % n for n, v in kwargs])
    gotspec = '(%s)' % args
    msg = '%s; got %s, wanted %s' % (exc_info[1], gotspec, argspec)
    exc_info[1].args = (msg,)
    return exc_info
Given an exception, this will test if the exception was due to a signature error, and annotate the error with better information if so.

Usage::

    try:
        val = callable(*args, **kw)
    except TypeError:
        exc_info = fix_type_error(None, callable, args, kw)
        raise exc_info[0], exc_info[1], exc_info[2]
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/util/pastescript/loadwsgi.py#L50-L82
galaxyproject/pulsar
pulsar/util/pastescript/loadwsgi.py
fix_call
def fix_call(callable, *args, **kw):
    """
    Call ``callable(*args, **kw)`` fixing any type errors that come out.
    """
    try:
        val = callable(*args, **kw)
    except TypeError:
        exc_info = fix_type_error(None, callable, args, kw)
        reraise(*exc_info)
    return val
python
def fix_call(callable, *args, **kw):
    """
    Call ``callable(*args, **kw)`` fixing any type errors that come out.
    """
    try:
        val = callable(*args, **kw)
    except TypeError:
        exc_info = fix_type_error(None, callable, args, kw)
        reraise(*exc_info)
    return val
Call ``callable(*args, **kw)`` fixing any type errors that come out.
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/util/pastescript/loadwsgi.py#L92-L101
galaxyproject/pulsar
pulsar/util/pastescript/loadwsgi.py
lookup_object
def lookup_object(spec):
    """
    Looks up a module or object from a some.module:func_name specification.
    To just look up a module, omit the colon and everything after it.
    """
    parts, target = spec.split(':') if ':' in spec else (spec, None)
    module = __import__(parts)

    for part in parts.split('.')[1:] + ([target] if target else []):
        module = getattr(module, part)

    return module
python
def lookup_object(spec):
    """
    Looks up a module or object from a some.module:func_name specification.
    To just look up a module, omit the colon and everything after it.
    """
    parts, target = spec.split(':') if ':' in spec else (spec, None)
    module = __import__(parts)

    for part in parts.split('.')[1:] + ([target] if target else []):
        module = getattr(module, part)

    return module
Looks up a module or object from a some.module:func_name specification. To just look up a module, omit the colon and everything after it.
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/util/pastescript/loadwsgi.py#L104-L115
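Editor's note: a short usage sketch of the "module(:attribute)" spec format, assuming pulsar.util.pastescript.loadwsgi is importable; the stdlib targets keep the example verifiable.

import os

from pulsar.util.pastescript.loadwsgi import lookup_object

join = lookup_object("os.path:join")      # attribute lookup
assert join("a", "b") == os.path.join("a", "b")

path_module = lookup_object("os.path")    # module lookup, no colon
assert path_module.basename("/tmp/x") == "x"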
galaxyproject/pulsar
pulsar/util/pastescript/loadwsgi.py
_flatten
def _flatten(lst):
    """
    Flatten a nested list.
    """
    if not isinstance(lst, (list, tuple)):
        return [lst]
    result = []
    for item in lst:
        result.extend(_flatten(item))
    return result
python
def _flatten(lst):
    """
    Flatten a nested list.
    """
    if not isinstance(lst, (list, tuple)):
        return [lst]
    result = []
    for item in lst:
        result.extend(_flatten(item))
    return result
Flatten a nested list.
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/util/pastescript/loadwsgi.py#L141-L150
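Editor's note: a behavior sketch; _flatten is private, so the recursion is restated inline rather than imported. Note that strings count as atoms: only lists and tuples are descended into.

def flatten(lst):
    if not isinstance(lst, (list, tuple)):
        return [lst]
    result = []
    for item in lst:
        result.extend(flatten(item))
    return result

assert flatten([1, [2, (3, [4])], "ab"]) == [1, 2, 3, 4, "ab"]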
galaxyproject/pulsar
pulsar/util/pastescript/loadwsgi.py
NicerConfigParser.defaults
def defaults(self):
    """Return the defaults, with their values interpolated (with the
    defaults dict itself)

    Mainly to support defaults using values such as %(here)s
    """
    defaults = ConfigParser.defaults(self).copy()
    for key, val in iteritems(defaults):
        defaults[key] = self.get('DEFAULT', key) or val
    return defaults
python
def defaults(self):
    """Return the defaults, with their values interpolated (with the
    defaults dict itself)

    Mainly to support defaults using values such as %(here)s
    """
    defaults = ConfigParser.defaults(self).copy()
    for key, val in iteritems(defaults):
        defaults[key] = self.get('DEFAULT', key) or val
    return defaults
Return the defaults, with their values interpolated (with the defaults dict itself) Mainly to support defaults using values such as %(here)s
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/util/pastescript/loadwsgi.py#L163-L172
galaxyproject/pulsar
pulsar/util/pastescript/loadwsgi.py
ConfigLoader.find_config_section
def find_config_section(self, object_type, name=None):
    """
    Return the section name with the given name prefix (following the
    same pattern as ``protocol_desc`` in ``config``).

    It must have the given name, or for ``'main'`` an empty name is
    allowed.  The prefix must be followed by a ``:``.

    Case is *not* ignored.
    """
    possible = []
    for name_options in object_type.config_prefixes:
        for name_prefix in name_options:
            found = self._find_sections(
                self.parser.sections(), name_prefix, name)
            if found:
                possible.extend(found)
                break
    if not possible:
        raise LookupError(
            "No section %r (prefixed by %s) found in config %s"
            % (name,
               ' or '.join(map(repr, _flatten(object_type.config_prefixes))),
               self.filename))
    if len(possible) > 1:
        raise LookupError(
            "Ambiguous section names %r for section %r (prefixed by %s) "
            "found in config %s"
            % (possible, name,
               ' or '.join(map(repr, _flatten(object_type.config_prefixes))),
               self.filename))
    return possible[0]
python
def find_config_section(self, object_type, name=None):
    """
    Return the section name with the given name prefix (following the
    same pattern as ``protocol_desc`` in ``config``).

    It must have the given name, or for ``'main'`` an empty name is
    allowed.  The prefix must be followed by a ``:``.

    Case is *not* ignored.
    """
    possible = []
    for name_options in object_type.config_prefixes:
        for name_prefix in name_options:
            found = self._find_sections(
                self.parser.sections(), name_prefix, name)
            if found:
                possible.extend(found)
                break
    if not possible:
        raise LookupError(
            "No section %r (prefixed by %s) found in config %s"
            % (name,
               ' or '.join(map(repr, _flatten(object_type.config_prefixes))),
               self.filename))
    if len(possible) > 1:
        raise LookupError(
            "Ambiguous section names %r for section %r (prefixed by %s) "
            "found in config %s"
            % (possible, name,
               ' or '.join(map(repr, _flatten(object_type.config_prefixes))),
               self.filename))
    return possible[0]
Return the section name with the given name prefix (following the same pattern as ``protocol_desc`` in ``config``). It must have the given name, or for ``'main'`` an empty name is allowed. The prefix must be followed by a ``:``. Case is *not* ignored.
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/util/pastescript/loadwsgi.py#L668-L698
galaxyproject/pulsar
pulsar/util/pastescript/loadwsgi.py
EggLoader.find_egg_entry_point
def find_egg_entry_point(self, object_type, name=None):
    """
    Returns the (entry_point, protocol) for the entry point with the
    given ``name``.
    """
    if name is None:
        name = 'main'
    possible = []
    for protocol_options in object_type.egg_protocols:
        for protocol in protocol_options:
            pkg_resources.require(self.spec)
            entry = pkg_resources.get_entry_info(
                self.spec, protocol, name)
            if entry is not None:
                possible.append((entry.load(), protocol, entry.name))
                break
    if not possible:
        # Better exception
        dist = pkg_resources.get_distribution(self.spec)
        raise LookupError(
            "Entry point %r not found in egg %r (dir: %s; protocols: %s; "
            "entry_points: %s)"
            % (name, self.spec,
               dist.location,
               ', '.join(_flatten(object_type.egg_protocols)),
               ', '.join(_flatten([
                   dictkeys(pkg_resources.get_entry_info(self.spec, prot, name) or {})
                   for prot in protocol_options] or '(no entry points)'))))
    if len(possible) > 1:
        raise LookupError(
            "Ambiguous entry points for %r in egg %r (protocols: %s)"
            % (name, self.spec, ', '.join(_flatten(protocol_options))))
    return possible[0]
python
def find_egg_entry_point(self, object_type, name=None):
    """
    Returns the (entry_point, protocol) for the entry point with the
    given ``name``.
    """
    if name is None:
        name = 'main'
    possible = []
    for protocol_options in object_type.egg_protocols:
        for protocol in protocol_options:
            pkg_resources.require(self.spec)
            entry = pkg_resources.get_entry_info(
                self.spec, protocol, name)
            if entry is not None:
                possible.append((entry.load(), protocol, entry.name))
                break
    if not possible:
        # Better exception
        dist = pkg_resources.get_distribution(self.spec)
        raise LookupError(
            "Entry point %r not found in egg %r (dir: %s; protocols: %s; "
            "entry_points: %s)"
            % (name, self.spec,
               dist.location,
               ', '.join(_flatten(object_type.egg_protocols)),
               ', '.join(_flatten([
                   dictkeys(pkg_resources.get_entry_info(self.spec, prot, name) or {})
                   for prot in protocol_options] or '(no entry points)'))))
    if len(possible) > 1:
        raise LookupError(
            "Ambiguous entry points for %r in egg %r (protocols: %s)"
            % (name, self.spec, ', '.join(_flatten(protocol_options))))
    return possible[0]
Returns the (entry_point, protocol) for the entry point with the given ``name``.
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/util/pastescript/loadwsgi.py#L733-L767
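Editor's note: a hedged sketch of the pkg_resources lookup the method above builds on. The "pip"/"console_scripts"/"pip" triple is just a concrete entry point most environments have; note pkg_resources is deprecated in favor of importlib.metadata on newer Pythons.

import pkg_resources

entry = pkg_resources.get_entry_info("pip", "console_scripts", "pip")
if entry is not None:
    print("found entry point %s = %s" % (entry.name, entry.module_name))
    main = entry.load()  # imports and returns the target callable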