repository_name | func_path_in_repository | func_name | whole_func_string | language | func_code_string | func_documentation_string | func_code_url
---|---|---|---|---|---|---|---|
ChristianTremblay/BAC0 | BAC0/core/utils/notes.py | note_and_log | def note_and_log(cls):
"""
This will be used as a decorator on a class to activate
logging and store messages in the variable cls._notes
This will allow quick access to events in the web app.
A note can be added to cls._notes without logging if passing
the argument log=False to function note()
Something can be logged without adding a note using function log()
"""
if hasattr(cls, "DEBUG_LEVEL"):
if cls.DEBUG_LEVEL == "debug":
file_level = logging.DEBUG
console_level = logging.DEBUG
elif cls.DEBUG_LEVEL == "info":
file_level = logging.INFO
console_level = logging.INFO
else:
file_level = logging.WARNING
console_level = logging.INFO
# Notes object
cls._notes = namedtuple("_notes", ["timestamp", "notes"])
cls._notes.timestamp = []
cls._notes.notes = []
# Defining log object
cls.logname = "{} | {}".format(cls.__module__, cls.__name__)
root_logger = logging.getLogger()
cls._log = logging.getLogger("BAC0")
if not len(root_logger.handlers):
root_logger.addHandler(cls._log)
# Console Handler
ch = logging.StreamHandler()
ch.set_name("stderr")
ch2 = logging.StreamHandler(sys.stdout)
ch2.set_name("stdout")
ch.setLevel(console_level)
ch2.setLevel(logging.CRITICAL)
formatter = logging.Formatter("{asctime} - {levelname:<8}| {message}", style="{")
# File Handler
_PERMISSION_TO_WRITE = True
logUserPath = expanduser("~")
logSaveFilePath = join(logUserPath, ".BAC0")
logFile = join(logSaveFilePath, "BAC0.log")
if not os.path.exists(logSaveFilePath):
try:
os.makedirs(logSaveFilePath)
except:
_PERMISSION_TO_WRITE = False
if _PERMISSION_TO_WRITE:
fh = FileHandler(logFile)
fh.set_name("file_handler")
fh.setLevel(file_level)
fh.setFormatter(formatter)
ch.setFormatter(formatter)
ch2.setFormatter(formatter)
# Add handlers the first time only...
if not len(cls._log.handlers):
if _PERMISSION_TO_WRITE:
cls._log.addHandler(fh)
cls._log.addHandler(ch)
cls._log.addHandler(ch2)
# cls._log.setLevel(logging.CRITICAL)
def log_title(self, title, args=None, width=35):
cls._log.debug("")
cls._log.debug("#" * width)
cls._log.debug("# {}".format(title))
cls._log.debug("#" * width)
if args:
cls._log.debug("{!r}".format(args))
cls._log.debug("#" * 35)
def log_subtitle(self, subtitle, args=None, width=35):
cls._log.debug("")
cls._log.debug("=" * width)
cls._log.debug("{}".format(subtitle))
cls._log.debug("=" * width)
if args:
cls._log.debug("{!r}".format(args))
cls._log.debug("=" * width)
def log(self, note, *, level=logging.DEBUG):
"""
Add a log entry...no note
"""
if not note:
raise ValueError("Provide something to log")
note = "{} | {}".format(cls.logname, note)
cls._log.log(level, note)
def note(self, note, *, level=logging.INFO, log=True):
"""
Add note to the object. By default, the note will also
be logged
:param note: (str) The note itself
:param level: (logging.level)
:param log: (boolean) Enable or disable logging of note
"""
if not note:
raise ValueError("Provide something to log")
note = "{} | {}".format(cls.logname, note)
cls._notes.timestamp.append(datetime.now())
cls._notes.notes.append(note)
if log:
self.log(note, level=level)
@property
def notes(self):
"""
Retrieve notes list as a Pandas Series
"""
if not _PANDAS:
return dict(zip(self._notes.timestamp, self._notes.notes))
return pd.Series(self._notes.notes, index=self._notes.timestamp)
def clear_notes(self):
"""
Clear notes object
"""
cls._notes.timestamp = []
cls._notes.notes = []
# Add the functions to the decorated class
cls.clear_notes = clear_notes
cls.note = note
cls.notes = notes
cls.log = log
cls.log_title = log_title
cls.log_subtitle = log_subtitle
return cls | python | def note_and_log(cls):
"""
This will be used as a decorator on a class to activate
logging and store messages in the variable cls._notes
This will allow quick access to events in the web app.
A note can be added to cls._notes without logging if passing
the argument log=False to function note()
Something can be logged without adding a note using function log()
"""
if hasattr(cls, "DEBUG_LEVEL"):
if cls.DEBUG_LEVEL == "debug":
file_level = logging.DEBUG
console_level = logging.DEBUG
elif cls.DEBUG_LEVEL == "info":
file_level = logging.INFO
console_level = logging.INFO
else:
file_level = logging.WARNING
console_level = logging.INFO
# Notes object
cls._notes = namedtuple("_notes", ["timestamp", "notes"])
cls._notes.timestamp = []
cls._notes.notes = []
# Defining log object
cls.logname = "{} | {}".format(cls.__module__, cls.__name__)
root_logger = logging.getLogger()
cls._log = logging.getLogger("BAC0")
if not len(root_logger.handlers):
root_logger.addHandler(cls._log)
# Console Handler
ch = logging.StreamHandler()
ch.set_name("stderr")
ch2 = logging.StreamHandler(sys.stdout)
ch2.set_name("stdout")
ch.setLevel(console_level)
ch2.setLevel(logging.CRITICAL)
formatter = logging.Formatter("{asctime} - {levelname:<8}| {message}", style="{")
# File Handler
_PERMISSION_TO_WRITE = True
logUserPath = expanduser("~")
logSaveFilePath = join(logUserPath, ".BAC0")
logFile = join(logSaveFilePath, "BAC0.log")
if not os.path.exists(logSaveFilePath):
try:
os.makedirs(logSaveFilePath)
except:
_PERMISSION_TO_WRITE = False
if _PERMISSION_TO_WRITE:
fh = FileHandler(logFile)
fh.set_name("file_handler")
fh.setLevel(file_level)
fh.setFormatter(formatter)
ch.setFormatter(formatter)
ch2.setFormatter(formatter)
# Add handlers the first time only...
if not len(cls._log.handlers):
if _PERMISSION_TO_WRITE:
cls._log.addHandler(fh)
cls._log.addHandler(ch)
cls._log.addHandler(ch2)
# cls._log.setLevel(logging.CRITICAL)
def log_title(self, title, args=None, width=35):
cls._log.debug("")
cls._log.debug("#" * width)
cls._log.debug("# {}".format(title))
cls._log.debug("#" * width)
if args:
cls._log.debug("{!r}".format(args))
cls._log.debug("#" * 35)
def log_subtitle(self, subtitle, args=None, width=35):
cls._log.debug("")
cls._log.debug("=" * width)
cls._log.debug("{}".format(subtitle))
cls._log.debug("=" * width)
if args:
cls._log.debug("{!r}".format(args))
cls._log.debug("=" * width)
def log(self, note, *, level=logging.DEBUG):
"""
Add a log entry...no note
"""
if not note:
raise ValueError("Provide something to log")
note = "{} | {}".format(cls.logname, note)
cls._log.log(level, note)
def note(self, note, *, level=logging.INFO, log=True):
"""
Add note to the object. By default, the note will also
be logged
:param note: (str) The note itself
:param level: (logging.level)
:param log: (boolean) Enable or disable logging of note
"""
if not note:
raise ValueError("Provide something to log")
note = "{} | {}".format(cls.logname, note)
cls._notes.timestamp.append(datetime.now())
cls._notes.notes.append(note)
if log:
self.log(note, level=level)
@property
def notes(self):
"""
Retrieve notes list as a Pandas Series
"""
if not _PANDAS:
return dict(zip(self._notes.timestamp, self._notes.notes))
return pd.Series(self._notes.notes, index=self._notes.timestamp)
def clear_notes(self):
"""
Clear notes object
"""
cls._notes.timestamp = []
cls._notes.notes = []
# Add the functions to the decorated class
cls.clear_notes = clear_notes
cls.note = note
cls.notes = notes
cls.log = log
cls.log_title = log_title
cls.log_subtitle = log_subtitle
return cls | This will be used as a decorator on a class to activate
logging and store messages in the variable cls._notes
This will allow quick access to events in the web app.
A note can be added to cls._notes without logging if passing
the argument log=False to function note()
Something can be logged without adding a note using function log() | https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/core/utils/notes.py#L88-L223 |
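A minimal usage sketch of the note_and_log decorator above; the Controller class and the messages are illustrative, not part of the dataset record, and DEBUG_LEVEL is set because the handler levels are read from it.

```python
import logging

from BAC0.core.utils.notes import note_and_log

@note_and_log
class Controller:
    DEBUG_LEVEL = "info"   # read by the decorator to pick file/console log levels

c = Controller()
c.note("Device came online")                    # stored in c._notes and logged
c.log("Polling started", level=logging.DEBUG)   # logged only, no note kept
print(c.notes)     # pandas Series of notes, or a plain dict when pandas is missing
c.clear_notes()
```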
ChristianTremblay/BAC0 | BAC0/core/functions/discoverPoints.py | discoverPoints | def discoverPoints(bacnetapp, address, devID):
"""
Discover the BACnet points in a BACnet device.
:param bacnetApp: The app itself so we can call read
:param address: address of the device as a string (ex. '2:5')
:param devID: device ID of the bacnet device as a string (ex. '1001')
:returns: a tuple with deviceName, pss, objList, df
* *deviceName* : name of the device
* *pss* : protocol services supported
* *objList* : list of bacnet object (ex. analogInput, 1)
* *df* : is a dataFrame containing pointType, pointAddress, pointName, description
presentValue and units
If pandas can't be found, df will be a simple array
"""
pss = bacnetapp.read(
"{} device {} protocolServicesSupported".format(address, devID)
)
deviceName = bacnetapp.read("{} device {} objectName".format(address, devID))
# print('Device {}- building points list'.format(deviceName))
objList = bacnetapp.read("{} device {} objectList".format(address, devID))
newLine = []
result = []
points = []
for pointType, pointAddr in objList:
if "binary" in pointType: # BI/BO/BV
newLine = [pointType, pointAddr]
infos = bacnetapp.readMultiple(
"{} {} {} objectName description presentValue inactiveText activeText".format(
address, pointType, pointAddr
)
)
newLine.extend(infos[:-2])
newLine.extend([infos[-2:]])
newPoint = BooleanPoint(
pointType=newLine[0],
pointAddress=newLine[1],
pointName=newLine[2],
description=newLine[3],
presentValue=newLine[4],
units_state=newLine[5],
)
elif "multiState" in pointType: # MI/MV/MO
newLine = [pointType, pointAddr]
newLine.extend(
bacnetapp.readMultiple(
"{} {} {} objectName description presentValue stateText".format(
address, pointType, pointAddr
)
)
)
newPoint = EnumPoint(
pointType=newLine[0],
pointAddress=newLine[1],
pointName=newLine[2],
description=newLine[3],
presentValue=newLine[4],
units_state=newLine[5],
)
elif "analog" in pointType: # AI/AO/AV
newLine = [pointType, pointAddr]
newLine.extend(
bacnetapp.readMultiple(
"{} {} {} objectName description presentValue units".format(
address, pointType, pointAddr
)
)
)
newPoint = NumericPoint(
pointType=newLine[0],
pointAddress=newLine[1],
pointName=newLine[2],
description=newLine[3],
presentValue=newLine[4],
units_state=newLine[5],
)
else:
continue # skip
result.append(newLine)
points.append(newPoint)
if _PANDA:
df = pd.DataFrame(
result,
columns=[
"pointType",
"pointAddress",
"pointName",
"description",
"presentValue",
"units_state",
],
).set_index(["pointName"])
else:
df = result
# print('Ready!')
return (deviceName, pss, objList, df, points) | python | def discoverPoints(bacnetapp, address, devID):
"""
Discover the BACnet points in a BACnet device.
:param bacnetApp: The app itself so we can call read
:param address: address of the device as a string (ex. '2:5')
:param devID: device ID of the bacnet device as a string (ex. '1001')
:returns: a tuple with deviceName, pss, objList, df
* *deviceName* : name of the device
* *pss* : protocol services supported
* *objList* : list of bacnet object (ex. analogInput, 1)
* *df* : is a dataFrame containing pointType, pointAddress, pointName, description
presentValue and units
If pandas can't be found, df will be a simple array
"""
pss = bacnetapp.read(
"{} device {} protocolServicesSupported".format(address, devID)
)
deviceName = bacnetapp.read("{} device {} objectName".format(address, devID))
# print('Device {}- building points list'.format(deviceName))
objList = bacnetapp.read("{} device {} objectList".format(address, devID))
newLine = []
result = []
points = []
for pointType, pointAddr in objList:
if "binary" in pointType: # BI/BO/BV
newLine = [pointType, pointAddr]
infos = bacnetapp.readMultiple(
"{} {} {} objectName description presentValue inactiveText activeText".format(
address, pointType, pointAddr
)
)
newLine.extend(infos[:-2])
newLine.extend([infos[-2:]])
newPoint = BooleanPoint(
pointType=newLine[0],
pointAddress=newLine[1],
pointName=newLine[2],
description=newLine[3],
presentValue=newLine[4],
units_state=newLine[5],
)
elif "multiState" in pointType: # MI/MV/MO
newLine = [pointType, pointAddr]
newLine.extend(
bacnetapp.readMultiple(
"{} {} {} objectName description presentValue stateText".format(
address, pointType, pointAddr
)
)
)
newPoint = EnumPoint(
pointType=newLine[0],
pointAddress=newLine[1],
pointName=newLine[2],
description=newLine[3],
presentValue=newLine[4],
units_state=newLine[5],
)
elif "analog" in pointType: # AI/AO/AV
newLine = [pointType, pointAddr]
newLine.extend(
bacnetapp.readMultiple(
"{} {} {} objectName description presentValue units".format(
address, pointType, pointAddr
)
)
)
newPoint = NumericPoint(
pointType=newLine[0],
pointAddress=newLine[1],
pointName=newLine[2],
description=newLine[3],
presentValue=newLine[4],
units_state=newLine[5],
)
else:
continue # skip
result.append(newLine)
points.append(newPoint)
if _PANDA:
df = pd.DataFrame(
result,
columns=[
"pointType",
"pointAddress",
"pointName",
"description",
"presentValue",
"units_state",
],
).set_index(["pointName"])
else:
df = result
# print('Ready!')
return (deviceName, pss, objList, df, points) | Discover the BACnet points in a BACnet device.
:param bacnetApp: The app itself so we can call read
:param address: address of the device as a string (ex. '2:5')
:param devID: device ID of the bacnet device as a string (ex. '1001')
:returns: a tuple with deviceName, pss, objList, df
* *deviceName* : name of the device
* *pss* : protocol services supported
* *objList* : list of bacnet object (ex. analogInput, 1)
* *df* : is a dataFrame containing pointType, pointAddress, pointName, description
presentValue and units
If pandas can't be found, df will be a simple array | https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/core/functions/discoverPoints.py#L28-L139 |
regebro/svg.path | src/svg/path/path.py | CubicBezier.is_smooth_from | def is_smooth_from(self, previous):
"""Checks if this segment would be a smooth segment following the previous"""
if isinstance(previous, CubicBezier):
return (self.start == previous.end and
(self.control1 - self.start) == (previous.end - previous.control2))
else:
return self.control1 == self.start | python | def is_smooth_from(self, previous):
"""Checks if this segment would be a smooth segment following the previous"""
if isinstance(previous, CubicBezier):
return (self.start == previous.end and
(self.control1 - self.start) == (previous.end - previous.control2))
else:
return self.control1 == self.start | Checks if this segment would be a smooth segment following the previous | https://github.com/regebro/svg.path/blob/cb58e104e5aa3472be205c75da59690db30aecc9/src/svg/path/path.py#L83-L89 |
regebro/svg.path | src/svg/path/path.py | CubicBezier.point | def point(self, pos):
"""Calculate the x,y position at a certain position of the path"""
return ((1 - pos) ** 3 * self.start) + \
(3 * (1 - pos) ** 2 * pos * self.control1) + \
(3 * (1 - pos) * pos ** 2 * self.control2) + \
(pos ** 3 * self.end) | python | def point(self, pos):
"""Calculate the x,y position at a certain position of the path"""
return ((1 - pos) ** 3 * self.start) + \
(3 * (1 - pos) ** 2 * pos * self.control1) + \
(3 * (1 - pos) * pos ** 2 * self.control2) + \
(pos ** 3 * self.end) | Calculate the x,y position at a certain position of the path | https://github.com/regebro/svg.path/blob/cb58e104e5aa3472be205c75da59690db30aecc9/src/svg/path/path.py#L91-L96 |
regebro/svg.path | src/svg/path/path.py | CubicBezier.length | def length(self, error=ERROR, min_depth=MIN_DEPTH):
"""Calculate the length of the path up to a certain position"""
start_point = self.point(0)
end_point = self.point(1)
return segment_length(self, 0, 1, start_point, end_point, error, min_depth, 0) | python | def length(self, error=ERROR, min_depth=MIN_DEPTH):
"""Calculate the length of the path up to a certain position"""
start_point = self.point(0)
end_point = self.point(1)
return segment_length(self, 0, 1, start_point, end_point, error, min_depth, 0) | Calculate the length of the path up to a certain position | https://github.com/regebro/svg.path/blob/cb58e104e5aa3472be205c75da59690db30aecc9/src/svg/path/path.py#L98-L102 |
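A short sketch exercising the three CubicBezier rows above; svg.path encodes 2-D points as complex numbers, and the coordinates here are arbitrary.

```python
from svg.path import CubicBezier

a = CubicBezier(0j, 1 + 1j, 2 + 1j, 3 + 0j)
b = CubicBezier(3 + 0j, 4 - 1j, 5 - 1j, 6 + 0j)

mid = a.point(0.5)                 # position on the curve at t = 0.5
approx_len = a.length(error=1e-5)  # adaptive approximation of arc length
print(b.is_smooth_from(a))         # True: b.control1 mirrors a.control2 about the join
```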
regebro/svg.path | src/svg/path/path.py | QuadraticBezier.is_smooth_from | def is_smooth_from(self, previous):
"""Checks if this segment would be a smooth segment following the previous"""
if isinstance(previous, QuadraticBezier):
return (self.start == previous.end and
(self.control - self.start) == (previous.end - previous.control))
else:
return self.control == self.start | python | def is_smooth_from(self, previous):
"""Checks if this segment would be a smooth segment following the previous"""
if isinstance(previous, QuadraticBezier):
return (self.start == previous.end and
(self.control - self.start) == (previous.end - previous.control))
else:
return self.control == self.start | Checks if this segment would be a smooth segment following the previous | https://github.com/regebro/svg.path/blob/cb58e104e5aa3472be205c75da59690db30aecc9/src/svg/path/path.py#L126-L132 |
zeekay/flask-uwsgi-websocket | flask_uwsgi_websocket/websocket.py | WebSocket.register_blueprint | def register_blueprint(self, blueprint, **options):
'''
Registers a blueprint on the WebSockets.
'''
first_registration = False
if blueprint.name in self.blueprints:
assert self.blueprints[blueprint.name] is blueprint, \
'A blueprint\'s name collision occurred between %r and ' \
'%r. Both share the same name "%s". Blueprints that ' \
'are created on the fly need unique names.' % \
(blueprint, self.blueprints[blueprint.name], blueprint.name)
else:
self.blueprints[blueprint.name] = blueprint
first_registration = True
blueprint.register(self, options, first_registration) | python | def register_blueprint(self, blueprint, **options):
'''
Registers a blueprint on the WebSockets.
'''
first_registration = False
if blueprint.name in self.blueprints:
assert self.blueprints[blueprint.name] is blueprint, \
'A blueprint\'s name collision occurred between %r and ' \
'%r. Both share the same name "%s". Blueprints that ' \
'are created on the fly need unique names.' % \
(blueprint, self.blueprints[blueprint.name], blueprint.name)
else:
self.blueprints[blueprint.name] = blueprint
first_registration = True
blueprint.register(self, options, first_registration) | Registers a blueprint on the WebSockets. | https://github.com/zeekay/flask-uwsgi-websocket/blob/d0264d220d570a37100ef01be10a0f01fef1e9df/flask_uwsgi_websocket/websocket.py#L151-L165 |
mgedmin/findimports | findimports.py | adjust_lineno | def adjust_lineno(filename, lineno, name):
"""Adjust the line number of an import.
Needed because import statements can span multiple lines, and our lineno
is always the first line number.
"""
line = linecache.getline(filename, lineno)
# Hack warning: might be fooled by comments
rx = re.compile(r'\b%s\b' % re.escape(name) if name != '*' else '[*]')
while line and not rx.search(line):
lineno += 1
line = linecache.getline(filename, lineno)
return lineno | python | def adjust_lineno(filename, lineno, name):
"""Adjust the line number of an import.
Needed because import statements can span multiple lines, and our lineno
is always the first line number.
"""
line = linecache.getline(filename, lineno)
# Hack warning: might be fooled by comments
rx = re.compile(r'\b%s\b' % re.escape(name) if name != '*' else '[*]')
while line and not rx.search(line):
lineno += 1
line = linecache.getline(filename, lineno)
return lineno | Adjust the line number of an import.
Needed because import statements can span multiple lines, and our lineno
is always the first line number. | https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L89-L101 |
mgedmin/findimports | findimports.py | find_imports | def find_imports(filename):
"""Find all imported names in a given file.
Returns a list of ImportInfo objects.
"""
with open(filename) as f:
root = ast.parse(f.read(), filename)
visitor = ImportFinder(filename)
visitor.visit(root)
return visitor.imports | python | def find_imports(filename):
"""Find all imported names in a given file.
Returns a list of ImportInfo objects.
"""
with open(filename) as f:
root = ast.parse(f.read(), filename)
visitor = ImportFinder(filename)
visitor.visit(root)
return visitor.imports | Find all imported names in a given file.
Returns a list of ImportInfo objects. | https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L316-L325 |
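A tiny sketch of find_imports; the filename is a placeholder, and the name/lineno attributes follow the ImportInfo usage visible in the other findimports rows.

```python
from findimports import find_imports

for imp in find_imports('example.py'):
    print(imp.name, imp.lineno)
```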
mgedmin/findimports | findimports.py | find_imports_and_track_names | def find_imports_and_track_names(filename, warn_about_duplicates=False,
verbose=False):
"""Find all imported names in a given file.
Returns ``(imports, unused)``. Both are lists of ImportInfo objects.
"""
with open(filename) as f:
root = ast.parse(f.read(), filename)
visitor = ImportFinderAndNameTracker(filename)
visitor.warn_about_duplicates = warn_about_duplicates
visitor.verbose = verbose
visitor.visit(root)
visitor.leaveAllScopes()
return visitor.imports, visitor.unused_names | python | def find_imports_and_track_names(filename, warn_about_duplicates=False,
verbose=False):
"""Find all imported names in a given file.
Returns ``(imports, unused)``. Both are lists of ImportInfo objects.
"""
with open(filename) as f:
root = ast.parse(f.read(), filename)
visitor = ImportFinderAndNameTracker(filename)
visitor.warn_about_duplicates = warn_about_duplicates
visitor.verbose = verbose
visitor.visit(root)
visitor.leaveAllScopes()
return visitor.imports, visitor.unused_names | Find all imported names in a given file.
Returns ``(imports, unused)``. Both are lists of ImportInfo objects. | https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L328-L341 |
mgedmin/findimports | findimports.py | ModuleGraph.parsePathname | def parsePathname(self, pathname):
"""Parse one or more source files.
``pathname`` may be a file name or a directory name.
"""
if os.path.isdir(pathname):
for root, dirs, files in os.walk(pathname):
dirs.sort()
files.sort()
for fn in files:
# ignore emacsish junk
if fn.endswith('.py') and not fn.startswith('.#'):
self.parseFile(os.path.join(root, fn))
elif pathname.endswith('.importcache'):
self.readCache(pathname)
else:
self.parseFile(pathname) | python | def parsePathname(self, pathname):
"""Parse one or more source files.
``pathname`` may be a file name or a directory name.
"""
if os.path.isdir(pathname):
for root, dirs, files in os.walk(pathname):
dirs.sort()
files.sort()
for fn in files:
# ignore emacsish junk
if fn.endswith('.py') and not fn.startswith('.#'):
self.parseFile(os.path.join(root, fn))
elif pathname.endswith('.importcache'):
self.readCache(pathname)
else:
self.parseFile(pathname) | Parse one or more source files.
``pathname`` may be a file name or a directory name. | https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L417-L433 |
mgedmin/findimports | findimports.py | ModuleGraph.writeCache | def writeCache(self, filename):
"""Write the graph to a cache file."""
with open(filename, 'wb') as f:
pickle.dump(self.modules, f) | python | def writeCache(self, filename):
"""Write the graph to a cache file."""
with open(filename, 'wb') as f:
pickle.dump(self.modules, f) | Write the graph to a cache file. | https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L435-L438 |
mgedmin/findimports | findimports.py | ModuleGraph.readCache | def readCache(self, filename):
"""Load the graph from a cache file."""
with open(filename, 'rb') as f:
self.modules = pickle.load(f) | python | def readCache(self, filename):
"""Load the graph from a cache file."""
with open(filename, 'rb') as f:
self.modules = pickle.load(f) | Load the graph from a cache file. | https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L440-L443 |
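A sketch of the cache round trip suggested by writeCache/readCache above; both paths are placeholders.

```python
from findimports import ModuleGraph

g = ModuleGraph()
g.parsePathname('mypackage/')      # parse the sources once...
g.writeCache('deps.importcache')   # ...and pickle the resulting module table

g2 = ModuleGraph()
g2.readCache('deps.importcache')   # reload without re-parsing
# parsePathname() would also pick the cache up via its '.importcache' suffix
```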
mgedmin/findimports | findimports.py | ModuleGraph.parseFile | def parseFile(self, filename):
"""Parse a single file."""
modname = self.filenameToModname(filename)
module = Module(modname, filename)
self.modules[modname] = module
if self.trackUnusedNames:
module.imported_names, module.unused_names = \
find_imports_and_track_names(filename,
self.warn_about_duplicates,
self.verbose)
else:
module.imported_names = find_imports(filename)
module.unused_names = None
dir = os.path.dirname(filename)
module.imports = set(
[self.findModuleOfName(imp.name, imp.level, filename, dir)
for imp in module.imported_names]) | python | def parseFile(self, filename):
"""Parse a single file."""
modname = self.filenameToModname(filename)
module = Module(modname, filename)
self.modules[modname] = module
if self.trackUnusedNames:
module.imported_names, module.unused_names = \
find_imports_and_track_names(filename,
self.warn_about_duplicates,
self.verbose)
else:
module.imported_names = find_imports(filename)
module.unused_names = None
dir = os.path.dirname(filename)
module.imports = set(
[self.findModuleOfName(imp.name, imp.level, filename, dir)
for imp in module.imported_names]) | Parse a single file. | https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L445-L461 |
mgedmin/findimports | findimports.py | ModuleGraph.filenameToModname | def filenameToModname(self, filename):
"""Convert a filename to a module name."""
for ext in reversed(self._exts):
if filename.endswith(ext):
filename = filename[:-len(ext)]
break
else:
self.warn(filename, '%s: unknown file name extension', filename)
filename = os.path.abspath(filename)
elements = filename.split(os.path.sep)
modname = []
while elements:
modname.append(elements[-1])
del elements[-1]
if not os.path.exists(os.path.sep.join(elements + ['__init__.py'])):
break
modname.reverse()
modname = ".".join(modname)
return modname | python | def filenameToModname(self, filename):
"""Convert a filename to a module name."""
for ext in reversed(self._exts):
if filename.endswith(ext):
filename = filename[:-len(ext)]
break
else:
self.warn(filename, '%s: unknown file name extension', filename)
filename = os.path.abspath(filename)
elements = filename.split(os.path.sep)
modname = []
while elements:
modname.append(elements[-1])
del elements[-1]
if not os.path.exists(os.path.sep.join(elements + ['__init__.py'])):
break
modname.reverse()
modname = ".".join(modname)
return modname | Convert a filename to a module name. | https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L463-L481 |
mgedmin/findimports | findimports.py | ModuleGraph.findModuleOfName | def findModuleOfName(self, dotted_name, level, filename, extrapath=None):
"""Given a fully qualified name, find what module contains it."""
if dotted_name.endswith('.*'):
return dotted_name[:-2]
name = dotted_name
# extrapath is None only in a couple of test cases; in real life it's
# always present
if level and level > 1 and extrapath:
# strip trailing path bits for each extra level to account for
# relative imports
# from . import X has level == 1 and nothing is stripped (the level > 1 check accounts for this case)
# from .. import X has level == 2 and one trailing path component must go
# from ... import X has level == 3 and two trailing path components must go
extrapath = extrapath.split(os.path.sep)
level -= 1
extrapath = extrapath[0:-level]
extrapath = os.path.sep.join(extrapath)
while name:
candidate = self.isModule(name, extrapath)
if candidate:
return candidate
candidate = self.isPackage(name, extrapath)
if candidate:
return candidate
name = name[:name.rfind('.')]
self.warn(dotted_name, '%s: could not find %s', filename, dotted_name)
return dotted_name | python | def findModuleOfName(self, dotted_name, level, filename, extrapath=None):
"""Given a fully qualified name, find what module contains it."""
if dotted_name.endswith('.*'):
return dotted_name[:-2]
name = dotted_name
# extrapath is None only in a couple of test cases; in real life it's
# always present
if level and level > 1 and extrapath:
# strip trailing path bits for each extra level to account for
# relative imports
# from . import X has level == 1 and nothing is stripped (the level > 1 check accounts for this case)
# from .. import X has level == 2 and one trailing path component must go
# from ... import X has level == 3 and two trailing path components must go
extrapath = extrapath.split(os.path.sep)
level -= 1
extrapath = extrapath[0:-level]
extrapath = os.path.sep.join(extrapath)
while name:
candidate = self.isModule(name, extrapath)
if candidate:
return candidate
candidate = self.isPackage(name, extrapath)
if candidate:
return candidate
name = name[:name.rfind('.')]
self.warn(dotted_name, '%s: could not find %s', filename, dotted_name)
return dotted_name | Given a fully qualified name, find what module contains it. | https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L483-L511 |
mgedmin/findimports | findimports.py | ModuleGraph.isModule | def isModule(self, dotted_name, extrapath=None):
"""Is ``dotted_name`` the name of a module?"""
try:
return self._module_cache[(dotted_name, extrapath)]
except KeyError:
pass
if dotted_name in sys.modules or dotted_name in self.builtin_modules:
return dotted_name
filename = dotted_name.replace('.', os.path.sep)
if extrapath:
for ext in self._exts:
candidate = os.path.join(extrapath, filename) + ext
if os.path.exists(candidate):
modname = self.filenameToModname(candidate)
self._module_cache[(dotted_name, extrapath)] = modname
return modname
try:
return self._module_cache[(dotted_name, None)]
except KeyError:
pass
for dir in self.path:
if os.path.isfile(dir):
if dir.endswith('.egg-info'):
# distribute creates a setuptools-blah-blah.egg-info
# that ends up in sys.path
continue
try:
zf = zipfile.ZipFile(dir)
except zipfile.BadZipfile:
self.warn(dir, "%s: not a directory or zip file", dir)
continue
names = zf.namelist()
for ext in self._exts:
candidate = filename + ext
if candidate in names:
modname = filename.replace(os.path.sep, '.')
self._module_cache[(dotted_name, extrapath)] = modname
self._module_cache[(dotted_name, None)] = modname
return modname
else:
for ext in self._exts:
candidate = os.path.join(dir, filename) + ext
if os.path.exists(candidate):
modname = self.filenameToModname(candidate)
self._module_cache[(dotted_name, extrapath)] = modname
self._module_cache[(dotted_name, None)] = modname
return modname
return None | python | def isModule(self, dotted_name, extrapath=None):
"""Is ``dotted_name`` the name of a module?"""
try:
return self._module_cache[(dotted_name, extrapath)]
except KeyError:
pass
if dotted_name in sys.modules or dotted_name in self.builtin_modules:
return dotted_name
filename = dotted_name.replace('.', os.path.sep)
if extrapath:
for ext in self._exts:
candidate = os.path.join(extrapath, filename) + ext
if os.path.exists(candidate):
modname = self.filenameToModname(candidate)
self._module_cache[(dotted_name, extrapath)] = modname
return modname
try:
return self._module_cache[(dotted_name, None)]
except KeyError:
pass
for dir in self.path:
if os.path.isfile(dir):
if dir.endswith('.egg-info'):
# distribute creates a setuptools-blah-blah.egg-info
# that ends up in sys.path
continue
try:
zf = zipfile.ZipFile(dir)
except zipfile.BadZipfile:
self.warn(dir, "%s: not a directory or zip file", dir)
continue
names = zf.namelist()
for ext in self._exts:
candidate = filename + ext
if candidate in names:
modname = filename.replace(os.path.sep, '.')
self._module_cache[(dotted_name, extrapath)] = modname
self._module_cache[(dotted_name, None)] = modname
return modname
else:
for ext in self._exts:
candidate = os.path.join(dir, filename) + ext
if os.path.exists(candidate):
modname = self.filenameToModname(candidate)
self._module_cache[(dotted_name, extrapath)] = modname
self._module_cache[(dotted_name, None)] = modname
return modname
return None | Is ``dotted_name`` the name of a module? | https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L513-L560 |
mgedmin/findimports | findimports.py | ModuleGraph.isPackage | def isPackage(self, dotted_name, extrapath=None):
"""Is ``dotted_name`` the name of a package?"""
candidate = self.isModule(dotted_name + '.__init__', extrapath)
if candidate:
candidate = candidate[:-len(".__init__")]
return candidate | python | def isPackage(self, dotted_name, extrapath=None):
"""Is ``dotted_name`` the name of a package?"""
candidate = self.isModule(dotted_name + '.__init__', extrapath)
if candidate:
candidate = candidate[:-len(".__init__")]
return candidate | Is ``dotted_name`` the name of a package? | https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L562-L567 |
mgedmin/findimports | findimports.py | ModuleGraph.packageOf | def packageOf(self, dotted_name, packagelevel=None):
"""Determine the package that contains ``dotted_name``."""
if '.' not in dotted_name:
return dotted_name
if not self.isPackage(dotted_name):
dotted_name = '.'.join(dotted_name.split('.')[:-1])
if packagelevel:
dotted_name = '.'.join(dotted_name.split('.')[:packagelevel])
return dotted_name | python | def packageOf(self, dotted_name, packagelevel=None):
"""Determine the package that contains ``dotted_name``."""
if '.' not in dotted_name:
return dotted_name
if not self.isPackage(dotted_name):
dotted_name = '.'.join(dotted_name.split('.')[:-1])
if packagelevel:
dotted_name = '.'.join(dotted_name.split('.')[:packagelevel])
return dotted_name | Determine the package that contains ``dotted_name``. | https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L569-L577 |
mgedmin/findimports | findimports.py | ModuleGraph.listModules | def listModules(self):
"""Return an alphabetical list of all modules."""
modules = list(self.modules.items())
modules.sort()
return [module for name, module in modules] | python | def listModules(self):
"""Return an alphabetical list of all modules."""
modules = list(self.modules.items())
modules.sort()
return [module for name, module in modules] | Return an alphabetical list of all modules. | https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L590-L594 |
mgedmin/findimports | findimports.py | ModuleGraph.packageGraph | def packageGraph(self, packagelevel=None):
"""Convert a module graph to a package graph."""
packages = {}
for module in self.listModules():
package_name = self.packageOf(module.modname, packagelevel)
if package_name not in packages:
dirname = os.path.dirname(module.filename)
packages[package_name] = Module(package_name, dirname)
package = packages[package_name]
for name in module.imports:
package_name = self.packageOf(name, packagelevel)
if package_name != package.modname: # no loops
package.imports.add(package_name)
graph = ModuleGraph()
graph.modules = packages
return graph | python | def packageGraph(self, packagelevel=None):
"""Convert a module graph to a package graph."""
packages = {}
for module in self.listModules():
package_name = self.packageOf(module.modname, packagelevel)
if package_name not in packages:
dirname = os.path.dirname(module.filename)
packages[package_name] = Module(package_name, dirname)
package = packages[package_name]
for name in module.imports:
package_name = self.packageOf(name, packagelevel)
if package_name != package.modname: # no loops
package.imports.add(package_name)
graph = ModuleGraph()
graph.modules = packages
return graph | Convert a module graph to a package graph. | https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L596-L611 |
mgedmin/findimports | findimports.py | ModuleGraph.collapseCycles | def collapseCycles(self):
"""Create a graph with cycles collapsed.
Collapse modules participating in a cycle to a single node.
"""
# This algorithm determines Strongly Connected Components. Look it up.
# It is adapted to suit our data structures.
# Phase 0: prepare the graph
imports = {}
for u in self.modules:
imports[u] = set()
for v in self.modules[u].imports:
if v in self.modules: # skip external dependencies
imports[u].add(v)
# Phase 1: order the vertices
visited = {}
for u in self.modules:
visited[u] = False
order = []
def visit1(u):
visited[u] = True
for v in imports[u]:
if not visited[v]:
visit1(v)
order.append(u)
for u in self.modules:
if not visited[u]:
visit1(u)
order.reverse()
# Phase 2: compute the inverse graph
revimports = {}
for u in self.modules:
revimports[u] = set()
for u in self.modules:
for v in imports[u]:
revimports[v].add(u)
# Phase 3: determine the strongly connected components
components = {}
component_of = {}
for u in self.modules:
visited[u] = False
def visit2(u):
visited[u] = True
component.append(u)
for v in revimports[u]:
if not visited[v]:
visit2(v)
for u in order:
if not visited[u]:
component = []
visit2(u)
component.sort()
node = ModuleCycle(component)
components[node.modname] = node
for modname in component:
component_of[modname] = node
# Phase 4: construct the condensed graph
for node in components.values():
for modname in node.modnames:
for impname in imports[modname]:
other = component_of[impname].modname
if other != node.modname:
node.imports.add(other)
graph = ModuleGraph()
graph.modules = components
return graph | python | def collapseCycles(self):
"""Create a graph with cycles collapsed.
Collapse modules participating in a cycle to a single node.
"""
# This algorithm determines Strongly Connected Components. Look it up.
# It is adapted to suit our data structures.
# Phase 0: prepare the graph
imports = {}
for u in self.modules:
imports[u] = set()
for v in self.modules[u].imports:
if v in self.modules: # skip external dependencies
imports[u].add(v)
# Phase 1: order the vertices
visited = {}
for u in self.modules:
visited[u] = False
order = []
def visit1(u):
visited[u] = True
for v in imports[u]:
if not visited[v]:
visit1(v)
order.append(u)
for u in self.modules:
if not visited[u]:
visit1(u)
order.reverse()
# Phase 2: compute the inverse graph
revimports = {}
for u in self.modules:
revimports[u] = set()
for u in self.modules:
for v in imports[u]:
revimports[v].add(u)
# Phase 3: determine the strongly connected components
components = {}
component_of = {}
for u in self.modules:
visited[u] = False
def visit2(u):
visited[u] = True
component.append(u)
for v in revimports[u]:
if not visited[v]:
visit2(v)
for u in order:
if not visited[u]:
component = []
visit2(u)
component.sort()
node = ModuleCycle(component)
components[node.modname] = node
for modname in component:
component_of[modname] = node
# Phase 4: construct the condensed graph
for node in components.values():
for modname in node.modnames:
for impname in imports[modname]:
other = component_of[impname].modname
if other != node.modname:
node.imports.add(other)
graph = ModuleGraph()
graph.modules = components
return graph | Create a graph with cycles collapsed.
Collapse modules participating in a cycle to a single node. | https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L634-L699 |
mgedmin/findimports | findimports.py | ModuleGraph.printImportedNames | def printImportedNames(self):
"""Produce a report of imported names."""
for module in self.listModules():
print("%s:" % module.modname)
print(" %s" % "\n ".join(imp.name for imp in module.imported_names)) | python | def printImportedNames(self):
"""Produce a report of imported names."""
for module in self.listModules():
print("%s:" % module.modname)
print(" %s" % "\n ".join(imp.name for imp in module.imported_names)) | Produce a report of imported names. | https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L701-L705 |
mgedmin/findimports | findimports.py | ModuleGraph.printImports | def printImports(self):
"""Produce a report of dependencies."""
for module in self.listModules():
print("%s:" % module.label)
if self.external_dependencies:
imports = list(module.imports)
else:
imports = [modname for modname in module.imports
if modname in self.modules]
imports.sort()
print(" %s" % "\n ".join(imports)) | python | def printImports(self):
"""Produce a report of dependencies."""
for module in self.listModules():
print("%s:" % module.label)
if self.external_dependencies:
imports = list(module.imports)
else:
imports = [modname for modname in module.imports
if modname in self.modules]
imports.sort()
print(" %s" % "\n ".join(imports)) | Produce a report of dependencies. | https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L707-L717 |
mgedmin/findimports | findimports.py | ModuleGraph.printUnusedImports | def printUnusedImports(self):
"""Produce a report of unused imports."""
for module in self.listModules():
names = [(unused.lineno, unused.name)
for unused in module.unused_names]
names.sort()
for lineno, name in names:
if not self.all_unused:
line = linecache.getline(module.filename, lineno)
if '#' in line:
# assume there's a comment explaining why it's not used
continue
print("%s:%s: %s not used" % (module.filename, lineno, name)) | python | def printUnusedImports(self):
"""Produce a report of unused imports."""
for module in self.listModules():
names = [(unused.lineno, unused.name)
for unused in module.unused_names]
names.sort()
for lineno, name in names:
if not self.all_unused:
line = linecache.getline(module.filename, lineno)
if '#' in line:
# assume there's a comment explaining why it's not used
continue
print("%s:%s: %s not used" % (module.filename, lineno, name)) | Produce a report of unused imports. | https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L719-L731 |
mgedmin/findimports | findimports.py | ModuleGraph.printDot | def printDot(self):
"""Produce a dependency graph in dot format."""
print("digraph ModuleDependencies {")
print(" node[shape=box];")
allNames = set()
nameDict = {}
for n, module in enumerate(self.listModules()):
module._dot_name = "mod%d" % n
nameDict[module.modname] = module._dot_name
print(" %s[label=\"%s\"];" % (module._dot_name,
quote(module.label)))
allNames |= module.imports
print(" node[style=dotted];")
if self.external_dependencies:
myNames = set(self.modules)
extNames = list(allNames - myNames)
extNames.sort()
for n, name in enumerate(extNames):
nameDict[name] = id = "extmod%d" % n
print(" %s[label=\"%s\"];" % (id, name))
for modname, module in sorted(self.modules.items()):
for other in sorted(module.imports):
if other in nameDict:
print(" %s -> %s;" % (nameDict[module.modname],
nameDict[other]))
print("}") | python | def printDot(self):
"""Produce a dependency graph in dot format."""
print("digraph ModuleDependencies {")
print(" node[shape=box];")
allNames = set()
nameDict = {}
for n, module in enumerate(self.listModules()):
module._dot_name = "mod%d" % n
nameDict[module.modname] = module._dot_name
print(" %s[label=\"%s\"];" % (module._dot_name,
quote(module.label)))
allNames |= module.imports
print(" node[style=dotted];")
if self.external_dependencies:
myNames = set(self.modules)
extNames = list(allNames - myNames)
extNames.sort()
for n, name in enumerate(extNames):
nameDict[name] = id = "extmod%d" % n
print(" %s[label=\"%s\"];" % (id, name))
for modname, module in sorted(self.modules.items()):
for other in sorted(module.imports):
if other in nameDict:
print(" %s -> %s;" % (nameDict[module.modname],
nameDict[other]))
print("}") | Produce a dependency graph in dot format. | https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L733-L758 |
marianoguerra/rst2html5 | html5css3/html.py | quote | def quote(text):
"""encode html entities"""
text = unicode(text)
return text.translate({
ord('&'): u'&amp;',
ord('<'): u'&lt;',
ord('"'): u'&quot;',
ord('>'): u'&gt;',
ord('@'): u'&#64;',
0xa0: u'&nbsp;'}) | python | def quote(text):
"""encode html entities"""
text = unicode(text)
return text.translate({
ord('&'): u'&amp;',
ord('<'): u'&lt;',
ord('"'): u'&quot;',
ord('>'): u'&gt;',
ord('@'): u'&#64;',
0xa0: u'&nbsp;'}) | encode html entities | https://github.com/marianoguerra/rst2html5/blob/667f2c384e7b9315e86e894a455062660d7b0334/html5css3/html.py#L19-L28 |
marianoguerra/rst2html5 | html5css3/html.py | _create_tags | def _create_tags(ctx):
"create all classes and put them in ctx"
for (tag, info) in _TAGS.items():
class_name = tag.title()
quote_, compact, self_closing, docs = info
def __init__(self, *childs, **attrs):
TagBase.__init__(self, childs, attrs)
cls = type(class_name, (TagBase,), {
"__doc__": docs,
"__init__": __init__
})
cls.QUOTE = quote_
cls.COMPACT = compact
cls.SELF_CLOSING = self_closing
ctx[class_name] = cls | python | def _create_tags(ctx):
"create all classes and put them in ctx"
for (tag, info) in _TAGS.items():
class_name = tag.title()
quote_, compact, self_closing, docs = info
def __init__(self, *childs, **attrs):
TagBase.__init__(self, childs, attrs)
cls = type(class_name, (TagBase,), {
"__doc__": docs,
"__init__": __init__
})
cls.QUOTE = quote_
cls.COMPACT = compact
cls.SELF_CLOSING = self_closing
ctx[class_name] = cls | create all classes and put them in ctx | https://github.com/marianoguerra/rst2html5/blob/667f2c384e7b9315e86e894a455062660d7b0334/html5css3/html.py#L256-L275 |
marianoguerra/rst2html5 | html5css3/html.py | tag_from_element | def tag_from_element(el):
"""
Convert an Element into a Tag.
``el`` is an instance of ``Element``. Returns an instance of the
corresponding subclass of ``TagBase``.
"""
tag = el.tag
namespace = None
if tag.startswith('{'):
# Strip namespace of the form "{namespace}tag"
namespace,tag = tag[1:].split('}')
try:
cls = globals()[tag.title()]
if not issubclass(cls, TagBase):
raise KeyError()
except KeyError:
raise ValueError("TagBase doesn't have a subclass for '%s'." % tag)
children = [tag_from_element(c) for c in el]
tag = cls(*children, **el.attrib)
tag.text = el.text
tag.tail = el.tail
if namespace:
tag.attrib['xmlns'] = namespace
return tag | python | def tag_from_element(el):
"""
Convert an Element into a Tag.
``el`` is an instance of ``Element``. Returns an instance of the
corresponding subclass of ``TagBase``.
"""
tag = el.tag
namespace = None
if tag.startswith('{'):
# Strip namespace of the form "{namespace}tag"
namespace,tag = tag[1:].split('}')
try:
cls = globals()[tag.title()]
if not issubclass(cls, TagBase):
raise KeyError()
except KeyError:
raise ValueError("TagBase doesn't have a subclass for '%s'." % tag)
children = [tag_from_element(c) for c in el]
tag = cls(*children, **el.attrib)
tag.text = el.text
tag.tail = el.tail
if namespace:
tag.attrib['xmlns'] = namespace
return tag | Convert an Element into a Tag.
``el`` is an instance of ``Element``. Returns an instance of the
corresponding subclass of ``TagBase``. | https://github.com/marianoguerra/rst2html5/blob/667f2c384e7b9315e86e894a455062660d7b0334/html5css3/html.py#L280-L304 |
marianoguerra/rst2html5 | html5css3/html.py | html_to_tags | def html_to_tags(code):
"""
Convert HTML code to tags.
``code`` is a string containing HTML code. The return value is a
list of corresponding instances of ``TagBase``.
"""
code = ('<div>' + code + '</div>').encode('utf8')
el = ET.fromstring(code)
return [tag_from_element(c) for c in el] | python | def html_to_tags(code):
"""
Convert HTML code to tags.
``code`` is a string containing HTML code. The return value is a
list of corresponding instances of ``TagBase``.
"""
code = ('<div>' + code + '</div>').encode('utf8')
el = ET.fromstring(code)
return [tag_from_element(c) for c in el] | Convert HTML code to tags.
``code`` is a string containing HTML code. The return value is a
list of corresponding instances of ``TagBase``. | https://github.com/marianoguerra/rst2html5/blob/667f2c384e7b9315e86e894a455062660d7b0334/html5css3/html.py#L307-L316 |
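A sketch tying together quote() and html_to_tags() from the rows above; the module is Python 2-era (it relies on unicode), hence the u-prefixed literals.

```python
from html5css3.html import html_to_tags, quote

print(quote(u'5 < 6 & "7"'))   # -> 5 &lt; 6 &amp; &quot;7&quot;
tags = html_to_tags(u'<p>Hello <b>world</b></p>')   # list of TagBase subclass instances
```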
marianoguerra/rst2html5 | html5css3/__init__.py | HTMLTranslator._init_math_handler | def _init_math_handler(self):
"""
Parse math configuration and set up math handler.
"""
fields = self.settings.math_output.split(None, 1)
name = fields[0].lower()
option = fields[1] if len(fields) > 1 else None
if name == 'html':
option = self.settings.math_css or option
self.math_handler = HTMLMathHandler(css_filename=option)
elif name == 'mathml':
if option:
raise ValueError(('Math handler "%s" does not support ' +
'option "%s".') % (name, option))
self.math_handler = MathMLMathHandler()
elif name == 'mathjax':
# The MathJax handler can be configured via different ways:
#
# - By passing an additional JS url to "--math-output"
# (to stay backwards-compatible with docutils)
#
# - By using "--mathjax-opts" (to stay backwards compatible
# with the previous html5css3 mathjax postprocessor)
#
# - By using "--mathjax-url" and "--mathjax-config" (the
# preferred way)
js_url = option
config = None
if self.settings.mathjax_opts:
parts = self.settings.mathjax_opts.split(',')
options = dict(part.split('=', 1) for part in parts)
js_url = options.get('url', js_url)
config = options.get('config', config)
js_url = self.settings.mathjax_url or js_url
config = self.settings.mathjax_config or config
self.math_handler = MathJaxMathHandler(js_url=js_url,
config_filename=config)
elif name == 'latex':
if option:
raise ValueError(('Math handler "%s" does not support ' +
'option "%s".') % (name, option))
self.math_handler = LaTeXMathHandler()
else:
raise ValueError('Unknown math handler "%s".' % name) | python | def _init_math_handler(self):
"""
Parse math configuration and set up math handler.
"""
fields = self.settings.math_output.split(None, 1)
name = fields[0].lower()
option = fields[1] if len(fields) > 1 else None
if name == 'html':
option = self.settings.math_css or option
self.math_handler = HTMLMathHandler(css_filename=option)
elif name == 'mathml':
if option:
raise ValueError(('Math handler "%s" does not support ' +
'option "%s".') % (name, option))
self.math_handler = MathMLMathHandler()
elif name == 'mathjax':
# The MathJax handler can be configured via different ways:
#
# - By passing an additional JS url to "--math-output"
# (to stay backwards-compatible with docutils)
#
# - By using "--mathjax-opts" (to stay backwards compatible
# with the previous html5css3 mathjax postprocessor)
#
# - By using "--mathjax-url" and "--mathjax-config" (the
# preferred way)
js_url = option
config = None
if self.settings.mathjax_opts:
parts = self.settings.mathjax_opts.split(',')
options = dict(part.split('=', 1) for part in parts)
js_url = options.get('url', js_url)
config = options.get('config', config)
js_url = self.settings.mathjax_url or js_url
config = self.settings.mathjax_config or config
self.math_handler = MathJaxMathHandler(js_url=js_url,
config_filename=config)
elif name == 'latex':
if option:
raise ValueError(('Math handler "%s" does not support ' +
'option "%s".') % (name, option))
self.math_handler = LaTeXMathHandler()
else:
raise ValueError('Unknown math handler "%s".' % name) | Parse math configuration and set up math handler. | https://github.com/marianoguerra/rst2html5/blob/667f2c384e7b9315e86e894a455062660d7b0334/html5css3/__init__.py#L495-L538 |
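The math_output values accepted by _init_math_handler above, collected as a sketch; the stylesheet name and MathJax URL are placeholders.

```python
# Each setting string is "<handler-name> [option]"; handlers that take no option
# raise ValueError when one is supplied.
MATH_OUTPUT_EXAMPLES = [
    "html math.css",                           # HTMLMathHandler, optional CSS file
    "mathml",                                  # MathMLMathHandler, no option allowed
    "mathjax https://example.org/MathJax.js",  # MathJaxMathHandler, optional JS url
    "latex",                                   # LaTeXMathHandler, no option allowed
]
```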
marianoguerra/rst2html5 | html5css3/__init__.py | HTMLTranslator.append_default_stylesheets | def append_default_stylesheets(self):
"""
Appends the default styles defined on the translator settings.
"""
for style in utils.get_stylesheet_list(self.settings):
self.css(style) | python | def append_default_stylesheets(self):
"""
Appends the default styles defined on the translator settings.
"""
for style in utils.get_stylesheet_list(self.settings):
self.css(style) | Appends the default styles defined on the translator settings. | https://github.com/marianoguerra/rst2html5/blob/667f2c384e7b9315e86e894a455062660d7b0334/html5css3/__init__.py#L540-L545 |
marianoguerra/rst2html5 | html5css3/__init__.py | HTMLTranslator.css | def css(self, path):
"""
Link/embed CSS file.
"""
if self.settings.embed_content:
content = codecs.open(path, 'r', encoding='utf8').read()
tag = Style(content, type="text/css")
else:
tag = Link(href=path, rel="stylesheet", type_="text/css")
self.head.append(tag) | python | def css(self, path):
"""
Link/embed CSS file.
"""
if self.settings.embed_content:
content = codecs.open(path, 'r', encoding='utf8').read()
tag = Style(content, type="text/css")
else:
tag = Link(href=path, rel="stylesheet", type_="text/css")
self.head.append(tag) | Link/embed CSS file. | https://github.com/marianoguerra/rst2html5/blob/667f2c384e7b9315e86e894a455062660d7b0334/html5css3/__init__.py#L547-L556 |
ggaughan/pipe2py | pipe2py/lib/pprint2.py | repr_args | def repr_args(args):
"""formats a list of function arguments prettily but as working code
(kwargs are tuples (argname, argvalue)
"""
res = []
for x in args:
if isinstance(x, tuple) and len(x) == 2:
key, value = x
# todo: exclude this key if value is its default
res += ["%s=%s" % (key, repr_arg(value))]
else:
res += [repr_arg(x)]
return ', '.join(res) | python | def repr_args(args):
"""formats a list of function arguments prettily but as working code
(kwargs are tuples (argname, argvalue)
"""
res = []
for x in args:
if isinstance(x, tuple) and len(x) == 2:
key, value = x
# todo: exclude this key if value is its default
res += ["%s=%s" % (key, repr_arg(value))]
else:
res += [repr_arg(x)]
return ', '.join(res) | formats a list of function arguments prettily but as working code
(kwargs are tuples (argname, argvalue) | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/lib/pprint2.py#L20-L33 |
ggaughan/pipe2py | pipe2py/lib/pprint2.py | repr_arg | def repr_arg(d):
"""formats a function argument prettily but as working code
unicode encodable as ascii is formatted as str"""
if isinstance(d, dict):
# if d can be expressed in key=value syntax:
return "{%s}" % ", ".join(
"%s: %s" % (repr_arg(k), repr_arg(v)) for k, v in d.items())
if isinstance(d, list):
return "[%s]" % ", ".join(repr_arg(elem) for elem in d)
if isinstance(d, unicode):
try:
return repr(d.encode("ascii"))
except UnicodeEncodeError:
return repr(d)
return repr(d) | python | def repr_arg(d):
"""formats a function argument prettily but as working code
unicode encodable as ascii is formatted as str"""
if isinstance(d, dict):
# if d can be expressed in key=value syntax:
return "{%s}" % ", ".join(
"%s: %s" % (repr_arg(k), repr_arg(v)) for k, v in d.items())
if isinstance(d, list):
return "[%s]" % ", ".join(repr_arg(elem) for elem in d)
if isinstance(d, unicode):
try:
return repr(d.encode("ascii"))
except UnicodeEncodeError:
return repr(d)
return repr(d) | formats a function argument prettily but as working code
unicode encodable as ascii is formatted as str | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/lib/pprint2.py#L36-L52 |
ggaughan/pipe2py | pipe2py/lib/pprint2.py | str_args | def str_args(args):
"""formats a list of function arguments prettily not as code
(kwargs are tuples (argname, argvalue)
"""
res = []
for x in args:
if isinstance(x, tuple) and len(x) == 2:
key, value = x
if value and str_arg(value):
res += ["%s=%s" % (key, str_arg(value))]
else:
res += [str_arg(x)]
return ', '.join(res) | python | def str_args(args):
"""formats a list of function arguments prettily not as code
(kwargs are tuples (argname, argvalue)
"""
res = []
for x in args:
if isinstance(x, tuple) and len(x) == 2:
key, value = x
if value and str_arg(value):
res += ["%s=%s" % (key, str_arg(value))]
else:
res += [str_arg(x)]
return ', '.join(res) | formats a list of function arguments prettily not as code
(kwargs are tuples (argname, argvalue) | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/lib/pprint2.py#L55-L68 |
ggaughan/pipe2py | pipe2py/lib/pprint2.py | str_arg | def str_arg(d):
"""formats a function argument prettily not as code
dicts are expressed in {key=value} syntax
strings are formatted using str in quotes not repr"""
if not d:
return None
if isinstance(d, dict):
if len(d) == 2 and d.get('type') == 'text' and 'value' in d:
return str_arg(d['value'])
if len(d) == 2 and d.get('type') == 'text' and 'subkey' in d:
return ".%s" % d['subkey']
if d.get('type') == 'module':
return None
return "{%s}" % str_args(d.items())
if isinstance(d, list):
if len(d) == 1:
return str_arg(d[0])
return "[%s]" % ", ".join(str_arg(elem) for elem in d)
if isinstance(d, unicode):
return '"%s"' % d
return repr(d) | python | def str_arg(d):
"""formats a function argument prettily not as code
dicts are expressed in {key=value} syntax
strings are formatted using str in quotes not repr"""
if not d:
return None
if isinstance(d, dict):
if len(d) == 2 and d.get('type') == 'text' and 'value' in d:
return str_arg(d['value'])
if len(d) == 2 and d.get('type') == 'text' and 'subkey' in d:
return ".%s" % d['subkey']
if d.get('type') == 'module':
return None
return "{%s}" % str_args(d.items())
if isinstance(d, list):
if len(d) == 1:
return str_arg(d[0])
return "[%s]" % ", ".join(str_arg(elem) for elem in d)
if isinstance(d, unicode):
return '"%s"' % d
return repr(d) | formats a function argument prettily not as code
dicts are expressed in {key=value} syntax
strings are formatted using str in quotes not repr | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/lib/pprint2.py#L71-L93 |
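A companion sketch for str_arg and str_args (hypothetical values; the {'type': 'text', 'value': ...} shape mirrors the conf dicts used by the pipe modules below):

from pipe2py.lib.pprint2 import str_arg, str_args

# a Yahoo-Pipes-style text wrapper collapses to its bare value, quoted
print str_arg({'type': 'text', 'value': u'hello'})  # "hello"
# kwargs arrive as (name, value) tuples, everything else positionally
print str_args([('count', 3), u'title'])            # count=3, "title"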
ggaughan/pipe2py | pipe2py/modules/pipehash.py | asyncPipeHash | def asyncPipeHash(context=None, _INPUT=None, conf=None, **kwargs):
"""A string module that asynchronously hashes the given text. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : twisted Deferred iterable of items or strings
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of hashed strings
"""
splits = yield asyncGetSplits(_INPUT, conf, **cdicts(opts, kwargs))
parsed = yield asyncDispatch(splits, *get_async_dispatch_funcs())
_OUTPUT = yield asyncStarMap(partial(maybeDeferred, parse_result), parsed)
returnValue(iter(_OUTPUT)) | python | def asyncPipeHash(context=None, _INPUT=None, conf=None, **kwargs):
"""A string module that asynchronously hashes the given text. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : twisted Deferred iterable of items or strings
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of hashed strings
"""
splits = yield asyncGetSplits(_INPUT, conf, **cdicts(opts, kwargs))
parsed = yield asyncDispatch(splits, *get_async_dispatch_funcs())
_OUTPUT = yield asyncStarMap(partial(maybeDeferred, parse_result), parsed)
returnValue(iter(_OUTPUT)) | A string module that asynchronously hashes the given text. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : twisted Deferred iterable of items or strings
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of hashed strings | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipehash.py#L28-L43 |
ggaughan/pipe2py | pipe2py/modules/pipetail.py | pipe_tail | def pipe_tail(context=None, _INPUT=None, conf=None, **kwargs):
"""Returns a specified number of items from the bottom of a feed.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
kwargs -- terminal, if the truncation value is wired in
conf : count -- length of the truncated feed, if specified literally
Yields
------
_OUTPUT : items
"""
conf = DotDict(conf)
limit = conf.get('count', func=int, **kwargs)
for item in deque(_INPUT, limit):
yield item | python | def pipe_tail(context=None, _INPUT=None, conf=None, **kwargs):
"""Returns a specified number of items from the bottom of a feed.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
kwargs -- terminal, if the truncation value is wired in
conf : count -- length of the truncated feed, if specified literally
Yields
------
_OUTPUT : items
"""
conf = DotDict(conf)
limit = conf.get('count', func=int, **kwargs)
for item in deque(_INPUT, limit):
yield item | Returns a specified number of items from the bottom of a feed.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
kwargs -- terminal, if the truncation value is wired in
conf : count -- length of the truncated feed, if specified literally
Yields
------
_OUTPUT : items | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipetail.py#L14-L32 |
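A minimal usage sketch for pipe_tail; the {'value': ...} conf shape is an assumption carried over from the sibling modules in this file:

from pipe2py.modules.pipetail import pipe_tail

items = [{'title': 'a'}, {'title': 'b'}, {'title': 'c'}, {'title': 'd'}]
conf = {'count': {'value': 2}}  # hypothetical conf: keep only the last two items
print list(pipe_tail(_INPUT=iter(items), conf=conf))  # [{'title': 'c'}, {'title': 'd'}]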
ggaughan/pipe2py | pipe2py/lib/topsort.py | get_graph_component | def get_graph_component(graph):
""" Identify strongly connected components in a graph using
Tarjan's algorithm.
graph should be a dictionary mapping node names to
lists of successor nodes.
"""
components = map(partial(_visit, graph=graph), graph)
node_component = dict(_gen_node_component(components))
graph_component = {component: [] for component in components}
graph_component.update(
dict(_gen_graph_component(graph, node_component, _gen_graph_value)))
return graph_component | python | def get_graph_component(graph):
""" Identify strongly connected components in a graph using
Tarjan's algorithm.
graph should be a dictionary mapping node names to
lists of successor nodes.
"""
components = map(partial(_visit, graph=graph), graph)
node_component = dict(_gen_node_component(components))
graph_component = {component: [] for component in components}
graph_component.update(
dict(_gen_graph_component(graph, node_component, _gen_graph_value)))
return graph_component | Identify strongly connected components in a graph using
Tarjan's algorithm.
graph should be a dictionary mapping node names to
lists of successor nodes. | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/lib/topsort.py#L58-L71 |
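A call sketch with a hypothetical graph; only the documented input shape is shown, since the exact return value depends on the private _visit helper, which is not part of this record:

from pipe2py.lib.topsort import get_graph_component

# each node name maps to the list of its successor nodes
graph = {'a': ['b'], 'b': ['c'], 'c': ['a'], 'd': []}
print get_graph_component(graph)  # groups the a-b-c cycle into one strongly connected component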
ggaughan/pipe2py | pipe2py/modules/pipestrregex.py | asyncPipeStrregex | def asyncPipeStrregex(context=None, _INPUT=None, conf=None, **kwargs):
"""A string module that asynchronously replaces text using regexes. Each
has the general format: "In [field] replace [regex pattern] with [text]".
Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : twisted Deferred iterable of items or strings
conf : {
'RULE': [
{'match': {'value': <regex1>}, 'replace': {'value': <'text1'>}},
{'match': {'value': <regex2>}, 'replace': {'value': <'text2'>}},
{'match': {'value': <regex3>}, 'replace': {'value': <'text3'>}},
]
}
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of replaced strings
"""
splits = yield asyncGetSplits(_INPUT, conf['RULE'], **cdicts(opts, kwargs))
first = partial(maybeDeferred, convert_func)
asyncFuncs = get_async_dispatch_funcs(first=first)
parsed = yield asyncDispatch(splits, *asyncFuncs)
_OUTPUT = yield asyncStarMap(asyncParseResult, parsed)
returnValue(iter(_OUTPUT)) | python | def asyncPipeStrregex(context=None, _INPUT=None, conf=None, **kwargs):
"""A string module that asynchronously replaces text using regexes. Each
has the general format: "In [field] replace [regex pattern] with [text]".
Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : twisted Deferred iterable of items or strings
conf : {
'RULE': [
{'match': {'value': <regex1>}, 'replace': {'value': <'text1'>}},
{'match': {'value': <regex2>}, 'replace': {'value': <'text2'>}},
{'match': {'value': <regex3>}, 'replace': {'value': <'text3'>}},
]
}
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of replaced strings
"""
splits = yield asyncGetSplits(_INPUT, conf['RULE'], **cdicts(opts, kwargs))
first = partial(maybeDeferred, convert_func)
asyncFuncs = get_async_dispatch_funcs(first=first)
parsed = yield asyncDispatch(splits, *asyncFuncs)
_OUTPUT = yield asyncStarMap(asyncParseResult, parsed)
returnValue(iter(_OUTPUT)) | A string module that asynchronously replaces text using regexes. Each
has the general format: "In [field] replace [regex pattern] with [text]".
Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : twisted Deferred iterable of items or strings
conf : {
'RULE': [
{'match': {'value': <regex1>}, 'replace': {'value': <'text1'>}},
{'match': {'value': <regex2>}, 'replace': {'value': <'text2'>}},
{'match': {'value': <regex3>}, 'replace': {'value': <'text3'>}},
]
}
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of replaced strings | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipestrregex.py#L33-L59 |
ggaughan/pipe2py | pipe2py/modules/pipestrregex.py | pipe_strregex | def pipe_strregex(context=None, _INPUT=None, conf=None, **kwargs):
"""A string module that replaces text using regexes. Each has the general
format: "In [field] replace [regex pattern] with [text]". Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : iterable of items or strings
conf : {
'RULE': [
{
'match': {'value': <regex>},
'replace': {'value': <'replacement'>}
}
]
}
Returns
-------
_OUTPUT : generator of replaced strings
"""
splits = get_splits(_INPUT, conf['RULE'], **cdicts(opts, kwargs))
parsed = utils.dispatch(splits, *get_dispatch_funcs(first=convert_func))
_OUTPUT = starmap(parse_result, parsed)
return _OUTPUT | python | def pipe_strregex(context=None, _INPUT=None, conf=None, **kwargs):
"""A string module that replaces text using regexes. Each has the general
format: "In [field] replace [regex pattern] with [text]". Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : iterable of items or strings
conf : {
'RULE': [
{
'match': {'value': <regex>},
'replace': {'value': <'replacement'>}
}
]
}
Returns
-------
_OUTPUT : generator of replaced strings
"""
splits = get_splits(_INPUT, conf['RULE'], **cdicts(opts, kwargs))
parsed = utils.dispatch(splits, *get_dispatch_funcs(first=convert_func))
_OUTPUT = starmap(parse_result, parsed)
return _OUTPUT | A string module that replaces text using regexes. Each has the general
format: "In [field] replace [regex pattern] with [text]". Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : iterable of items or strings
conf : {
'RULE': [
{
'match': {'value': <regex>},
'replace': {'value': <'replacement'>}
}
]
}
Returns
-------
_OUTPUT : generator of replaced strings | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipestrregex.py#L67-L91 |
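A usage sketch for pipe_strregex (hypothetical rule; the item shape is an assumption, since _INPUT only needs to be an iterable of items or strings):

from pipe2py.modules.pipestrregex import pipe_strregex

conf = {'RULE': [
    {'match': {'value': r'cats?'}, 'replace': {'value': 'dog'}},
]}
items = [{'content': 'one cat, two cats'}]  # hypothetical input items
for item in pipe_strregex(_INPUT=iter(items), conf=conf):
    print item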
ggaughan/pipe2py | pipe2py/modules/pipexpathfetchpage.py | pipe_xpathfetchpage | def pipe_xpathfetchpage(context=None, _INPUT=None, conf=None, **kwargs):
"""A source that fetches the content of a given website as DOM nodes or a
string. Loopable.
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items or fields
conf : dict
URL -- url object containing the URL to download
xpath -- xpath to extract
html5 -- use html5 parser?
useAsString -- emit items as string?
TODOS:
- don't retrieve pages larger than 1.5MB
- don't retrieve if page is not indexable.
Yields
------
_OUTPUT : items
"""
conf = DotDict(conf)
urls = utils.listize(conf['URL'])
for item in _INPUT:
for item_url in urls:
url = utils.get_value(DotDict(item_url), DotDict(item), **kwargs)
url = utils.get_abspath(url)
f = urlopen(url)
# TODO: it seems that Yahoo! converts relative links to
# absolute. This needs to be done on the content but seems to
# be a non-trivial task in Python?
content = unicode(f.read(), 'utf-8')
if context and context.verbose:
print '............Content .................'
print content
print '...............EOF...................'
xpath = conf.get('xpath', **kwargs)
html5 = conf.get('html5', **kwargs) == 'true'
use_as_string = conf.get('useAsString', **kwargs) == 'true'
tree = html5parser.parse(f) if html5 else html.parse(f)
root = tree.getroot()
items = root.xpath(xpath)
if context and context.verbose:
print 'XPathFetchPage: found count items:', len(items)
for etree in items:
i = utils.etree_to_dict(etree)
if context and context.verbose:
print '--------------item data --------------------'
print i
print '--------------EOF item data ----------------'
if use_as_string:
yield {'content': unicode(i)}
else:
yield i
if item.get('forever'):
# _INPUT is pipeforever and not a loop,
# so we just yield our item once
break | python | def pipe_xpathfetchpage(context=None, _INPUT=None, conf=None, **kwargs):
"""A source that fetches the content of a given website as DOM nodes or a
string. Loopable.
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items or fields
conf : dict
URL -- url object containing the URL to download
xpath -- xpath to extract
html5 -- use html5 parser?
useAsString -- emit items as string?
TODOS:
- don't retrieve pages larger than 1.5MB
- don't retrieve if page is not indexable.
Yields
------
_OUTPUT : items
"""
conf = DotDict(conf)
urls = utils.listize(conf['URL'])
for item in _INPUT:
for item_url in urls:
url = utils.get_value(DotDict(item_url), DotDict(item), **kwargs)
url = utils.get_abspath(url)
f = urlopen(url)
# TODO: it seems that Yahoo! converts relative links to
# absolute. This needs to be done on the content but seems to
# be a non-trivial task in Python?
content = unicode(f.read(), 'utf-8')
if context and context.verbose:
print '............Content .................'
print content
print '...............EOF...................'
xpath = conf.get('xpath', **kwargs)
html5 = conf.get('html5', **kwargs) == 'true'
use_as_string = conf.get('useAsString', **kwargs) == 'true'
tree = html5parser.parse(f) if html5 else html.parse(f)
root = tree.getroot()
items = root.xpath(xpath)
if context and context.verbose:
print 'XPathFetchPage: found count items:', len(items)
for etree in items:
i = utils.etree_to_dict(etree)
if context and context.verbose:
print '--------------item data --------------------'
print i
print '--------------EOF item data ----------------'
if use_as_string:
yield {'content': unicode(i)}
else:
yield i
if item.get('forever'):
# _INPUT is pipeforever and not a loop,
# so we just yield our item once
break | A source that fetches the content of a given website as DOM nodes or a
string. Loopable.
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items or fields
conf : dict
URL -- url object containing the URL to download
xpath -- xpath to extract
html5 -- use html5 parser?
useAsString -- emit items as string?
TODOS:
- don't retrieve pages larger than 1.5MB
- don't retrieve if page is not indexable.
Yields
------
_OUTPUT : items | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipexpathfetchpage.py#L17-L82 |
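A conf sketch for pipe_xpathfetchpage (hypothetical URL and xpath; pipe_forever is the single-pass source used by the doctests further down this file):

from pipe2py.modules.pipeforever import pipe_forever
from pipe2py.modules.pipexpathfetchpage import pipe_xpathfetchpage

conf = {
    'URL': {'value': 'http://example.com/'},  # hypothetical page
    'xpath': {'value': '//a'},                # extract every anchor element
    'useAsString': {'value': 'false'},        # keep items as dicts, not strings
}
for item in pipe_xpathfetchpage(_INPUT=pipe_forever(), conf=conf):
    print item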
ggaughan/pipe2py | pipe2py/lib/utils.py | extract_dependencies | def extract_dependencies(pipe_def=None, pipe_generator=None):
"""Extract modules used by a pipe"""
if pipe_def:
pydeps = gen_dependencies(pipe_def)
elif pipe_generator:
pydeps = pipe_generator(Context(describe_dependencies=True))
else:
raise Exception('Must supply at least one kwarg!')
return sorted(set(pydeps)) | python | def extract_dependencies(pipe_def=None, pipe_generator=None):
"""Extract modules used by a pipe"""
if pipe_def:
pydeps = gen_dependencies(pipe_def)
elif pipe_generator:
pydeps = pipe_generator(Context(describe_dependencies=True))
else:
raise Exception('Must supply at least one kwarg!')
return sorted(set(pydeps)) | Extract modules used by a pipe | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/lib/utils.py#L74-L83 |
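A short sketch for extract_dependencies; the JSON file name is hypothetical and stands for an exported pipe definition:

import json
from pipe2py.lib.utils import extract_dependencies

with open('pipe_def.json') as f:  # hypothetical exported pipe definition
    pipe_def = json.load(f)
print extract_dependencies(pipe_def=pipe_def)  # e.g. ['pipefetch', 'pipefilter']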
ggaughan/pipe2py | pipe2py/lib/utils.py | extract_input | def extract_input(pipe_def=None, pipe_generator=None):
"""Extract inputs required by a pipe"""
if pipe_def:
pyinput = gen_input(pipe_def)
elif pipe_generator:
pyinput = pipe_generator(Context(describe_input=True))
else:
raise Exception('Must supply at least one kwarg!')
return sorted(list(pyinput)) | python | def extract_input(pipe_def=None, pipe_generator=None):
"""Extract inputs required by a pipe"""
if pipe_def:
pyinput = gen_input(pipe_def)
elif pipe_generator:
pyinput = pipe_generator(Context(describe_input=True))
else:
raise Exception('Must supply at least one kwarg!')
return sorted(list(pyinput)) | Extract inputs required by a pipe | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/lib/utils.py#L86-L95 |
ggaughan/pipe2py | pipe2py/lib/utils.py | pythonise | def pythonise(id, encoding='ascii'):
"""Return a Python-friendly id"""
replace = {'-': '_', ':': '_', '/': '_'}
func = lambda id, pair: id.replace(pair[0], pair[1])
id = reduce(func, replace.iteritems(), id)
id = '_%s' % id if id[0] in string.digits else id
return id.encode(encoding) | python | def pythonise(id, encoding='ascii'):
"""Return a Python-friendly id"""
replace = {'-': '_', ':': '_', '/': '_'}
func = lambda id, pair: id.replace(pair[0], pair[1])
id = reduce(func, replace.iteritems(), id)
id = '_%s' % id if id[0] in string.digits else id
return id.encode(encoding) | Return a Python-friendly id | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/lib/utils.py#L98-L104 |
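Concrete behaviour of pythonise, read directly from the body above:

from pipe2py.lib.utils import pythonise

print pythonise(u'sw-502:module/1')  # sw_502_module_1  ('-', ':' and '/' become '_')
print pythonise(u'2nd-pipe')         # _2nd_pipe        (a leading digit gains a '_' prefix)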
ggaughan/pipe2py | pipe2py/lib/utils.py | etree_to_dict | def etree_to_dict(element):
"""Convert an eTree xml into dict imitating how Yahoo Pipes does it.
todo: further investigate white space and multivalue handling
"""
i = dict(element.items())
content = element.text.strip() if element.text else None
i.update({'content': content}) if content else None
if len(element.getchildren()):
for child in element.iterchildren():
tag = child.tag.split('}', 1)[-1]
new = etree_to_dict(child)
content = _make_content(i, tag, new)
i.update({tag: content}) if content else None
tag = 'content'
new = child.tail.strip() if child.tail else None
content = _make_content(i, tag, new)
i.update({tag: content}) if content else None
elif content and not set(i).difference(['content']):
# element is leaf node and doesn't have attributes
i = content
return i | python | def etree_to_dict(element):
"""Convert an eTree xml into dict imitating how Yahoo Pipes does it.
todo: further investigate white space and multivalue handling
"""
i = dict(element.items())
content = element.text.strip() if element.text else None
i.update({'content': content}) if content else None
if len(element.getchildren()):
for child in element.iterchildren():
tag = child.tag.split('}', 1)[-1]
new = etree_to_dict(child)
content = _make_content(i, tag, new)
i.update({tag: content}) if content else None
tag = 'content'
new = child.tail.strip() if child.tail else None
content = _make_content(i, tag, new)
i.update({tag: content}) if content else None
elif content and not set(i).difference(['content']):
# element is leaf node and doesn't have attributes
i = content
return i | Convert an eTree xml into dict imitating how Yahoo Pipes does it.
todo: further investigate white space and multivalue handling | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/lib/utils.py#L146-L170 |
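A minimal sketch for etree_to_dict using lxml (already a dependency of the fetch modules in this file); the expected output is indicative only, since the _make_content helper is not shown in this record:

from lxml import etree
from pipe2py.lib.utils import etree_to_dict

root = etree.fromstring('<item id="1"><title>hi</title></item>')
print etree_to_dict(root)  # e.g. {'id': '1', 'title': 'hi'}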
ggaughan/pipe2py | pipe2py/lib/utils.py | broadcast | def broadcast(_INPUT, *funcs, **kwargs):
"""copies an iterable and delivers the items to multiple functions
/--> foo2bar(_INPUT) --> \
/ \
_INPUT ---> foo2baz(_INPUT) ---> _OUTPUT
\ /
\--> foo2qux(_INPUT) --> /
One way to construct such a flow in code would be::
_INPUT = repeat('foo', 3)
foo2bar = lambda word: word.replace('foo', 'bar')
foo2baz = lambda word: word.replace('foo', 'baz')
foo2qux = lambda word: word.replace('foo', 'quz')
_OUTPUT = broadcast(_INPUT, foo2bar, foo2baz, foo2qux)
_OUTPUT == repeat(('bar', 'baz', 'qux'), 3)
"""
map_func = kwargs.get('map_func', _map_func)
apply_func = kwargs.get('apply_func', _apply_func)
splits = izip(*tee(_INPUT, len(funcs)))
return map_func(partial(apply_func, funcs), splits) | python | def broadcast(_INPUT, *funcs, **kwargs):
"""copies an iterable and delivers the items to multiple functions
/--> foo2bar(_INPUT) --> \
/ \
_INPUT ---> foo2baz(_INPUT) ---> _OUTPUT
\ /
\--> foo2qux(_INPUT) --> /
One way to construct such a flow in code would be::
_INPUT = repeat('foo', 3)
foo2bar = lambda word: word.replace('foo', 'bar')
foo2baz = lambda word: word.replace('foo', 'baz')
foo2qux = lambda word: word.replace('foo', 'quz')
_OUTPUT = broadcast(_INPUT, foo2bar, foo2baz, foo2qux)
_OUTPUT == repeat(('bar', 'baz', 'qux'), 3)
"""
map_func = kwargs.get('map_func', _map_func)
apply_func = kwargs.get('apply_func', _apply_func)
splits = izip(*tee(_INPUT, len(funcs)))
return map_func(partial(apply_func, funcs), splits) | copies an iterable and delivers the items to multiple functions
/--> foo2bar(_INPUT) --> \
/ \
_INPUT ---> foo2baz(_INPUT) ---> _OUTPUT
\ /
\--> foo2qux(_INPUT) --> /
One way to construct such a flow in code would be::
_INPUT = repeat('foo', 3)
foo2bar = lambda word: word.replace('foo', 'bar')
foo2baz = lambda word: word.replace('foo', 'baz')
foo2qux = lambda word: word.replace('foo', 'quz')
_OUTPUT = broadcast(_INPUT, foo2bar, foo2baz, foo2qux)
_OUTPUT == repeat(('bar', 'baz', 'qux'), 3) | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/lib/utils.py#L217-L238 |
ggaughan/pipe2py | pipe2py/lib/utils.py | dispatch | def dispatch(splits, *funcs, **kwargs):
"""takes multiple iterables (returned by dispatch or broadcast) and delivers
the items to multiple functions
/-----> _INPUT1 --> double(_INPUT1) --> \
/ \
splits ------> _INPUT2 --> triple(_INPUT2) ---> _OUTPUT
\ /
\--> _INPUT3 --> quadruple(_INPUT3) --> /
One way to construct such a flow in code would be::
splits = repeat(('bar', 'baz', 'qux'), 3)
double = lambda word: word * 2
triple = lambda word: word * 3
quadruple = lambda word: word * 4
_OUTPUT = dispatch(splits, double, triple, quadruple)
_OUTPUT == repeat(('barbar', 'bazbazbaz', 'quxquxquxqux'), 3)
"""
map_func = kwargs.get('map_func', _map_func)
apply_func = kwargs.get('apply_func', _apply_func)
return map_func(partial(apply_func, funcs), splits) | python | def dispatch(splits, *funcs, **kwargs):
"""takes multiple iterables (returned by dispatch or broadcast) and delivers
the items to multiple functions
/-----> _INPUT1 --> double(_INPUT1) --> \
/ \
splits ------> _INPUT2 --> triple(_INPUT2) ---> _OUTPUT
\ /
\--> _INPUT3 --> quadruple(_INPUT3) --> /
One way to construct such a flow in code would be::
splits = repeat(('bar', 'baz', 'qux'), 3)
double = lambda word: word * 2
triple = lambda word: word * 3
quadruple = lambda word: word * 4
_OUTPUT = dispatch(splits, double, triple, quadruple)
_OUTPUT == repeat(('barbar', 'bazbazbaz', 'quxquxquxqux'), 3)
"""
map_func = kwargs.get('map_func', _map_func)
apply_func = kwargs.get('apply_func', _apply_func)
return map_func(partial(apply_func, funcs), splits) | takes multiple iterables (returned by dispatch or broadcast) and delivers
the items to multiple functions
/-----> _INPUT1 --> double(_INPUT1) --> \
/ \
splits ------> _INPUT2 --> triple(_INPUT2) ---> _OUTPUT
\ /
\--> _INPUT3 --> quadruple(_INPUT3) --> /
One way to construct such a flow in code would be::
splits = repeat(('bar', 'baz', 'qux'), 3)
double = lambda word: word * 2
triple = lambda word: word * 3
quadruple = lambda word: word * 4
_OUTPUT = dispatch(splits, double, triple, quadruple)
_OUTPUT == repeat(('barbar', 'bazbazbaz', 'quxquxquxqux'), 3) | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/lib/utils.py#L241-L262 |
ggaughan/pipe2py | pipe2py/lib/utils.py | get_input | def get_input(context, conf):
"""Gets a user parameter, either from the console or from an outer
submodule/system
Assumes conf has name, default, prompt and debug
"""
name = conf['name']['value']
prompt = conf['prompt']['value']
default = conf['default']['value'] or conf['debug']['value']
if context.submodule or context.inputs:
value = context.inputs.get(name, default)
elif not context.test:
# we skip user interaction during tests
raw = raw_input("%s (default=%s) " % (encode(prompt), encode(default)))
value = raw or default
else:
value = default
return value | python | def get_input(context, conf):
"""Gets a user parameter, either from the console or from an outer
submodule/system
Assumes conf has name, default, prompt and debug
"""
name = conf['name']['value']
prompt = conf['prompt']['value']
default = conf['default']['value'] or conf['debug']['value']
if context.submodule or context.inputs:
value = context.inputs.get(name, default)
elif not context.test:
# we skip user interaction during tests
raw = raw_input("%s (default=%s) " % (encode(prompt), encode(default)))
value = raw or default
else:
value = default
return value | Gets a user parameter, either from the console or from an outer
submodule/system
Assumes conf has name, default, prompt and debug | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/lib/utils.py#L295-L314 |
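A sketch of the conf shape get_input expects; the Context keyword arguments are assumptions about the pipe2py Context object, not documented here:

from pipe2py import Context
from pipe2py.lib.utils import get_input

conf = {
    'name':    {'value': 'textinput1'},
    'prompt':  {'value': 'Search term?'},
    'default': {'value': 'python'},
    'debug':   {'value': ''},
}
context = Context(inputs={'textinput1': 'pipes'}, test=True)  # hypothetical Context kwargs
print get_input(context, conf)  # 'pipes', taken from context.inputs without prompting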
ggaughan/pipe2py | pipe2py/lib/utils.py | url_quote | def url_quote(url):
"""Ensure url is valid"""
try:
return quote(url, safe=URL_SAFE)
except KeyError:
return quote(encode(url), safe=URL_SAFE) | python | def url_quote(url):
"""Ensure url is valid"""
try:
return quote(url, safe=URL_SAFE)
except KeyError:
return quote(encode(url), safe=URL_SAFE) | Ensure url is valid | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/lib/utils.py#L370-L375 |
ggaughan/pipe2py | pipe2py/lib/utils.py | multi_substitute | def multi_substitute(word, rules):
""" Apply multiple regex rules to 'word'
http://code.activestate.com/recipes/
576710-multi-regex-single-pass-replace-of-multiple-regexe/
"""
flags = rules[0]['flags']
# Create a combined regex from the rules
tuples = ((p, r['match']) for p, r in enumerate(rules))
regexes = ('(?P<match_%i>%s)' % (p, r) for p, r in tuples)
pattern = '|'.join(regexes)
regex = re.compile(pattern, flags)
resplit = re.compile('\$(\d+)')
# For each match, look-up corresponding replace value in dictionary
rules_in_series = ifilter(itemgetter('series'), rules)
rules_in_parallel = (r for r in rules if not r['series'])
try:
has_parallel = [rules_in_parallel.next()]
except StopIteration:
has_parallel = []
# print('================')
# pprint(rules)
# print('word:', word)
# print('pattern', pattern)
# print('flags', flags)
for _ in chain(rules_in_series, has_parallel):
# print('~~~~~~~~~~~~~~~~')
# print('new round')
# print('word:', word)
# found = list(regex.finditer(word))
# matchitems = [match.groupdict().items() for match in found]
# pprint(matchitems)
prev_name = None
prev_is_series = None
i = 0
for match in regex.finditer(word):
item = ifilter(itemgetter(1), match.groupdict().iteritems()).next()
# print('----------------')
# print('groupdict:', match.groupdict().items())
# print('item:', item)
if not item:
continue
name = item[0]
rule = rules[int(name[6:])]
series = rule.get('series')
kwargs = {'count': rule['count'], 'series': series}
is_previous = name == prev_name
singlematch = kwargs['count'] == 1
is_series = prev_is_series or kwargs['series']
isnt_previous = bool(prev_name) and not is_previous
if (is_previous and singlematch) or (isnt_previous and is_series):
continue
prev_name = name
prev_is_series = series
if resplit.findall(rule['replace']):
splits = resplit.split(rule['replace'])
words = _gen_words(match, splits)
else:
splits = rule['replace']
start = match.start() + i
end = match.end() + i
words = [word[:start], splits, word[end:]]
i += rule['offset']
# words = list(words)
word = ''.join(words)
# print('name:', name)
# print('prereplace:', rule['replace'])
# print('splits:', splits)
# print('resplits:', resplit.findall(rule['replace']))
# print('groups:', filter(None, match.groups()))
# print('i:', i)
# print('words:', words)
# print('range:', match.start(), '-', match.end())
# print('replace:', word)
# print('substitution:', word)
return word | python | def multi_substitute(word, rules):
""" Apply multiple regex rules to 'word'
http://code.activestate.com/recipes/
576710-multi-regex-single-pass-replace-of-multiple-regexe/
"""
flags = rules[0]['flags']
# Create a combined regex from the rules
tuples = ((p, r['match']) for p, r in enumerate(rules))
regexes = ('(?P<match_%i>%s)' % (p, r) for p, r in tuples)
pattern = '|'.join(regexes)
regex = re.compile(pattern, flags)
resplit = re.compile('\$(\d+)')
# For each match, look-up corresponding replace value in dictionary
rules_in_series = ifilter(itemgetter('series'), rules)
rules_in_parallel = (r for r in rules if not r['series'])
try:
has_parallel = [rules_in_parallel.next()]
except StopIteration:
has_parallel = []
# print('================')
# pprint(rules)
# print('word:', word)
# print('pattern', pattern)
# print('flags', flags)
for _ in chain(rules_in_series, has_parallel):
# print('~~~~~~~~~~~~~~~~')
# print('new round')
# print('word:', word)
# found = list(regex.finditer(word))
# matchitems = [match.groupdict().items() for match in found]
# pprint(matchitems)
prev_name = None
prev_is_series = None
i = 0
for match in regex.finditer(word):
item = ifilter(itemgetter(1), match.groupdict().iteritems()).next()
# print('----------------')
# print('groupdict:', match.groupdict().items())
# print('item:', item)
if not item:
continue
name = item[0]
rule = rules[int(name[6:])]
series = rule.get('series')
kwargs = {'count': rule['count'], 'series': series}
is_previous = name == prev_name
singlematch = kwargs['count'] == 1
is_series = prev_is_series or kwargs['series']
isnt_previous = bool(prev_name) and not is_previous
if (is_previous and singlematch) or (isnt_previous and is_series):
continue
prev_name = name
prev_is_series = series
if resplit.findall(rule['replace']):
splits = resplit.split(rule['replace'])
words = _gen_words(match, splits)
else:
splits = rule['replace']
start = match.start() + i
end = match.end() + i
words = [word[:start], splits, word[end:]]
i += rule['offset']
# words = list(words)
word = ''.join(words)
# print('name:', name)
# print('prereplace:', rule['replace'])
# print('splits:', splits)
# print('resplits:', resplit.findall(rule['replace']))
# print('groups:', filter(None, match.groups()))
# print('i:', i)
# print('words:', words)
# print('range:', match.start(), '-', match.end())
# print('replace:', word)
# print('substitution:', word)
return word | Apply multiple regex rules to 'word'
http://code.activestate.com/recipes/
576710-multi-regex-single-pass-replace-of-multiple-regexe/ | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/lib/utils.py#L397-L486 |
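A worked sketch of the rule shape the multi_substitute loop consumes; the keys (match, replace, flags, count, series, offset) are inferred from the body above, and the values are hypothetical:

from pipe2py.lib.utils import multi_substitute

rules = [
    {'match': 'cat', 'replace': 'dog', 'flags': 0,
     'count': 1, 'series': False, 'offset': 0},  # plain replacement, no $n groups
]
print multi_substitute('my cat', rules)  # my dog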
ggaughan/pipe2py | pipe2py/modules/pipeitembuilder.py | asyncPipeItembuilder | def asyncPipeItembuilder(context=None, _INPUT=None, conf=None, **kwargs):
"""A source that asynchronously builds an item. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : asyncPipe like object (twisted Deferred iterable of items)
conf : {
'attrs': [
{'key': {'value': 'title'}, 'value': {'value': 'new title'}},
{'key': {'value': 'desc.content'}, 'value': {'value': 'new desc'}}
]
}
Returns
------
_OUTPUT : twisted.internet.defer.Deferred generator of items
"""
pkwargs = cdicts(opts, kwargs)
asyncFuncs = yield asyncGetSplits(None, conf['attrs'], **pkwargs)
_input = yield _INPUT
finite = utils.finitize(_input)
inputs = imap(DotDict, finite)
pieces = yield asyncImap(asyncFuncs[0], inputs)
results = imap(utils.parse_params, pieces)
_OUTPUT = imap(DotDict, results)
returnValue(_OUTPUT) | python | def asyncPipeItembuilder(context=None, _INPUT=None, conf=None, **kwargs):
"""A source that asynchronously builds an item. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : asyncPipe like object (twisted Deferred iterable of items)
conf : {
'attrs': [
{'key': {'value': 'title'}, 'value': {'value': 'new title'}},
{'key': {'value': 'desc.content'}, 'value': {'value': 'new desc'}}
]
}
Returns
------
_OUTPUT : twisted.internet.defer.Deferred generator of items
"""
pkwargs = cdicts(opts, kwargs)
asyncFuncs = yield asyncGetSplits(None, conf['attrs'], **pkwargs)
_input = yield _INPUT
finite = utils.finitize(_input)
inputs = imap(DotDict, finite)
pieces = yield asyncImap(asyncFuncs[0], inputs)
results = imap(utils.parse_params, pieces)
_OUTPUT = imap(DotDict, results)
returnValue(_OUTPUT) | A source that asynchronously builds an item. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : asyncPipe like object (twisted Deferred iterable of items)
conf : {
'attrs': [
{'key': {'value': 'title'}, 'value': {'value': 'new title'}},
{'key': {'value': 'desc.content'}, 'value': {'value': 'new desc'}}
]
}
Returns
------
_OUTPUT : twisted.internet.defer.Deferred generator of items | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipeitembuilder.py#L23-L49 |
ggaughan/pipe2py | pipe2py/modules/pipeitembuilder.py | pipe_itembuilder | def pipe_itembuilder(context=None, _INPUT=None, conf=None, **kwargs):
"""A source that builds an item. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items
conf : {
'attrs': [
{'key': {'value': <'title'>}, 'value': {'value': <'chair'>}},
{'key': {'value': <'color'>}, 'value': {'value': <'red'>}}
]
}
Returns
------
_OUTPUT : generator of items
"""
funcs = get_splits(None, conf['attrs'], **cdicts(opts, kwargs))
finite = utils.finitize(_INPUT)
inputs = imap(DotDict, finite)
pieces = imap(funcs[0], inputs)
results = imap(utils.parse_params, pieces)
_OUTPUT = imap(DotDict, results)
return _OUTPUT | python | def pipe_itembuilder(context=None, _INPUT=None, conf=None, **kwargs):
"""A source that builds an item. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items
conf : {
'attrs': [
{'key': {'value': <'title'>}, 'value': {'value': <'chair'>}},
{'key': {'value': <'color'>}, 'value': {'value': <'red'>}}
]
}
Returns
------
_OUTPUT : generator of items
"""
funcs = get_splits(None, conf['attrs'], **cdicts(opts, kwargs))
finite = utils.finitize(_INPUT)
inputs = imap(DotDict, finite)
pieces = imap(funcs[0], inputs)
results = imap(utils.parse_params, pieces)
_OUTPUT = imap(DotDict, results)
return _OUTPUT | A source that builds an item. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items
conf : {
'attrs': [
{'key': {'value': <'title'>}, 'value': {'value': <'chair'>}},
{'key': {'value': <'color'>}, 'value': {'value': <'red'>}}
]
}
Returns
------
_OUTPUT : generator of items | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipeitembuilder.py#L53-L77 |
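A usage sketch for pipe_itembuilder following the conf shape in the docstring (hypothetical attribute values):

from pipe2py.modules.pipeforever import pipe_forever
from pipe2py.modules.pipeitembuilder import pipe_itembuilder

conf = {'attrs': [
    {'key': {'value': 'title'}, 'value': {'value': 'chair'}},
    {'key': {'value': 'color'}, 'value': {'value': 'red'}},
]}
# should yield DotDict items carrying the built attributes
print pipe_itembuilder(_INPUT=pipe_forever(), conf=conf).next()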
ggaughan/pipe2py | pipe2py/modules/pipeloop.py | asyncPipeLoop | def asyncPipeLoop(context=None, _INPUT=None, conf=None, embed=None, **kwargs):
"""An operator that asynchronously loops over the input and performs the
embedded submodule. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : asyncPipe like object (twisted Deferred iterable of items)
embed : the submodule, i.e., asyncPipe*(context, _INPUT, conf)
Most modules, with the exception of User inputs and Operators can be
sub-modules.
conf : {
'assign_part': {'value': <all or first>},
'assign_to': {'value': <assigned field name>},
'emit_part': {'value': <all or first>},
'mode': {'value': <assign or EMIT>},
'with': {'value': <looped field name or blank>},
'embed': {'value': {'conf': <module conf>}}
}
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of items
"""
cust_func = get_cust_func(context, conf, embed, parse_embed, **kwargs)
opts.update({'cust_func': cust_func})
splits = yield asyncGetSplits(_INPUT, conf, **cdicts(opts, kwargs))
gathered = yield asyncStarMap(asyncParseResult, splits)
_OUTPUT = utils.multiplex(gathered)
returnValue(_OUTPUT) | python | def asyncPipeLoop(context=None, _INPUT=None, conf=None, embed=None, **kwargs):
"""An operator that asynchronously loops over the input and performs the
embedded submodule. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : asyncPipe like object (twisted Deferred iterable of items)
embed : the submodule, i.e., asyncPipe*(context, _INPUT, conf)
Most modules, with the exception of User inputs and Operators can be
sub-modules.
conf : {
'assign_part': {'value': <all or first>},
'assign_to': {'value': <assigned field name>},
'emit_part': {'value': <all or first>},
'mode': {'value': <assign or EMIT>},
'with': {'value': <looped field name or blank>},
'embed': {'value': {'conf': <module conf>}}
}
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of items
"""
cust_func = get_cust_func(context, conf, embed, parse_embed, **kwargs)
opts.update({'cust_func': cust_func})
splits = yield asyncGetSplits(_INPUT, conf, **cdicts(opts, kwargs))
gathered = yield asyncStarMap(asyncParseResult, splits)
_OUTPUT = utils.multiplex(gathered)
returnValue(_OUTPUT) | An operator that asynchronously loops over the input and performs the
embedded submodule. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : asyncPipe like object (twisted Deferred iterable of items)
embed : the submodule, i.e., asyncPipe*(context, _INPUT, conf)
Most modules, with the exception of User inputs and Operators can be
sub-modules.
conf : {
'assign_part': {'value': <all or first>},
'assign_to': {'value': <assigned field name>},
'emit_part': {'value': <all or first>},
'mode': {'value': <assign or EMIT>},
'with': {'value': <looped field name or blank>},
'embed': {'value': {'conf': <module conf>}}
}
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of items | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipeloop.py#L89-L119 |
ggaughan/pipe2py | pipe2py/modules/pipeloop.py | pipe_loop | def pipe_loop(context=None, _INPUT=None, conf=None, embed=None, **kwargs):
"""An operator that loops over the input and performs the embedded
submodule. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
embed : the submodule, i.e., pipe_*(context, _INPUT, conf)
Most modules, with the exception of User inputs and Operators can be
sub-modules.
conf : {
'assign_part': {'value': <all or first>},
'assign_to': {'value': <assigned field name>},
'emit_part': {'value': <all or first>},
'mode': {'value': <assign or EMIT>},
'with': {'value': <looped field name or blank>},
'embed': {'value': {'conf': <module conf>}}
}
Returns
-------
_OUTPUT : generator of items
"""
cust_func = get_cust_func(context, conf, embed, parse_embed, **kwargs)
opts.update({'cust_func': cust_func})
splits = get_splits(_INPUT, conf, **cdicts(opts, kwargs))
gathered = starmap(parse_result, splits)
_OUTPUT = utils.multiplex(gathered)
return _OUTPUT | python | def pipe_loop(context=None, _INPUT=None, conf=None, embed=None, **kwargs):
"""An operator that loops over the input and performs the embedded
submodule. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
embed : the submodule, i.e., pipe_*(context, _INPUT, conf)
Most modules, with the exception of User inputs and Operators can be
sub-modules.
conf : {
'assign_part': {'value': <all or first>},
'assign_to': {'value': <assigned field name>},
'emit_part': {'value': <all or first>},
'mode': {'value': <assign or EMIT>},
'with': {'value': <looped field name or blank>},
'embed': {'value': {'conf': <module conf>}}
}
Returns
-------
_OUTPUT : generator of items
"""
cust_func = get_cust_func(context, conf, embed, parse_embed, **kwargs)
opts.update({'cust_func': cust_func})
splits = get_splits(_INPUT, conf, **cdicts(opts, kwargs))
gathered = starmap(parse_result, splits)
_OUTPUT = utils.multiplex(gathered)
return _OUTPUT | An operator that loops over the input and performs the embedded
submodule. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
embed : the submodule, i.e., pipe_*(context, _INPUT, conf)
Most modules, with the exception of User inputs and Operators can be
sub-modules.
conf : {
'assign_part': {'value': <all or first>},
'assign_to': {'value': <assigned field name>},
'emit_part': {'value': <all or first>},
'mode': {'value': <assign or EMIT>},
'with': {'value': <looped field name or blank>},
'embed': {'value': {'conf': <module conf>}}
}
Returns
-------
_OUTPUT : generator of items | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipeloop.py#L123-L153 |
ggaughan/pipe2py | pipe2py/modules/pipefetchpage.py | pipe_fetchpage | def pipe_fetchpage(context=None, _INPUT=None, conf=None, **kwargs):
"""A source that fetches the content of a given web site as a string.
Loopable.
context : pipe2py.Context object
_INPUT : pipeforever asyncPipe or an iterable of items or fields
conf : dict
URL -- url object containing the URL to download
from -- string from where to start the input
to -- string to limit the input
token -- if present, split the input on this token to generate items
Description: http://pipes.yahoo.com/pipes/docs?doc=sources#FetchPage
TODOS:
- don't retrieve pages larger than 200k
- don't retrieve if page is not indexable.
- item delimiter removes the closing tag if using a HTML tag
(not documented but happens)
- items should be cleaned, i.e. stripped of HTML tags
Yields
------
_OUTPUT : items
"""
conf = DotDict(conf)
split_token = conf.get('token', **kwargs)
urls = utils.listize(conf['URL'])
for item in _INPUT:
for item_url in urls:
url = utils.get_value(DotDict(item_url), DotDict(item), **kwargs)
url = utils.get_abspath(url)
if not url:
continue
f = urlopen(url)
# TODO: it seems that Yahoo! converts relative links to
# absolute. This needs to be done on the content but seems to
# be a non-trivial task in Python?
content = unicode(f.read(), 'utf-8')
if context and context.verbose:
print '............Content .................'
print content
print '...............EOF...................'
parsed = _parse_content(content, conf, **kwargs)
items = parsed.split(split_token) if split_token else [parsed]
if context and context.verbose:
print "FetchPage: found count items:", len(items)
for i in items:
if context and context.verbose:
print "--------------item data --------------------"
print i
print "--------------EOF item data ----------------"
yield {"content": i}
if item.get('forever'):
# _INPUT is pipeforever and not a loop,
# so we just yield our item once
break | python | def pipe_fetchpage(context=None, _INPUT=None, conf=None, **kwargs):
"""A source that fetches the content of a given web site as a string.
Loopable.
context : pipe2py.Context object
_INPUT : pipeforever asyncPipe or an iterable of items or fields
conf : dict
URL -- url object containing the URL to download
from -- string from where to start the input
to -- string to limit the input
token -- if present, split the input on this token to generate items
Description: http://pipes.yahoo.com/pipes/docs?doc=sources#FetchPage
TODOS:
- don't retrieve pages larger than 200k
- don't retrieve if page is not indexable.
- item delimiter removes the closing tag if using a HTML tag
(not documented but happens)
- items should be cleaned, i.e. stripped of HTML tags
Yields
------
_OUTPUT : items
"""
conf = DotDict(conf)
split_token = conf.get('token', **kwargs)
urls = utils.listize(conf['URL'])
for item in _INPUT:
for item_url in urls:
url = utils.get_value(DotDict(item_url), DotDict(item), **kwargs)
url = utils.get_abspath(url)
if not url:
continue
f = urlopen(url)
# TODO: it seems that Yahoo! converts relative links to
# absolute. This needs to be done on the content but seems to
# be a non-trivial task in Python?
content = unicode(f.read(), 'utf-8')
if context and context.verbose:
print '............Content .................'
print content
print '...............EOF...................'
parsed = _parse_content(content, conf, **kwargs)
items = parsed.split(split_token) if split_token else [parsed]
if context and context.verbose:
print "FetchPage: found count items:", len(items)
for i in items:
if context and context.verbose:
print "--------------item data --------------------"
print i
print "--------------EOF item data ----------------"
yield {"content": i}
if item.get('forever'):
# _INPUT is pipeforever and not a loop,
# so we just yield our item once
break | A source that fetches the content of a given web site as a string.
Loopable.
context : pipe2py.Context object
_INPUT : pipeforever asyncPipe or an iterable of items or fields
conf : dict
URL -- url object containing the URL to download
from -- string from where to start the input
to -- string to limit the input
token -- if present, split the input on this token to generate items
Description: http://pipes.yahoo.com/pipes/docs?doc=sources#FetchPage
TODOS:
- don't retrieve pages larger than 200k
- don't retrieve if page is not indexable.
- item delimiter removes the closing tag if using a HTML tag
(not documented but happens)
- items should be cleaned, i.e. stripped of HTML tags
Yields
------
_OUTPUT : items | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipefetchpage.py#L50-L117 |
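A conf sketch for pipe_fetchpage matching the documented keys (hypothetical URL and delimiters; 'from' and 'to' are consumed by the _parse_content helper, which is not shown in this record):

from pipe2py.modules.pipeforever import pipe_forever
from pipe2py.modules.pipefetchpage import pipe_fetchpage

conf = {
    'URL':  {'value': 'http://example.com/'},  # hypothetical page
    'from': {'value': '<title>'},              # start clipping after this marker
    'to':   {'value': '</title>'},             # stop clipping at this marker
    'token': {'value': ''},                    # no split token: yield a single item
}
for item in pipe_fetchpage(_INPUT=pipe_forever(), conf=conf):
    print item['content']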
ggaughan/pipe2py | pipe2py/modules/pipefetchdata.py | pipe_fetchdata | def pipe_fetchdata(context=None, _INPUT=None, conf=None, **kwargs):
"""A source that fetches and parses an XML or JSON file. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items or fields
conf : {
'URL': {'value': <url>},
'path': {'value': <dot separated path to data list>}
}
Yields
------
_OUTPUT : items
Examples
--------
>>> from os import path as p
>>> from pipe2py.modules.pipeforever import pipe_forever
>>> parent = p.dirname(p.dirname(__file__))
>>> abspath = p.abspath(p.join(parent, 'data', 'gigs.json'))
>>> path = 'value.items'
>>> url = "file://%s" % abspath
>>> conf = {'URL': {'value': url}, 'path': {'value': path}}
>>> pipe_fetchdata(_INPUT=pipe_forever(), conf=conf).next().keys()[:5]
[u'y:repeatcount', u'description', u'pubDate', u'title', u'y:published']
>>> abspath = p.abspath(p.join(parent, 'data', 'places.xml'))
>>> path = 'appointment'
>>> url = "file://%s" % abspath
>>> conf = {'URL': {'value': url}, 'path': {'value': path}}
>>> sorted(pipe_fetchdata(_INPUT=pipe_forever(), conf=conf).next().keys())
['alarmTime', 'begin', 'duration', 'places', 'subject', 'uid']
>>> conf = {'URL': {'value': url}, 'path': {'value': ''}}
>>> sorted(pipe_fetchdata(_INPUT=pipe_forever(), conf=conf).next().keys())
['appointment', 'reminder']
"""
# todo: iCal and KML
funcs = get_splits(None, conf, **cdicts(opts, kwargs))
parsed = get_parsed(_INPUT, funcs[0])
results = starmap(parse_result, parsed)
items = imap(utils.gen_items, results)
_OUTPUT = utils.multiplex(items)
return _OUTPUT | python | def pipe_fetchdata(context=None, _INPUT=None, conf=None, **kwargs):
"""A source that fetches and parses an XML or JSON file. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items or fields
conf : {
'URL': {'value': <url>},
'path': {'value': <dot separated path to data list>}
}
Yields
------
_OUTPUT : items
Examples
--------
>>> from os import path as p
>>> from pipe2py.modules.pipeforever import pipe_forever
>>> parent = p.dirname(p.dirname(__file__))
>>> abspath = p.abspath(p.join(parent, 'data', 'gigs.json'))
>>> path = 'value.items'
>>> url = "file://%s" % abspath
>>> conf = {'URL': {'value': url}, 'path': {'value': path}}
>>> pipe_fetchdata(_INPUT=pipe_forever(), conf=conf).next().keys()[:5]
[u'y:repeatcount', u'description', u'pubDate', u'title', u'y:published']
>>> abspath = p.abspath(p.join(parent, 'data', 'places.xml'))
>>> path = 'appointment'
>>> url = "file://%s" % abspath
>>> conf = {'URL': {'value': url}, 'path': {'value': path}}
>>> sorted(pipe_fetchdata(_INPUT=pipe_forever(), conf=conf).next().keys())
['alarmTime', 'begin', 'duration', 'places', 'subject', 'uid']
>>> conf = {'URL': {'value': url}, 'path': {'value': ''}}
>>> sorted(pipe_fetchdata(_INPUT=pipe_forever(), conf=conf).next().keys())
['appointment', 'reminder']
"""
# todo: iCal and KML
funcs = get_splits(None, conf, **cdicts(opts, kwargs))
parsed = get_parsed(_INPUT, funcs[0])
results = starmap(parse_result, parsed)
items = imap(utils.gen_items, results)
_OUTPUT = utils.multiplex(items)
return _OUTPUT | A source that fetches and parses an XML or JSON file. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items or fields
conf : {
'URL': {'value': <url>},
'path': {'value': <dot separated path to data list>}
}
Yields
------
_OUTPUT : items
Examples
--------
>>> from os import path as p
>>> from pipe2py.modules.pipeforever import pipe_forever
>>> parent = p.dirname(p.dirname(__file__))
>>> abspath = p.abspath(p.join(parent, 'data', 'gigs.json'))
>>> path = 'value.items'
>>> url = "file://%s" % abspath
>>> conf = {'URL': {'value': url}, 'path': {'value': path}}
>>> pipe_fetchdata(_INPUT=pipe_forever(), conf=conf).next().keys()[:5]
[u'y:repeatcount', u'description', u'pubDate', u'title', u'y:published']
>>> abspath = p.abspath(p.join(parent, 'data', 'places.xml'))
>>> path = 'appointment'
>>> url = "file://%s" % abspath
>>> conf = {'URL': {'value': url}, 'path': {'value': path}}
>>> sorted(pipe_fetchdata(_INPUT=pipe_forever(), conf=conf).next().keys())
['alarmTime', 'begin', 'duration', 'places', 'subject', 'uid']
>>> conf = {'URL': {'value': url}, 'path': {'value': ''}}
>>> sorted(pipe_fetchdata(_INPUT=pipe_forever(), conf=conf).next().keys())
['appointment', 'reminder'] | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipefetchdata.py#L89-L132 |
ggaughan/pipe2py | pipe2py/modules/pipefetch.py | asyncPipeFetch | def asyncPipeFetch(context=None, _INPUT=None, conf=None, **kwargs):
"""A source that asynchronously fetches and parses one or more feeds to
return the feed entries. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : asyncPipe like object (twisted Deferred iterable of items)
conf : {
'URL': [
{'type': 'url', 'value': <url1>},
{'type': 'url', 'value': <url2>},
{'type': 'url', 'value': <url3>},
]
}
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of items
"""
splits = yield asyncGetSplits(_INPUT, conf['URL'], **cdicts(opts, kwargs))
items = yield asyncStarMap(asyncParseResult, splits)
_OUTPUT = utils.multiplex(items)
returnValue(_OUTPUT) | python | def asyncPipeFetch(context=None, _INPUT=None, conf=None, **kwargs):
"""A source that asynchronously fetches and parses one or more feeds to
return the feed entries. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : asyncPipe like object (twisted Deferred iterable of items)
conf : {
'URL': [
{'type': 'url', 'value': <url1>},
{'type': 'url', 'value': <url2>},
{'type': 'url', 'value': <url3>},
]
}
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of items
"""
splits = yield asyncGetSplits(_INPUT, conf['URL'], **cdicts(opts, kwargs))
items = yield asyncStarMap(asyncParseResult, splits)
_OUTPUT = utils.multiplex(items)
returnValue(_OUTPUT) | A source that asynchronously fetches and parses one or more feeds to
return the feed entries. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : asyncPipe like object (twisted Deferred iterable of items)
conf : {
'URL': [
{'type': 'url', 'value': <url1>},
{'type': 'url', 'value': <url2>},
{'type': 'url', 'value': <url3>},
]
}
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of items | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipefetch.py#L51-L74 |
ggaughan/pipe2py | pipe2py/modules/pipefetch.py | pipe_fetch | def pipe_fetch(context=None, _INPUT=None, conf=None, **kwargs):
"""A source that fetches and parses one or more feeds to return the
entries. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items or fields
conf : {
'URL': [
{'type': 'url', 'value': <url1>},
{'type': 'url', 'value': <url2>},
{'type': 'url', 'value': <url3>},
]
}
Returns
-------
_OUTPUT : generator of items
"""
splits = get_splits(_INPUT, conf['URL'], **cdicts(opts, kwargs))
items = starmap(parse_result, splits)
_OUTPUT = utils.multiplex(items)
return _OUTPUT | python | def pipe_fetch(context=None, _INPUT=None, conf=None, **kwargs):
"""A source that fetches and parses one or more feeds to return the
entries. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items or fields
conf : {
'URL': [
{'type': 'url', 'value': <url1>},
{'type': 'url', 'value': <url2>},
{'type': 'url', 'value': <url3>},
]
}
Returns
-------
_OUTPUT : generator of items
"""
splits = get_splits(_INPUT, conf['URL'], **cdicts(opts, kwargs))
items = starmap(parse_result, splits)
_OUTPUT = utils.multiplex(items)
return _OUTPUT | A source that fetches and parses one or more feeds to return the
entries. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items or fields
conf : {
'URL': [
{'type': 'url', 'value': <url1>},
{'type': 'url', 'value': <url2>},
{'type': 'url', 'value': <url3>},
]
}
Returns
-------
_OUTPUT : generator of items | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipefetch.py#L86-L109 |
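A usage sketch for pipe_fetch with the documented URL list (hypothetical feed addresses; the 'title' field assumes feedparser-style entries):

from pipe2py.modules.pipeforever import pipe_forever
from pipe2py.modules.pipefetch import pipe_fetch

conf = {'URL': [
    {'type': 'url', 'value': 'http://example.com/feed1.rss'},
    {'type': 'url', 'value': 'http://example.com/feed2.rss'},
]}
for entry in pipe_fetch(_INPUT=pipe_forever(), conf=conf):
    print entry['title']  # assumes entries expose a 'title' field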
ggaughan/pipe2py | pipe2py/modules/pipefilter.py | pipe_filter | def pipe_filter(context=None, _INPUT=None, conf=None, **kwargs):
"""An operator that filters for source items matching the given rules.
Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
conf : {
'MODE': {'value': <'permit' or 'block'>},
'COMBINE': {'value': <'and' or 'or'>}
'RULE': [
{
'field': {'value': 'search field'},
'op': {'value': 'one of SWITCH above'},
'value': {'value': 'search term'}
}
]
}
kwargs : other inputs, e.g., to feed terminals for rule values
Returns
-------
_OUTPUT : generator of filtered items
Examples
--------
>>> import os.path as p
>>> from pipe2py.modules.pipeforever import pipe_forever
>>> from pipe2py.modules.pipefetchdata import pipe_fetchdata
>>> parent = p.dirname(p.dirname(__file__))
>>> file_name = p.abspath(p.join(parent, 'data', 'gigs.json'))
>>> path = 'value.items'
>>> url = 'file://%s' % file_name
>>> conf = {'URL': {'value': url}, 'path': {'value': path}}
>>> input = pipe_fetchdata(_INPUT=pipe_forever(), conf=conf)
>>> mode = {'value': 'permit'}
>>> combine = {'value': 'and'}
>>> rule = [{'field': {'value': 'title'}, 'op': {'value': 'contains'}, \
'value': {'value': 'web'}}]
>>> conf = {'MODE': mode, 'COMBINE': combine, 'RULE': rule}
>>> pipe_filter(_INPUT=input, conf=conf).next()['title']
u'E-Commerce Website Developer | Elance Job'
>>> rule = [{'field': {'value': 'title'}, 'op': {'value': 'contains'}, \
'value': {'value': 'kjhlked'}}]
>>> conf = {'MODE': mode, 'COMBINE': combine, 'RULE': rule}
>>> list(pipe_filter(_INPUT=input, conf=conf))
[]
"""
conf = DotDict(conf)
test = kwargs.pop('pass_if', None)
permit = conf.get('MODE', **kwargs) == 'permit'
combine = conf.get('COMBINE', **kwargs)
if not combine in {'and', 'or'}:
raise Exception(
"Invalid combine: %s. (Expected 'and' or 'or')" % combine)
rule_defs = map(DotDict, utils.listize(conf['RULE']))
get_pass = partial(utils.get_pass, test=test)
get_value = partial(utils.get_value, **kwargs)
parse_conf = partial(utils.parse_conf, parse_func=get_value, **kwargs)
get_rules = lambda i: imap(parse_conf, rule_defs, repeat(i))
funcs = [COMBINE_BOOLEAN[combine], utils.passthrough, utils.passthrough]
inputs = imap(DotDict, _INPUT)
splits = utils.broadcast(inputs, get_rules, utils.passthrough, get_pass)
outputs = starmap(partial(parse_rules, **kwargs), splits)
parsed = utils.dispatch(outputs, *funcs)
gathered = starmap(partial(parse_result, permit=permit), parsed)
_OUTPUT = ifilter(None, gathered)
return _OUTPUT | python | def pipe_filter(context=None, _INPUT=None, conf=None, **kwargs):
"""An operator that filters for source items matching the given rules.
Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
conf : {
'MODE': {'value': <'permit' or 'block'>},
'COMBINE': {'value': <'and' or 'or'>}
'RULE': [
{
'field': {'value': 'search field'},
'op': {'value': 'one of SWITCH above'},
'value': {'value': 'search term'}
}
]
}
kwargs : other inputs, e.g., to feed terminals for rule values
Returns
-------
_OUTPUT : generator of filtered items
Examples
--------
>>> import os.path as p
>>> from pipe2py.modules.pipeforever import pipe_forever
>>> from pipe2py.modules.pipefetchdata import pipe_fetchdata
>>> parent = p.dirname(p.dirname(__file__))
>>> file_name = p.abspath(p.join(parent, 'data', 'gigs.json'))
>>> path = 'value.items'
>>> url = 'file://%s' % file_name
>>> conf = {'URL': {'value': url}, 'path': {'value': path}}
>>> input = pipe_fetchdata(_INPUT=pipe_forever(), conf=conf)
>>> mode = {'value': 'permit'}
>>> combine = {'value': 'and'}
>>> rule = [{'field': {'value': 'title'}, 'op': {'value': 'contains'}, \
'value': {'value': 'web'}}]
>>> conf = {'MODE': mode, 'COMBINE': combine, 'RULE': rule}
>>> pipe_filter(_INPUT=input, conf=conf).next()['title']
u'E-Commerce Website Developer | Elance Job'
>>> rule = [{'field': {'value': 'title'}, 'op': {'value': 'contains'}, \
'value': {'value': 'kjhlked'}}]
>>> conf = {'MODE': mode, 'COMBINE': combine, 'RULE': rule}
>>> list(pipe_filter(_INPUT=input, conf=conf))
[]
"""
conf = DotDict(conf)
test = kwargs.pop('pass_if', None)
permit = conf.get('MODE', **kwargs) == 'permit'
combine = conf.get('COMBINE', **kwargs)
if not combine in {'and', 'or'}:
raise Exception(
"Invalid combine: %s. (Expected 'and' or 'or')" % combine)
rule_defs = map(DotDict, utils.listize(conf['RULE']))
get_pass = partial(utils.get_pass, test=test)
get_value = partial(utils.get_value, **kwargs)
parse_conf = partial(utils.parse_conf, parse_func=get_value, **kwargs)
get_rules = lambda i: imap(parse_conf, rule_defs, repeat(i))
funcs = [COMBINE_BOOLEAN[combine], utils.passthrough, utils.passthrough]
inputs = imap(DotDict, _INPUT)
splits = utils.broadcast(inputs, get_rules, utils.passthrough, get_pass)
outputs = starmap(partial(parse_rules, **kwargs), splits)
parsed = utils.dispatch(outputs, *funcs)
gathered = starmap(partial(parse_result, permit=permit), parsed)
_OUTPUT = ifilter(None, gathered)
return _OUTPUT | An operator that filters for source items matching the given rules.
Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
conf : {
'MODE': {'value': <'permit' or 'block'>},
'COMBINE': {'value': <'and' or 'or'>}
'RULE': [
{
'field': {'value': 'search field'},
'op': {'value': 'one of SWITCH above'},
'value': {'value': 'search term'}
}
]
}
kwargs : other inputs, e.g., to feed terminals for rule values
Returns
-------
_OUTPUT : generator of filtered items
Examples
--------
>>> import os.path as p
>>> from pipe2py.modules.pipeforever import pipe_forever
>>> from pipe2py.modules.pipefetchdata import pipe_fetchdata
>>> parent = p.dirname(p.dirname(__file__))
>>> file_name = p.abspath(p.join(parent, 'data', 'gigs.json'))
>>> path = 'value.items'
>>> url = 'file://%s' % file_name
>>> conf = {'URL': {'value': url}, 'path': {'value': path}}
>>> input = pipe_fetchdata(_INPUT=pipe_forever(), conf=conf)
>>> mode = {'value': 'permit'}
>>> combine = {'value': 'and'}
>>> rule = [{'field': {'value': 'title'}, 'op': {'value': 'contains'}, \
'value': {'value': 'web'}}]
>>> conf = {'MODE': mode, 'COMBINE': combine, 'RULE': rule}
>>> pipe_filter(_INPUT=input, conf=conf).next()['title']
u'E-Commerce Website Developer | Elance Job'
>>> rule = [{'field': {'value': 'title'}, 'op': {'value': 'contains'}, \
'value': {'value': 'kjhlked'}}]
>>> conf = {'MODE': mode, 'COMBINE': combine, 'RULE': rule}
>>> list(pipe_filter(_INPUT=input, conf=conf))
[] | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipefilter.py#L80-L152 |
ggaughan/pipe2py | pipe2py/modules/pipesplit.py | pipe_split | def pipe_split(context, _INPUT, conf, splits, **kwargs):
"""An operator that splits a source into identical copies. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
conf : dict
splits : number of copies
Yields
------
_OUTPUT, _OUTPUT2... : copies of all source items
"""
return Split(context, _INPUT, conf, splits, **kwargs) | python | def pipe_split(context, _INPUT, conf, splits, **kwargs):
"""An operator that splits a source into identical copies. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
conf : dict
splits : number of copies
Yields
------
_OUTPUT, _OUTPUT2... : copies of all source items
"""
return Split(context, _INPUT, conf, splits, **kwargs) | An operator that splits a source into identical copies. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
conf : dict
splits : number of copies
Yields
------
_OUTPUT, _OUTPUT2... : copies of all source items | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipesplit.py#L33-L47 |
ggaughan/pipe2py | pipe2py/modules/pipedatebuilder.py | pipe_datebuilder | def pipe_datebuilder(context=None, _INPUT=None, conf=None, **kwargs):
"""A date module that converts a text string into a datetime value. Useful
as terminal data. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items
conf : {'DATE': {'type': 'datetime', 'value': '12/2/2014'}}
Yields
------
_OUTPUT : date timetuples
"""
conf = DotDict(conf)
for item in _INPUT:
_input = DotDict(item)
date = utils.get_value(conf['DATE'], _input, **kwargs).lower()
if date.endswith(' day') or date.endswith(' days'):
count = int(date.split(' ')[0])
new_date = dt.today() + timedelta(days=count)
elif date.endswith(' year') or date.endswith(' years'):
count = int(date.split(' ')[0])
new_date = dt.today().replace(year=dt.today().year + count)
else:
new_date = SWITCH.get(date)
if not new_date:
new_date = utils.get_date(date)
if not new_date:
raise Exception('Unrecognized date string: %s' % date)
yield new_date.timetuple() | python | def pipe_datebuilder(context=None, _INPUT=None, conf=None, **kwargs):
"""A date module that converts a text string into a datetime value. Useful
as terminal data. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items
conf : {'DATE': {'type': 'datetime', 'value': '12/2/2014'}}
Yields
------
_OUTPUT : date timetuples
"""
conf = DotDict(conf)
for item in _INPUT:
_input = DotDict(item)
date = utils.get_value(conf['DATE'], _input, **kwargs).lower()
if date.endswith(' day') or date.endswith(' days'):
count = int(date.split(' ')[0])
new_date = dt.today() + timedelta(days=count)
elif date.endswith(' year') or date.endswith(' years'):
count = int(date.split(' ')[0])
new_date = dt.today().replace(year=dt.today().year + count)
else:
new_date = SWITCH.get(date)
if not new_date:
new_date = utils.get_date(date)
if not new_date:
raise Exception('Unrecognized date string: %s' % date)
yield new_date.timetuple() | A date module that converts a text string into a datetime value. Useful
as terminal data. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items
conf : {'DATE': {'type': 'datetime', 'value': '12/2/2014'}}
Yields
------
_OUTPUT : date timetuples | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipedatebuilder.py#L25-L60 |
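A minimal usage sketch for pipe_datebuilder above, not taken from the repo: the conf layout follows the docstring, while the single empty input item and the printed year are illustrative assumptions.

from pipe2py.modules.pipedatebuilder import pipe_datebuilder

# Build a timetuple from a literal date string (conf layout per the docstring).
conf = {'DATE': {'type': 'datetime', 'value': '12/2/2014'}}
for timetuple in pipe_datebuilder(_INPUT=[{}], conf=conf):
    print(timetuple.tm_year)  # expected: 2014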
ggaughan/pipe2py | pipe2py/twisted/utils.py | asyncImap | def asyncImap(asyncCallable, *iterables):
"""itertools.imap for deferred callables
"""
deferreds = imap(asyncCallable, *iterables)
return gatherResults(deferreds, consumeErrors=True) | python | def asyncImap(asyncCallable, *iterables):
"""itertools.imap for deferred callables
"""
deferreds = imap(asyncCallable, *iterables)
return gatherResults(deferreds, consumeErrors=True) | itertools.imap for deferred callables | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/twisted/utils.py#L86-L90 |
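An illustrative sketch of asyncImap with an already-fired Deferred; async_double, show, and the sample list are hypothetical, and a real program would consume the result inside a running Twisted reactor.

from twisted.internet.defer import succeed
from pipe2py.twisted.utils import asyncImap

def async_double(x):
    # A trivial "async" callable that returns an already-fired Deferred.
    return succeed(x * 2)

def show(results):
    print(results)  # expected: [2, 4, 6]

d = asyncImap(async_double, [1, 2, 3])
d.addCallback(show)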
ggaughan/pipe2py | pipe2py/twisted/utils.py | asyncStarCmap | def asyncStarCmap(asyncCallable, iterable):
"""itertools.starmap for deferred callables using cooperative multitasking
"""
results = []
yield coopStar(asyncCallable, results.append, iterable)
returnValue(results) | python | def asyncStarCmap(asyncCallable, iterable):
"""itertools.starmap for deferred callables using cooperative multitasking
"""
results = []
yield coopStar(asyncCallable, results.append, iterable)
returnValue(results) | itertools.starmap for deferred callables using cooperative multitasking | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/twisted/utils.py#L94-L99 |
ggaughan/pipe2py | pipe2py/twisted/utils.py | asyncStarPmap | def asyncStarPmap(asyncCallable, iterable):
"""itertools.starmap for deferred callables using parallel cooperative
multitasking
"""
results = []
yield asyncStarParallel(asyncCallable, results.append, iterable)
returnValue(results) | python | def asyncStarPmap(asyncCallable, iterable):
"""itertools.starmap for deferred callables using parallel cooperative
multitasking
"""
results = []
yield asyncStarParallel(asyncCallable, results.append, iterable)
returnValue(results) | itertools.starmap for deferred callables using parallel cooperative
multitasking | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/twisted/utils.py#L103-L109 |
ggaughan/pipe2py | pipe2py/twisted/utils.py | asyncStarMap | def asyncStarMap(asyncCallable, iterable):
"""itertools.starmap for deferred callables
"""
deferreds = starmap(asyncCallable, iterable)
return gatherResults(deferreds, consumeErrors=True) | python | def asyncStarMap(asyncCallable, iterable):
"""itertools.starmap for deferred callables
"""
deferreds = starmap(asyncCallable, iterable)
return gatherResults(deferreds, consumeErrors=True) | itertools.starmap for deferred callables | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/twisted/utils.py#L112-L116 |
ggaughan/pipe2py | pipe2py/modules/piperssitembuilder.py | pipe_rssitembuilder | def pipe_rssitembuilder(context=None, _INPUT=None, conf=None, **kwargs):
"""A source that builds an rss item. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever asyncPipe or an iterable of items or fields
conf : {
'mediaContentType': {'type': 'text', 'value': ''},
'mediaContentHeight': {'type': 'text', 'value': ''},
'mediaContentWidth': {'type': 'text', 'value': ''},
'mediaContentURL': {'type': 'text', 'value': 'url'},
'mediaThumbHeight': {'type': 'text', 'value': ''},
'mediaThumbWidth': {'type': 'text', 'value': ''},
'mediaThumbURL': {'type': 'text', 'value': 'url'},
'description': {'type': 'text', 'value': 'description'},
'pubdate': {'type': 'text', 'value': 'pubdate'},
'author': {'type': 'text', 'value': 'author'},
'title': {'type': 'text', 'value': 'title'},
'link': {'type': 'text', 'value': 'url'},
'guid': {'type': 'text', 'value': 'guid'},
}
Yields
------
_OUTPUT : items
"""
get_value = partial(utils.get_value, **kwargs)
pkwargs = utils.combine_dicts({'parse_func': get_value}, kwargs)
parse_conf = partial(utils.parse_conf, DotDict(conf), **pkwargs)
get_RSS = lambda key, value: (RSS.get(key, key), value)
get_YAHOO = lambda key, value: (YAHOO.get(key), value)
make_dict = lambda func, conf: dict(starmap(func, conf.iteritems()))
clean_dict = lambda d: dict(i for i in d.items() if all(i))
funcs = [partial(make_dict, get_RSS), partial(make_dict, get_YAHOO)]
finite = utils.finitize(_INPUT)
inputs = imap(DotDict, finite)
confs = imap(parse_conf, inputs)
splits = utils.broadcast(confs, *funcs)
combined = starmap(utils.combine_dicts, splits)
result = imap(clean_dict, combined)
_OUTPUT = imap(DotDict, result)
return _OUTPUT | python | def pipe_rssitembuilder(context=None, _INPUT=None, conf=None, **kwargs):
"""A source that builds an rss item. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever asyncPipe or an iterable of items or fields
conf : {
'mediaContentType': {'type': 'text', 'value': ''},
'mediaContentHeight': {'type': 'text', 'value': ''},
'mediaContentWidth': {'type': 'text', 'value': ''},
'mediaContentURL': {'type': 'text', 'value': 'url'},
'mediaThumbHeight': {'type': 'text', 'value': ''},
'mediaThumbWidth': {'type': 'text', 'value': ''},
'mediaThumbURL': {'type': 'text', 'value': 'url'},
'description': {'type': 'text', 'value': 'description'},
'pubdate': {'type': 'text', 'value': 'pubdate'},
'author': {'type': 'text', 'value': 'author'},
'title': {'type': 'text', 'value': 'title'},
'link': {'type': 'text', 'value': 'url'},
'guid': {'type': 'text', 'value': 'guid'},
}
Yields
------
_OUTPUT : items
"""
get_value = partial(utils.get_value, **kwargs)
pkwargs = utils.combine_dicts({'parse_func': get_value}, kwargs)
parse_conf = partial(utils.parse_conf, DotDict(conf), **pkwargs)
get_RSS = lambda key, value: (RSS.get(key, key), value)
get_YAHOO = lambda key, value: (YAHOO.get(key), value)
make_dict = lambda func, conf: dict(starmap(func, conf.iteritems()))
clean_dict = lambda d: dict(i for i in d.items() if all(i))
funcs = [partial(make_dict, get_RSS), partial(make_dict, get_YAHOO)]
finite = utils.finitize(_INPUT)
inputs = imap(DotDict, finite)
confs = imap(parse_conf, inputs)
splits = utils.broadcast(confs, *funcs)
combined = starmap(utils.combine_dicts, splits)
result = imap(clean_dict, combined)
_OUTPUT = imap(DotDict, result)
return _OUTPUT | A source that builds an rss item. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever asyncPipe or an iterable of items or fields
conf : {
'mediaContentType': {'type': 'text', 'value': ''},
'mediaContentHeight': {'type': 'text', 'value': ''},
'mediaContentWidth': {'type': 'text', 'value': ''},
'mediaContentURL': {'type': 'text', 'value': 'url'},
'mediaThumbHeight': {'type': 'text', 'value': ''},
'mediaThumbWidth': {'type': 'text', 'value': ''},
'mediaThumbURL': {'type': 'text', 'value': 'url'},
'description': {'type': 'text', 'value': 'description'},
'pubdate': {'type': 'text', 'value': 'pubdate'},
'author': {'type': 'text', 'value': 'author'},
'title': {'type': 'text', 'value': 'title'},
'link': {'type': 'text', 'value': 'url'},
'guid': {'type': 'text', 'value': 'guid'},
}
Yields
------
_OUTPUT : items | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/piperssitembuilder.py#L20-L63 |
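A hedged usage sketch for pipe_rssitembuilder above: the conf keys come from the docstring, the literal text values and the empty input item are assumptions, and the exact output key mapping depends on the module's RSS/YAHOO tables.

from pipe2py.modules.piperssitembuilder import pipe_rssitembuilder

# All values here are literal text; field lookups via the input item are also possible.
conf = {
    'title': {'type': 'text', 'value': 'Hello world'},
    'link': {'type': 'text', 'value': 'http://example.com/post'},
    'description': {'type': 'text', 'value': 'A sample item'},
}
for rss_item in pipe_rssitembuilder(_INPUT=[{}], conf=conf):
    print(rss_item)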
ggaughan/pipe2py | pipe2py/modules/pipestrconcat.py | asyncPipeStrconcat | def asyncPipeStrconcat(context=None, _INPUT=None, conf=None, **kwargs):
"""A string module that asynchronously builds a string. Loopable. No direct
input.
Parameters
----------
context : pipe2py.Context object
_INPUT : asyncPipe like object (twisted Deferred iterable of items)
conf : {
'part': [
{'value': <'<img src="'>},
{'subkey': <'img.src'>},
{'value': <'">'>}
]
}
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of joined strings
"""
splits = yield asyncGetSplits(_INPUT, conf['part'], **cdicts(opts, kwargs))
_OUTPUT = yield asyncStarMap(partial(maybeDeferred, parse_result), splits)
returnValue(iter(_OUTPUT)) | python | def asyncPipeStrconcat(context=None, _INPUT=None, conf=None, **kwargs):
"""A string module that asynchronously builds a string. Loopable. No direct
input.
Parameters
----------
context : pipe2py.Context object
_INPUT : asyncPipe like object (twisted Deferred iterable of items)
conf : {
'part': [
{'value': <'<img src="'>},
{'subkey': <'img.src'>},
{'value': <'">'>}
]
}
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of joined strings
"""
splits = yield asyncGetSplits(_INPUT, conf['part'], **cdicts(opts, kwargs))
_OUTPUT = yield asyncStarMap(partial(maybeDeferred, parse_result), splits)
returnValue(iter(_OUTPUT)) | A string module that asynchronously builds a string. Loopable. No direct
input.
Parameters
----------
context : pipe2py.Context object
_INPUT : asyncPipe like object (twisted Deferred iterable of items)
conf : {
'part': [
{'value': <'<img src="'>},
{'subkey': <'img.src'>},
{'value': <'">'>}
]
}
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of joined strings | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipestrconcat.py#L28-L50 |
ggaughan/pipe2py | pipe2py/modules/pipestrconcat.py | pipe_strconcat | def pipe_strconcat(context=None, _INPUT=None, conf=None, **kwargs):
"""A string module that builds a string. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items
conf : {
'part': [
{'value': '<img src="'},
{'subkey': 'img.src'},
{'value': '">'}
]
}
Returns
-------
_OUTPUT : generator of joined strings
"""
splits = get_splits(_INPUT, conf['part'], **cdicts(opts, kwargs))
_OUTPUT = starmap(parse_result, splits)
return _OUTPUT | python | def pipe_strconcat(context=None, _INPUT=None, conf=None, **kwargs):
"""A string module that builds a string. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items
conf : {
'part': [
{'value': '<img src="'},
{'subkey': 'img.src'},
{'value': '">'}
]
}
Returns
-------
_OUTPUT : generator of joined strings
"""
splits = get_splits(_INPUT, conf['part'], **cdicts(opts, kwargs))
_OUTPUT = starmap(parse_result, splits)
return _OUTPUT | A string module that builds a string. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items
conf : {
'part': [
{'value': '<img src="'},
{'subkey': 'img.src'},
{'value': '">'}
]
}
Returns
-------
_OUTPUT : generator of joined strings | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipestrconcat.py#L54-L75 |
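A small usage sketch for pipe_strconcat above, mirroring the docstring's part configuration; the sample item and the expected output string are illustrative.

from pipe2py.modules.pipestrconcat import pipe_strconcat

item = {'img': {'src': 'http://example.com/pic.png'}}
conf = {'part': [
    {'value': '<img src="'},
    {'subkey': 'img.src'},   # pulled from the input item
    {'value': '">'},
]}
for joined in pipe_strconcat(_INPUT=[item], conf=conf):
    print(joined)  # expected: <img src="http://example.com/pic.png">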
ggaughan/pipe2py | pipe2py/modules/pipeuniq.py | asyncPipeUniq | def asyncPipeUniq(context=None, _INPUT=None, conf=None, **kwargs):
"""An operator that asynchronously filters out non unique items according
to the specified field. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : twisted Deferred iterable of items
conf : {'field': {'type': 'text', 'value': <field to be unique>}}
returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of unique items
"""
_input = yield _INPUT
asyncFuncs = yield asyncGetSplits(None, conf, **cdicts(opts, kwargs))
pieces = yield asyncFuncs[0]()
_pass = yield asyncFuncs[2]()
_OUTPUT = _input if _pass else unique_items(_input, pieces.field)
returnValue(_OUTPUT) | python | def asyncPipeUniq(context=None, _INPUT=None, conf=None, **kwargs):
"""An operator that asynchronously filters out non unique items according
to the specified field. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : twisted Deferred iterable of items
conf : {'field': {'type': 'text', 'value': <field to be unique>}}
returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of unique items
"""
_input = yield _INPUT
asyncFuncs = yield asyncGetSplits(None, conf, **cdicts(opts, kwargs))
pieces = yield asyncFuncs[0]()
_pass = yield asyncFuncs[2]()
_OUTPUT = _input if _pass else unique_items(_input, pieces.field)
returnValue(_OUTPUT) | An operator that asynchronously filters out non unique items according
to the specified field. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : twisted Deferred iterable of items
conf : {'field': {'type': 'text', 'value': <field to be unique>}}
returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of unique items | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipeuniq.py#L31-L50 |
ggaughan/pipe2py | pipe2py/modules/pipeuniq.py | pipe_uniq | def pipe_uniq(context=None, _INPUT=None, conf=None, **kwargs):
"""An operator that filters out non unique items according to the specified
field. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
kwargs -- other inputs, e.g. to feed terminals for rule values
conf : {'field': {'type': 'text', 'value': <field to be unique>}}
Returns
-------
_OUTPUT : generator of unique items
"""
funcs = get_splits(None, conf, **cdicts(opts, kwargs))
pieces, _pass = funcs[0](), funcs[2]()
_OUTPUT = _INPUT if _pass else unique_items(_INPUT, pieces.field)
return _OUTPUT | python | def pipe_uniq(context=None, _INPUT=None, conf=None, **kwargs):
"""An operator that filters out non unique items according to the specified
field. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
kwargs -- other inputs, e.g. to feed terminals for rule values
conf : {'field': {'type': 'text', 'value': <field to be unique>}}
Returns
-------
_OUTPUT : generator of unique items
"""
funcs = get_splits(None, conf, **cdicts(opts, kwargs))
pieces, _pass = funcs[0](), funcs[2]()
_OUTPUT = _INPUT if _pass else unique_items(_INPUT, pieces.field)
return _OUTPUT | An operator that filters out non unique items according to the specified
field. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
kwargs -- other inputs, e.g. to feed terminals for rule values
conf : {'field': {'type': 'text', 'value': <field to be unique>}}
Returns
-------
_OUTPUT : generator of unique items | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipeuniq.py#L54-L72 |
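A usage sketch for pipe_uniq above, assuming unique_items keeps the first item seen for each value of the configured field; the sample items are hypothetical.

from pipe2py.modules.pipeuniq import pipe_uniq

items = [
    {'author': 'alice', 'title': 'first'},
    {'author': 'alice', 'title': 'second'},
    {'author': 'bob', 'title': 'third'},
]
conf = {'field': {'type': 'text', 'value': 'author'}}
for item in pipe_uniq(_INPUT=iter(items), conf=conf):
    print(item['title'])  # expected: 'first' then 'third'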
ggaughan/pipe2py | pipe2py/modules/pipeunion.py | asyncPipeUnion | def asyncPipeUnion(context=None, _INPUT=None, conf=None, **kwargs):
"""An operator that asynchronously merges multiple source together.
Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : asyncPipe like object (twisted Deferred iterable of items)
conf : unused
Keyword arguments
-----------------
_OTHER1 : asyncPipe like object
_OTHER2 : etc.
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of items
"""
_input = yield _INPUT
_OUTPUT = get_output(_input, **kwargs)
returnValue(_OUTPUT) | python | def asyncPipeUnion(context=None, _INPUT=None, conf=None, **kwargs):
"""An operator that asynchronously merges multiple source together.
Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : asyncPipe like object (twisted Deferred iterable of items)
conf : unused
Keyword arguments
-----------------
_OTHER1 : asyncPipe like object
_OTHER2 : etc.
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of items
"""
_input = yield _INPUT
_OUTPUT = get_output(_input, **kwargs)
returnValue(_OUTPUT) | An operator that asynchronously merges multiple source together.
Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : asyncPipe like object (twisted Deferred iterable of items)
conf : unused
Keyword arguments
-----------------
_OTHER1 : asyncPipe like object
_OTHER2 : etc.
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of items | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipeunion.py#L26-L47 |
ggaughan/pipe2py | pipe2py/modules/pipeunion.py | pipe_union | def pipe_union(context=None, _INPUT=None, conf=None, **kwargs):
"""An operator that merges multiple source together. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
conf : unused
Keyword arguments
-----------------
_OTHER1 : pipe2py.modules pipe like object
_OTHER2 : etc.
Returns
-------
_OUTPUT : generator of items
"""
_OUTPUT = get_output(_INPUT, **kwargs)
return _OUTPUT | python | def pipe_union(context=None, _INPUT=None, conf=None, **kwargs):
"""An operator that merges multiple source together. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
conf : unused
Keyword arguments
-----------------
_OTHER1 : pipe2py.modules pipe like object
_OTHER2 : etc.
Returns
-------
_OUTPUT : generator of items
"""
_OUTPUT = get_output(_INPUT, **kwargs)
return _OUTPUT | An operator that merges multiple source together. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
conf : unused
Keyword arguments
-----------------
_OTHER1 : pipe2py.modules pipe like object
_OTHER2 : etc.
Returns
-------
_OUTPUT : generator of items | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipeunion.py#L51-L70 |
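A usage sketch for pipe_union above; the _OTHER1 keyword follows the docstring, the two sample feeds are hypothetical, and the merge order of items depends on get_output.

from pipe2py.modules.pipeunion import pipe_union

feed_a = iter([{'title': 'a1'}, {'title': 'a2'}])
feed_b = iter([{'title': 'b1'}])
for item in pipe_union(_INPUT=feed_a, _OTHER1=feed_b):
    print(item['title'])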
ggaughan/pipe2py | pipe2py/modules/pipesort.py | pipe_sort | def pipe_sort(context=None, _INPUT=None, conf=None, **kwargs):
"""An operator that sorts the input source according to the specified key.
Not loopable. Not lazy.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
kwargs -- other inputs, e.g. to feed terminals for rule values
conf : {
'KEY': [
{
'field': {'type': 'text', 'value': 'title'},
'dir': {'type': 'text', 'value': 'DESC'}
}
]
}
Returns
-------
_OUTPUT : generator of sorted items
"""
test = kwargs.pop('pass_if', None)
_pass = utils.get_pass(test=test)
key_defs = imap(DotDict, utils.listize(conf['KEY']))
get_value = partial(utils.get_value, **kwargs)
parse_conf = partial(utils.parse_conf, parse_func=get_value, **kwargs)
keys = imap(parse_conf, key_defs)
order = ('%s%s' % ('-' if k.dir == 'DESC' else '', k.field) for k in keys)
comparers = map(get_comparer, order)
cmp_func = partial(multikeysort, comparers=comparers)
_OUTPUT = _INPUT if _pass else iter(sorted(_INPUT, cmp=cmp_func))
return _OUTPUT | python | def pipe_sort(context=None, _INPUT=None, conf=None, **kwargs):
"""An operator that sorts the input source according to the specified key.
Not loopable. Not lazy.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
kwargs -- other inputs, e.g. to feed terminals for rule values
conf : {
'KEY': [
{
'field': {'type': 'text', 'value': 'title'},
'dir': {'type': 'text', 'value': 'DESC'}
}
]
}
Returns
-------
_OUTPUT : generator of sorted items
"""
test = kwargs.pop('pass_if', None)
_pass = utils.get_pass(test=test)
key_defs = imap(DotDict, utils.listize(conf['KEY']))
get_value = partial(utils.get_value, **kwargs)
parse_conf = partial(utils.parse_conf, parse_func=get_value, **kwargs)
keys = imap(parse_conf, key_defs)
order = ('%s%s' % ('-' if k.dir == 'DESC' else '', k.field) for k in keys)
comparers = map(get_comparer, order)
cmp_func = partial(multikeysort, comparers=comparers)
_OUTPUT = _INPUT if _pass else iter(sorted(_INPUT, cmp=cmp_func))
return _OUTPUT | An operator that sorts the input source according to the specified key.
Not loopable. Not lazy.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
kwargs -- other inputs, e.g. to feed terminals for rule values
conf : {
'KEY': [
{
'field': {'type': 'text', 'value': 'title'},
'dir': {'type': 'text', 'value': 'DESC'}
}
]
}
Returns
-------
_OUTPUT : generator of sorted items | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipesort.py#L40-L72 |
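A usage sketch for pipe_sort above, assuming plain dicts are accepted as _INPUT items; the conf layout mirrors the docstring's KEY example.

from pipe2py.modules.pipesort import pipe_sort

items = [{'title': 'banana'}, {'title': 'apple'}, {'title': 'cherry'}]
conf = {'KEY': [{
    'field': {'type': 'text', 'value': 'title'},
    'dir': {'type': 'text', 'value': 'DESC'},
}]}
for item in pipe_sort(_INPUT=items, conf=conf):
    print(item['title'])  # expected: cherry, banana, apple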
ggaughan/pipe2py | pipe2py/modules/pipecreaterss.py | pipe_createrss | def pipe_createrss(context=None, _INPUT=None, conf=None, **kwargs):
"""An operator that converts a source into an RSS stream. Not loopable.
"""
conf = DotDict(conf)
for item in _INPUT:
item = DotDict(item)
yield {
value: item.get(conf.get(key, **kwargs))
for key, value in RSS_FIELDS.items()} | python | def pipe_createrss(context=None, _INPUT=None, conf=None, **kwargs):
"""An operator that converts a source into an RSS stream. Not loopable.
"""
conf = DotDict(conf)
for item in _INPUT:
item = DotDict(item)
yield {
value: item.get(conf.get(key, **kwargs))
for key, value in RSS_FIELDS.items()} | An operator that converts a source into an RSS stream. Not loopable. | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipecreaterss.py#L40-L51 |
ggaughan/pipe2py | pipe2py/modules/pipefetchsitefeed.py | pipe_fetchsitefeed | def pipe_fetchsitefeed(context=None, _INPUT=None, conf=None, **kwargs):
"""A source that fetches and parses the first feed found on one or more
sites. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items or fields
conf : URL -- url
Yields
------
_OUTPUT : items
"""
conf = DotDict(conf)
urls = utils.listize(conf['URL'])
for item in _INPUT:
for item_url in urls:
url = utils.get_value(DotDict(item_url), DotDict(item), **kwargs)
url = utils.get_abspath(url)
if context and context.verbose:
print "pipe_fetchsitefeed loading:", url
for link in autorss.getRSSLink(url.encode('utf-8')):
parsed = speedparser.parse(urlopen(link).read())
for entry in utils.gen_entries(parsed):
yield entry
if item.get('forever'):
# _INPUT is pipeforever and not a loop,
# so we just yield our item once
break | python | def pipe_fetchsitefeed(context=None, _INPUT=None, conf=None, **kwargs):
"""A source that fetches and parses the first feed found on one or more
sites. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items or fields
conf : URL -- url
Yields
------
_OUTPUT : items
"""
conf = DotDict(conf)
urls = utils.listize(conf['URL'])
for item in _INPUT:
for item_url in urls:
url = utils.get_value(DotDict(item_url), DotDict(item), **kwargs)
url = utils.get_abspath(url)
if context and context.verbose:
print "pipe_fetchsitefeed loading:", url
for link in autorss.getRSSLink(url.encode('utf-8')):
parsed = speedparser.parse(urlopen(link).read())
for entry in utils.gen_entries(parsed):
yield entry
if item.get('forever'):
# _INPUT is pipeforever and not a loop,
# so we just yield our item once
break | A source that fetches and parses the first feed found on one or more
sites. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items or fields
conf : URL -- url
Yields
------
_OUTPUT : items | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipefetchsitefeed.py#L18-L52 |
ggaughan/pipe2py | pipe2py/modules/piperegex.py | asyncPipeRegex | def asyncPipeRegex(context=None, _INPUT=None, conf=None, **kwargs):
"""An operator that asynchronously replaces text in items using regexes.
Each has the general format: "In [field] replace [match] with [replace]".
Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : asyncPipe like object (twisted Deferred iterable of items)
conf : {
'RULE': [
{
'field': {'value': <'search field'>},
'match': {'value': <'regex'>},
'replace': {'value': <'replacement'>},
'globalmatch': {'value': '1'},
'singlelinematch': {'value': '2'},
'multilinematch': {'value': '4'},
'casematch': {'value': '8'}
}
]
}
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of items
"""
splits = yield asyncGetSplits(_INPUT, conf['RULE'], **cdicts(opts, kwargs))
asyncConvert = partial(maybeDeferred, convert_func)
asyncFuncs = get_async_dispatch_funcs('pass', asyncConvert)
parsed = yield asyncDispatch(splits, *asyncFuncs)
_OUTPUT = yield maybeDeferred(parse_results, parsed)
returnValue(iter(_OUTPUT)) | python | def asyncPipeRegex(context=None, _INPUT=None, conf=None, **kwargs):
"""An operator that asynchronously replaces text in items using regexes.
Each has the general format: "In [field] replace [match] with [replace]".
Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : asyncPipe like object (twisted Deferred iterable of items)
conf : {
'RULE': [
{
'field': {'value': <'search field'>},
'match': {'value': <'regex'>},
'replace': {'value': <'replacement'>},
'globalmatch': {'value': '1'},
'singlelinematch': {'value': '2'},
'multilinematch': {'value': '4'},
'casematch': {'value': '8'}
}
]
}
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of items
"""
splits = yield asyncGetSplits(_INPUT, conf['RULE'], **cdicts(opts, kwargs))
asyncConvert = partial(maybeDeferred, convert_func)
asyncFuncs = get_async_dispatch_funcs('pass', asyncConvert)
parsed = yield asyncDispatch(splits, *asyncFuncs)
_OUTPUT = yield maybeDeferred(parse_results, parsed)
returnValue(iter(_OUTPUT)) | An operator that asynchronously replaces text in items using regexes.
Each has the general format: "In [field] replace [match] with [replace]".
Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : asyncPipe like object (twisted Deferred iterable of items)
conf : {
'RULE': [
{
'field': {'value': <'search field'>},
'match': {'value': <'regex'>},
'replace': {'value': <'replacement'>},
'globalmatch': {'value': '1'},
'singlelinematch': {'value': '2'},
'multilinematch': {'value': '4'},
'casematch': {'value': '8'}
}
]
}
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of items | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/piperegex.py#L47-L79 |
ggaughan/pipe2py | pipe2py/modules/piperegex.py | pipe_regex | def pipe_regex(context=None, _INPUT=None, conf=None, **kwargs):
"""An operator that replaces text in items using regexes. Each has the
general format: "In [field] replace [match] with [replace]". Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
conf : {
'RULE': [
{
'field': {'value': <'search field'>},
'match': {'value': <'regex'>},
'replace': {'value': <'replacement'>},
'globalmatch': {'value': '1'},
'singlelinematch': {'value': '2'},
'multilinematch': {'value': '4'},
'casematch': {'value': '8'}
}
]
}
Returns
-------
_OUTPUT : generator of items
"""
splits = get_splits(_INPUT, conf['RULE'], **cdicts(opts, kwargs))
parsed = utils.dispatch(splits, *get_dispatch_funcs('pass', convert_func))
_OUTPUT = parse_results(parsed)
return _OUTPUT | python | def pipe_regex(context=None, _INPUT=None, conf=None, **kwargs):
"""An operator that replaces text in items using regexes. Each has the
general format: "In [field] replace [match] with [replace]". Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
conf : {
'RULE': [
{
'field': {'value': <'search field'>},
'match': {'value': <'regex'>},
'replace': {'value': <'replacement'>},
'globalmatch': {'value': '1'},
'singlelinematch': {'value': '2'},
'multilinematch': {'value': '4'},
'casematch': {'value': '8'}
}
]
}
Returns
-------
_OUTPUT : generator of items
"""
splits = get_splits(_INPUT, conf['RULE'], **cdicts(opts, kwargs))
parsed = utils.dispatch(splits, *get_dispatch_funcs('pass', convert_func))
_OUTPUT = parse_results(parsed)
return _OUTPUT | An operator that replaces text in items using regexes. Each has the
general format: "In [field] replace [match] with [replace]". Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
conf : {
'RULE': [
{
'field': {'value': <'search field'>},
'match': {'value': <'regex'>},
'replace': {'value': <'replacement'>},
'globalmatch': {'value': '1'},
'singlelinematch': {'value': '2'},
'multilinematch': {'value': '4'},
'casematch': {'value': '8'}
}
]
}
Returns
-------
_OUTPUT : generator of items | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/piperegex.py#L100-L129 |
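A usage sketch for pipe_regex above with a single rule; the sample item and the expected replacement are illustrative, and the optional match flags from the docstring are omitted.

from pipe2py.modules.piperegex import pipe_regex

items = [{'title': 'Hello World'}]
conf = {'RULE': [{
    'field': {'value': 'title'},
    'match': {'value': 'World'},
    'replace': {'value': 'Earth'},
}]}
for item in pipe_regex(_INPUT=items, conf=conf):
    print(item['title'])  # expected: Hello Earth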
ggaughan/pipe2py | pipe2py/modules/pipestrreplace.py | asyncPipeStrreplace | def asyncPipeStrreplace(context=None, _INPUT=None, conf=None, **kwargs):
"""A string module that asynchronously replaces text. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : twisted Deferred iterable of items or strings
conf : {
'RULE': [
{
'param': {'value': <match type: 1=first, 2=last, 3=every>},
'find': {'value': <text to find>},
'replace': {'value': <replacement>}
}
]
}
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of replaced strings
"""
splits = yield asyncGetSplits(_INPUT, conf['RULE'], **kwargs)
parsed = yield asyncDispatch(splits, *get_async_dispatch_funcs())
_OUTPUT = yield asyncStarMap(asyncParseResult, parsed)
returnValue(iter(_OUTPUT)) | python | def asyncPipeStrreplace(context=None, _INPUT=None, conf=None, **kwargs):
"""A string module that asynchronously replaces text. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : twisted Deferred iterable of items or strings
conf : {
'RULE': [
{
'param': {'value': <match type: 1=first, 2=last, 3=every>},
'find': {'value': <text to find>},
'replace': {'value': <replacement>}
}
]
}
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of replaced strings
"""
splits = yield asyncGetSplits(_INPUT, conf['RULE'], **kwargs)
parsed = yield asyncDispatch(splits, *get_async_dispatch_funcs())
_OUTPUT = yield asyncStarMap(asyncParseResult, parsed)
returnValue(iter(_OUTPUT)) | A string module that asynchronously replaces text. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : twisted Deferred iterable of items or strings
conf : {
'RULE': [
{
'param': {'value': <match type: 1=first, 2=last, 3=every>},
'find': {'value': <text to find>},
'replace': {'value': <replacement>}
}
]
}
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of replaced strings | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipestrreplace.py#L38-L62 |
ggaughan/pipe2py | pipe2py/modules/pipestrreplace.py | pipe_strreplace | def pipe_strreplace(context=None, _INPUT=None, conf=None, **kwargs):
"""A string module that replaces text. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : iterable of items or strings
conf : {
'RULE': [
{
'param': {'value': <match type: 1=first, 2=last, 3=every>},
'find': {'value': <text to find>},
'replace': {'value': <replacement>}
}
]
}
Returns
-------
_OUTPUT : generator of replaced strings
"""
splits = get_splits(_INPUT, conf['RULE'], **kwargs)
parsed = utils.dispatch(splits, *get_dispatch_funcs())
_OUTPUT = starmap(parse_result, parsed)
return _OUTPUT | python | def pipe_strreplace(context=None, _INPUT=None, conf=None, **kwargs):
"""A string module that replaces text. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : iterable of items or strings
conf : {
'RULE': [
{
'param': {'value': <match type: 1=first, 2=last, 3=every>},
'find': {'value': <text to find>},
'replace': {'value': <replacement>}
}
]
}
Returns
-------
_OUTPUT : generator of replaced strings
"""
splits = get_splits(_INPUT, conf['RULE'], **kwargs)
parsed = utils.dispatch(splits, *get_dispatch_funcs())
_OUTPUT = starmap(parse_result, parsed)
return _OUTPUT | A string module that replaces text. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : iterable of items or strings
conf : {
'RULE': [
{
'param': {'value': <match type: 1=first, 2=last, 3=every>},
'find': {'value': <text to find>},
'replace': {'value': <replacement>}
}
]
}
Returns
-------
_OUTPUT : generator of replaced strings | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipestrreplace.py#L70-L94 |
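A usage sketch for pipe_strreplace above; the rule layout follows the docstring (param '3' meaning replace every match), while the input string and expected output are illustrative.

from pipe2py.modules.pipestrreplace import pipe_strreplace

conf = {'RULE': [{
    'param': {'value': '3'},   # 3 = replace every match, per the docstring
    'find': {'value': 'cat'},
    'replace': {'value': 'dog'},
}]}
for text in pipe_strreplace(_INPUT=['cat sat on a cat mat'], conf=conf):
    print(text)  # expected: dog sat on a dog mat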
ggaughan/pipe2py | pipe2py/modules/pipetruncate.py | asyncPipeUniq | def asyncPipeUniq(context=None, _INPUT=None, conf=None, **kwargs):
"""An operator that asynchronously returns a specified number of items from
the top of a feed. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : twisted Deferred iterable of items
conf : {
'start': {'type': 'number', 'value': <starting location>}
'count': {'type': 'number', 'value': <desired feed length>}
}
returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of unique items
"""
_input = yield _INPUT
asyncFuncs = yield asyncGetSplits(None, conf, **cdicts(opts, kwargs))
pieces = yield asyncFuncs[0]()
_pass = yield asyncFuncs[2]()
if _pass:
_OUTPUT = _input
else:
start = int(pieces.start)
stop = start + int(pieces.count)
_OUTPUT = islice(_input, start, stop)
returnValue(_OUTPUT) | python | def asyncPipeUniq(context=None, _INPUT=None, conf=None, **kwargs):
"""An operator that asynchronously returns a specified number of items from
the top of a feed. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : twisted Deferred iterable of items
conf : {
'start': {'type': 'number', 'value': <starting location>}
'count': {'type': 'number', 'value': <desired feed length>}
}
returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of unique items
"""
_input = yield _INPUT
asyncFuncs = yield asyncGetSplits(None, conf, **cdicts(opts, kwargs))
pieces = yield asyncFuncs[0]()
_pass = yield asyncFuncs[2]()
if _pass:
_OUTPUT = _input
else:
start = int(pieces.start)
stop = start + int(pieces.count)
_OUTPUT = islice(_input, start, stop)
returnValue(_OUTPUT) | An operator that asynchronously returns a specified number of items from
the top of a feed. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : twisted Deferred iterable of items
conf : {
'start': {'type': 'number', 'value': <starting location>}
'count': {'type': 'number', 'value': <desired feed length>}
}
returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of unique items | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipetruncate.py#L20-L49 |
ggaughan/pipe2py | pipe2py/modules/pipetruncate.py | pipe_truncate | def pipe_truncate(context=None, _INPUT=None, conf=None, **kwargs):
"""An operator that returns a specified number of items from the top of a
feed. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
kwargs -- terminal, if the truncation value is wired in
conf : {
'start': {'type': 'number', 'value': <starting location>}
'count': {'type': 'number', 'value': <desired feed length>}
}
Returns
-------
_OUTPUT : generator of items
"""
funcs = get_splits(None, conf, **cdicts(opts, kwargs))
pieces, _pass = funcs[0](), funcs[2]()
if _pass:
_OUTPUT = _INPUT
else:
try:
start = int(pieces.start)
except AttributeError:
start = 0
stop = start + int(pieces.count)
_OUTPUT = islice(_INPUT, start, stop)
return _OUTPUT | python | def pipe_truncate(context=None, _INPUT=None, conf=None, **kwargs):
"""An operator that returns a specified number of items from the top of a
feed. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
kwargs -- terminal, if the truncation value is wired in
conf : {
'start': {'type': 'number', value': <starting location>}
'count': {'type': 'number', value': <desired feed length>}
}
Returns
-------
_OUTPUT : generator of items
"""
funcs = get_splits(None, conf, **cdicts(opts, kwargs))
pieces, _pass = funcs[0](), funcs[2]()
if _pass:
_OUTPUT = _INPUT
else:
try:
start = int(pieces.start)
except AttributeError:
start = 0
stop = start + int(pieces.count)
_OUTPUT = islice(_INPUT, start, stop)
return _OUTPUT | An operator that returns a specified number of items from the top of a
feed. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
kwargs -- terminal, if the truncation value is wired in
conf : {
'start': {'type': 'number', value': <starting location>}
'count': {'type': 'number', value': <desired feed length>}
}
Returns
-------
_OUTPUT : generator of items | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipetruncate.py#L53-L85 |
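A usage sketch for pipe_truncate above; the start/count conf mirrors the docstring, and passing the numbers as strings is an assumption based on the int() casts in the code.

from pipe2py.modules.pipetruncate import pipe_truncate

items = ({'n': i} for i in range(10))
conf = {
    'start': {'type': 'number', 'value': '2'},
    'count': {'type': 'number', 'value': '3'},
}
for item in pipe_truncate(_INPUT=items, conf=conf):
    print(item['n'])  # expected: 2, 3, 4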
ggaughan/pipe2py | pipe2py/modules/pipestringtokenizer.py | asyncPipeStringtokenizer | def asyncPipeStringtokenizer(context=None, _INPUT=None, conf=None, **kwargs):
"""A string module that asynchronously splits a string into tokens
delimited by separators. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : twisted Deferred iterable of items or strings
conf : {
'to-str': {'value': <delimiter>},
'dedupe': {'type': 'bool', 'value': <1>},
'sort': {'type': 'bool', 'value': <1>}
}
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of items
"""
conf['delimiter'] = conf.pop('to-str', dict.get(conf, 'delimiter'))
splits = yield asyncGetSplits(_INPUT, conf, **cdicts(opts, kwargs))
parsed = yield asyncDispatch(splits, *get_async_dispatch_funcs())
items = yield asyncStarMap(partial(maybeDeferred, parse_result), parsed)
_OUTPUT = utils.multiplex(items)
returnValue(_OUTPUT) | python | def asyncPipeStringtokenizer(context=None, _INPUT=None, conf=None, **kwargs):
"""A string module that asynchronously splits a string into tokens
delimited by separators. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : twisted Deferred iterable of items or strings
conf : {
'to-str': {'value': <delimiter>},
'dedupe': {'type': 'bool', 'value': <1>},
'sort': {'type': 'bool', 'value': <1>}
}
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of items
"""
conf['delimiter'] = conf.pop('to-str', dict.get(conf, 'delimiter'))
splits = yield asyncGetSplits(_INPUT, conf, **cdicts(opts, kwargs))
parsed = yield asyncDispatch(splits, *get_async_dispatch_funcs())
items = yield asyncStarMap(partial(maybeDeferred, parse_result), parsed)
_OUTPUT = utils.multiplex(items)
returnValue(_OUTPUT) | A string module that asynchronously splits a string into tokens
delimited by separators. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : twisted Deferred iterable of items or strings
conf : {
'to-str': {'value': <delimiter>},
'dedupe': {'type': 'bool', value': <1>},
'sort': {'type': 'bool', value': <1>}
}
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of items | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipestringtokenizer.py#L46-L69 |
ggaughan/pipe2py | pipe2py/modules/pipeexchangerate.py | asyncPipeExchangerate | def asyncPipeExchangerate(context=None, _INPUT=None, conf=None, **kwargs):
"""A string module that asynchronously retrieves the current exchange rate
for a given currency pair. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : twisted Deferred iterable of items or strings (base currency)
conf : {
'quote': {'value': <'USD'>},
'default': {'value': <'USD'>},
'offline': {'type': 'bool', 'value': '0'},
}
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of hashed strings
"""
offline = conf.get('offline', {}).get('value')
# TODO add async rate data fetching
rate_data = get_offline_rate_data() if offline else get_rate_data()
rates = parse_request(rate_data)
splits = yield asyncGetSplits(_INPUT, conf, **cdicts(opts, kwargs))
parsed = yield asyncDispatch(splits, *get_async_dispatch_funcs())
_OUTPUT = starmap(partial(parse_result, rates=rates), parsed)
returnValue(iter(_OUTPUT)) | python | def asyncPipeExchangerate(context=None, _INPUT=None, conf=None, **kwargs):
"""A string module that asynchronously retrieves the current exchange rate
for a given currency pair. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : twisted Deferred iterable of items or strings (base currency)
conf : {
'quote': {'value': <'USD'>},
'default': {'value': <'USD'>},
'offline': {'type': 'bool', 'value': '0'},
}
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of hashed strings
"""
offline = conf.get('offline', {}).get('value')
# TODO add async rate data fetching
rate_data = get_offline_rate_data() if offline else get_rate_data()
rates = parse_request(rate_data)
splits = yield asyncGetSplits(_INPUT, conf, **cdicts(opts, kwargs))
parsed = yield asyncDispatch(splits, *get_async_dispatch_funcs())
_OUTPUT = starmap(partial(parse_result, rates=rates), parsed)
returnValue(iter(_OUTPUT)) | A string module that asynchronously retrieves the current exchange rate
for a given currency pair. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : twisted Deferred iterable of items or strings (base currency)
conf : {
'quote': {'value': <'USD'>},
'default': {'value': <'USD'>},
'offline': {'type': 'bool', 'value': '0'},
}
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of hashed strings | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipeexchangerate.py#L100-L125 |
ggaughan/pipe2py | pipe2py/modules/pipeexchangerate.py | pipe_exchangerate | def pipe_exchangerate(context=None, _INPUT=None, conf=None, **kwargs):
"""A string module that retrieves the current exchange rate for a given
currency pair. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : iterable of items or strings (base currency)
conf : {
'quote': {'value': <'USD'>},
'default': {'value': <'USD'>},
'offline': {'type': 'bool', 'value': '0'},
}
Returns
-------
_OUTPUT : generator of hashed strings
"""
offline = conf.get('offline', {}).get('value')
rate_data = get_offline_rate_data(err=False) if offline else get_rate_data()
rates = parse_request(rate_data)
splits = get_splits(_INPUT, conf, **cdicts(opts, kwargs))
parsed = utils.dispatch(splits, *get_dispatch_funcs())
_OUTPUT = starmap(partial(parse_result, rates=rates), parsed)
return _OUTPUT | python | def pipe_exchangerate(context=None, _INPUT=None, conf=None, **kwargs):
"""A string module that retrieves the current exchange rate for a given
currency pair. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : iterable of items or strings (base currency)
conf : {
'quote': {'value': <'USD'>},
'default': {'value': <'USD'>},
'offline': {'type': 'bool', 'value': '0'},
}
Returns
-------
_OUTPUT : generator of hashed strings
"""
offline = conf.get('offline', {}).get('value')
rate_data = get_offline_rate_data(err=False) if offline else get_rate_data()
rates = parse_request(rate_data)
splits = get_splits(_INPUT, conf, **cdicts(opts, kwargs))
parsed = utils.dispatch(splits, *get_dispatch_funcs())
_OUTPUT = starmap(partial(parse_result, rates=rates), parsed)
return _OUTPUT | A string module that retrieves the current exchange rate for a given
currency pair. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : iterable of items or strings (base currency)
conf : {
'quote': {'value': <'USD'>},
'default': {'value': <'USD'>},
'offline': {'type': 'bool', 'value': '0'},
}
Returns
-------
_OUTPUT : generator of hashed strings | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipeexchangerate.py#L145-L169 |
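A usage sketch for pipe_exchangerate above, using the offline flag so the bundled rate data is used instead of a network request; the quoted currencies are illustrative.

from pipe2py.modules.pipeexchangerate import pipe_exchangerate

conf = {
    'quote': {'value': 'USD'},
    'offline': {'type': 'bool', 'value': '1'},  # use offline rate data
}
# _INPUT items are base currency codes, per the docstring.
for rate in pipe_exchangerate(_INPUT=['GBP'], conf=conf):
    print(rate)  # a single GBP -> USD rate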
ggaughan/pipe2py | pipe2py/modules/pipestrtransform.py | pipe_strtransform | def pipe_strtransform(context=None, _INPUT=None, conf=None, **kwargs):
"""A string module that splits a string into tokens delimited by
separators. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : iterable of items or strings
conf : {'transformation': {'value': <'swapcase'>}}
Returns
-------
_OUTPUT : generator of tokenized strings
"""
splits = get_splits(_INPUT, conf, **cdicts(opts, kwargs))
parsed = utils.dispatch(splits, *get_dispatch_funcs())
_OUTPUT = starmap(parse_result, parsed)
return _OUTPUT | python | def pipe_strtransform(context=None, _INPUT=None, conf=None, **kwargs):
"""A string module that splits a string into tokens delimited by
separators. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : iterable of items or strings
conf : {'transformation': {'value': <'swapcase'>}}
Returns
-------
_OUTPUT : generator of tokenized strings
"""
splits = get_splits(_INPUT, conf, **cdicts(opts, kwargs))
parsed = utils.dispatch(splits, *get_dispatch_funcs())
_OUTPUT = starmap(parse_result, parsed)
return _OUTPUT | A string module that splits a string into tokens delimited by
separators. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : iterable of items or strings
conf : {'transformation': {'value': <'swapcase'>}}
Returns
-------
_OUTPUT : generator of tokenized strings | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipestrtransform.py#L51-L68 |
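A usage sketch for pipe_strtransform above, applying the docstring's swapcase example to a hypothetical input string.

from pipe2py.modules.pipestrtransform import pipe_strtransform

conf = {'transformation': {'value': 'swapcase'}}
for text in pipe_strtransform(_INPUT=['Hello World'], conf=conf):
    print(text)  # expected: hELLO wORLD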
ggaughan/pipe2py | pipe2py/modules/pipeprivateinput.py | pipe_privateinput | def pipe_privateinput(context=None, _INPUT=None, conf=None, **kwargs):
"""An input that prompts the user for some text and yields it forever.
Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : unused
conf : {
'name': {'value': 'parameter name'},
'prompt': {'value': 'User prompt'},
'default': {'value': 'default value'},
'debug': {'value': 'debug value'}
}
Yields
------
_OUTPUT : text
"""
value = utils.get_input(context, conf)
while True:
yield value | python | def pipe_privateinput(context=None, _INPUT=None, conf=None, **kwargs):
"""An input that prompts the user for some text and yields it forever.
Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : unused
conf : {
'name': {'value': 'parameter name'},
'prompt': {'value': 'User prompt'},
'default': {'value': 'default value'},
'debug': {'value': 'debug value'}
}
Yields
------
_OUTPUT : text
"""
value = utils.get_input(context, conf)
while True:
yield value | An input that prompts the user for some text and yields it forever.
Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : unused
conf : {
'name': {'value': 'parameter name'},
'prompt': {'value': 'User prompt'},
'default': {'value': 'default value'},
'debug': {'value': 'debug value'}
}
Yields
------
_OUTPUT : text | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipeprivateinput.py#L13-L35 |
ggaughan/pipe2py | pipe2py/modules/pipedateformat.py | pipe_dateformat | def pipe_dateformat(context=None, _INPUT=None, conf=None, **kwargs):
"""Formats a datetime value. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipedatebuilder pipe like object (iterable of date timetuples)
conf : {
'format': {'value': <'%B %d, %Y'>},
'timezone': {'value': <'EST'>}
}
Yields
------
_OUTPUT : formatted dates
"""
conf = DotDict(conf)
loop_with = kwargs.pop('with', None)
date_format = conf.get('format', **kwargs)
# timezone = conf.get('timezone', **kwargs)
for item in _INPUT:
_with = item.get(loop_with, **kwargs) if loop_with else item
try:
# todo: check that all PHP formats are covered by Python
date_string = time.strftime(date_format, _with)
except TypeError as e:
if context and context.verbose:
print 'Error formatting date: %s' % item
print e
continue
else:
yield date_string | python | def pipe_dateformat(context=None, _INPUT=None, conf=None, **kwargs):
"""Formats a datetime value. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipedatebuilder pipe like object (iterable of date timetuples)
conf : {
'format': {'value': <'%B %d, %Y'>},
'timezone': {'value': <'EST'>}
}
Yields
------
_OUTPUT : formatted dates
"""
conf = DotDict(conf)
loop_with = kwargs.pop('with', None)
date_format = conf.get('format', **kwargs)
# timezone = conf.get('timezone', **kwargs)
for item in _INPUT:
_with = item.get(loop_with, **kwargs) if loop_with else item
try:
# todo: check that all PHP formats are covered by Python
date_string = time.strftime(date_format, _with)
except TypeError as e:
if context and context.verbose:
print 'Error formatting date: %s' % item
print e
continue
else:
yield date_string | Formats a datetime value. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipedatebuilder pipe like object (iterable of date timetuples)
conf : {
'format': {'value': <'%B %d, %Y'>},
'timezone': {'value': <'EST'>}
}
Yields
------
_OUTPUT : formatted dates | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipedateformat.py#L15-L49 |
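A sketch chaining pipe_datebuilder into pipe_dateformat, as the docstrings suggest; the date literal and format string come from the docstrings, while the month/day interpretation of '12/2/2014' depends on the underlying date parser.

from pipe2py.modules.pipedatebuilder import pipe_datebuilder
from pipe2py.modules.pipedateformat import pipe_dateformat

# pipe_datebuilder yields timetuples, which pipe_dateformat formats with strftime.
dates = pipe_datebuilder(_INPUT=[{}], conf={'DATE': {'type': 'datetime', 'value': '12/2/2014'}})
for formatted in pipe_dateformat(_INPUT=dates, conf={'format': {'value': '%B %d, %Y'}}):
    print(formatted)  # e.g. 'December 02, 2014', depending on the parser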
ggaughan/pipe2py | pipe2py/modules/pipesubelement.py | pipe_subelement | def pipe_subelement(context=None, _INPUT=None, conf=None, **kwargs):
"""An operator extracts select sub-elements from a feed. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
conf : {'path': {'value': <element path>}}
Yields
------
_OUTPUT : items
"""
path = DotDict(conf).get('path', **kwargs)
for item in _INPUT:
element = DotDict(item).get(path, **kwargs)
for i in utils.gen_items(element):
yield {'content': i}
if item.get('forever'):
# _INPUT is pipeforever and not a loop,
# so we just yield our item once
break | python | def pipe_subelement(context=None, _INPUT=None, conf=None, **kwargs):
"""An operator extracts select sub-elements from a feed. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
conf : {'path': {'value': <element path>}}
Yields
------
_OUTPUT : items
"""
path = DotDict(conf).get('path', **kwargs)
for item in _INPUT:
element = DotDict(item).get(path, **kwargs)
for i in utils.gen_items(element):
yield {'content': i}
if item.get('forever'):
# _INPUT is pipeforever and not a loop,
# so we just yield our item once
break | An operator extracts select sub-elements from a feed. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
conf : {'path': {'value': <element path>}}
Yields
------
_OUTPUT : items | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipesubelement.py#L14-L38 |
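A usage sketch for pipe_subelement above; the nested sample item and the 'value.items' path are illustrative, assuming DotDict resolves dotted paths as the docstring implies.

from pipe2py.modules.pipesubelement import pipe_subelement

item = {'value': {'items': [{'title': 'one'}, {'title': 'two'}]}}
conf = {'path': {'value': 'value.items'}}
for sub in pipe_subelement(_INPUT=[item], conf=conf):
    print(sub)  # expected: {'content': {'title': 'one'}}, then {'content': {'title': 'two'}}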
ggaughan/pipe2py | pipe2py/modules/pipefeedautodiscovery.py | pipe_feedautodiscovery | def pipe_feedautodiscovery(context=None, _INPUT=None, conf=None, **kwargs):
"""A source that searches for and returns feed links found in a page.
Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items or fields
conf : URL -- url
Yields
------
_OUTPUT : items
"""
conf = DotDict(conf)
urls = utils.listize(conf['URL'])
for item in _INPUT:
for item_url in urls:
url = utils.get_value(DotDict(item_url), DotDict(item), **kwargs)
url = utils.get_abspath(url)
if context and context.verbose:
print "pipe_feedautodiscovery loading:", url
for entry in autorss.getRSSLink(url.encode('utf-8')):
yield {'link': entry}
# todo: add rel, type, title
if item.get('forever'):
# _INPUT is pipeforever and not a loop,
# so we just yield our item once
break | python | def pipe_feedautodiscovery(context=None, _INPUT=None, conf=None, **kwargs):
"""A source that searches for and returns feed links found in a page.
Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items or fields
conf : URL -- url
Yields
------
_OUTPUT : items
"""
conf = DotDict(conf)
urls = utils.listize(conf['URL'])
for item in _INPUT:
for item_url in urls:
url = utils.get_value(DotDict(item_url), DotDict(item), **kwargs)
url = utils.get_abspath(url)
if context and context.verbose:
print "pipe_feedautodiscovery loading:", url
for entry in autorss.getRSSLink(url.encode('utf-8')):
yield {'link': entry}
# todo: add rel, type, title
if item.get('forever'):
# _INPUT is pipeforever and not a loop,
# so we just yield our item once
break | A source that searches for and returns feed links found in a page.
Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items or fields
conf : URL -- url
Yields
------
_OUTPUT : items | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipefeedautodiscovery.py#L14-L46 |
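A sketch of calling pipe_feedautodiscovery on its own. The page URL is hypothetical, and a single placeholder item with a 'forever' key stands in for the pipeforever source so the module stops after one pass, as the comment in the code describes.

from pipe2py.modules.pipefeedautodiscovery import pipe_feedautodiscovery

conf = {'URL': {'value': 'http://www.example.com/'}}  # hypothetical page URL

# One placeholder item mimics pipeforever; 'forever' triggers the break above.
for entry in pipe_feedautodiscovery(context=None, _INPUT=[{'forever': True}], conf=conf):
    print entry['link']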
ggaughan/pipe2py | pipe2py/modules/pipeurlinput.py | pipe_urlinput | def pipe_urlinput(context=None, _INPUT=None, conf=None, **kwargs):
"""An input that prompts the user for a url and yields it forever.
Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : unused
conf : {
'name': {'value': 'parameter name'},
'prompt': {'value': 'User prompt'},
'default': {'value': 'default value'},
'debug': {'value': 'debug value'}
}
Yields
------
_OUTPUT : url
"""
value = utils.get_input(context, conf)
value = utils.url_quote(value)
while True:
yield value | python | def pipe_urlinput(context=None, _INPUT=None, conf=None, **kwargs):
"""An input that prompts the user for a url and yields it forever.
Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : unused
conf : {
'name': {'value': 'parameter name'},
'prompt': {'value': 'User prompt'},
'default': {'value': 'default value'},
'debug': {'value': 'debug value'}
}
Yields
------
_OUTPUT : url
"""
value = utils.get_input(context, conf)
value = utils.url_quote(value)
while True:
yield value | An input that prompts the user for a url and yields it forever.
Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : unused
conf : {
'name': {'value': 'parameter name'},
'prompt': {'value': 'User prompt'},
'default': {'value': 'default value'},
'debug': {'value': 'debug value'}
}
Yields
------
_OUTPUT : url | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipeurlinput.py#L13-L36 |
ggaughan/pipe2py | pipe2py/modules/pipeyql.py | pipe_yql | def pipe_yql(context=None, _INPUT=None, conf=None, **kwargs):
"""A source that issues YQL queries. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items or fields
conf : yqlquery -- YQL query
# todo: handle envURL
Yields
------
_OUTPUT : query results
"""
# todo: get from a config/env file
url = "http://query.yahooapis.com/v1/public/yql"
conf = DotDict(conf)
query = conf['yqlquery']
for item in _INPUT:
item = DotDict(item)
yql = utils.get_value(query, item, **kwargs)
# note: we use the default format of xml since json loses some
# structure
# todo: diagnostics=true e.g. if context.test
# todo: consider paging for large result sets
r = requests.get(url, params={'q': yql}, stream=True)
# Parse the response
tree = parse(r.raw)
if context and context.verbose:
print "pipe_yql loading xml:", yql
root = tree.getroot()
# note: query also has row count
results = root.find('results')
# Convert xml into generation of dicts
for element in results.getchildren():
yield utils.etree_to_dict(element)
if item.get('forever'):
# _INPUT is pipeforever and not a loop,
# so we just yield our item once
break | python | def pipe_yql(context=None, _INPUT=None, conf=None, **kwargs):
"""A source that issues YQL queries. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items or fields
conf : yqlquery -- YQL query
# todo: handle envURL
Yields
------
_OUTPUT : query results
"""
# todo: get from a config/env file
url = "http://query.yahooapis.com/v1/public/yql"
conf = DotDict(conf)
query = conf['yqlquery']
for item in _INPUT:
item = DotDict(item)
yql = utils.get_value(query, item, **kwargs)
# note: we use the default format of xml since json loses some
# structure
# todo: diagnostics=true e.g. if context.test
# todo: consider paging for large result sets
r = requests.get(url, params={'q': yql}, stream=True)
# Parse the response
tree = parse(r.raw)
if context and context.verbose:
print "pipe_yql loading xml:", yql
root = tree.getroot()
# note: query also has row count
results = root.find('results')
# Convert xml into generation of dicts
for element in results.getchildren():
yield utils.etree_to_dict(element)
if item.get('forever'):
# _INPUT is pipeforever and not a loop,
# so we just yield our item once
break | A source that issues YQL queries. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items or fields
conf : yqlquery -- YQL query
# todo: handle envURL
Yields
------
_OUTPUT : query results | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipeyql.py#L17-L64 |
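A sketch of a direct pipe_yql call. Yahoo has since retired the public YQL endpoint, so this is illustrative only; the query string is hypothetical and the placeholder item again mimics pipeforever.

from pipe2py.modules.pipeyql import pipe_yql

conf = {'yqlquery': {
    'value': 'select title from rss where url="http://www.example.com/feed.xml"'}}

for row in pipe_yql(context=None, _INPUT=[{'forever': True}], conf=conf):
    print row  # one dict per <results> child element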
ggaughan/pipe2py | pipe2py/modules/pipenumberinput.py | pipe_numberinput | def pipe_numberinput(context=None, _INPUT=None, conf=None, **kwargs):
"""An input that prompts the user for a number and yields it forever.
Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : not used
conf : {
'name': {'value': 'parameter name'},
'prompt': {'value': 'User prompt'},
'default': {'value': 'default value'},
'debug': {'value': 'debug value'}
}
Yields
------
_OUTPUT : text
"""
value = utils.get_input(context, conf)
try:
value = int(value)
except:
value = 0
while True:
yield value | python | def pipe_numberinput(context=None, _INPUT=None, conf=None, **kwargs):
"""An input that prompts the user for a number and yields it forever.
Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : not used
conf : {
'name': {'value': 'parameter name'},
'prompt': {'value': 'User prompt'},
'default': {'value': 'default value'},
'debug': {'value': 'debug value'}
}
Yields
------
_OUTPUT : text
"""
value = utils.get_input(context, conf)
try:
value = int(value)
except:
value = 0
while True:
yield value | An input that prompts the user for a number and yields it forever.
Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : not used
conf : {
'name': {'value': 'parameter name'},
'prompt': {'value': 'User prompt'},
'default': {'value': 'default value'},
'debug': {'value': 'debug value'}
}
Yields
------
_OUTPUT : text | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipenumberinput.py#L13-L40 |
ggaughan/pipe2py | pipe2py/modules/pipeurlbuilder.py | pipe_urlbuilder | def pipe_urlbuilder(context=None, _INPUT=None, conf=None, **kwargs):
"""A url module that builds a url. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items or fields
conf : {
'PARAM': [
{'key': {'value': <'order'>}, 'value': {'value': <'desc'>}},
{'key': {'value': <'page'>}, 'value': {'value': <'2'>}}
]
'PATH': {'type': 'text', 'value': <''>},
'BASE': {'type': 'text', 'value': <'http://site.com/feed.xml'>},
}
Yields
------
_OUTPUT : url
"""
pkwargs = cdicts(opts, kwargs)
get_params = get_funcs(conf.get('PARAM', []), **kwargs)[0]
get_paths = get_funcs(conf.get('PATH', []), **pkwargs)[0]
get_base = get_funcs(conf['BASE'], listize=False, **pkwargs)[0]
parse_params = utils.parse_params
splits = get_splits(_INPUT, funcs=[get_params, get_paths, get_base])
parsed = utils.dispatch(splits, *get_dispatch_funcs('pass', parse_params))
_OUTPUT = starmap(parse_result, parsed)
return _OUTPUT | python | def pipe_urlbuilder(context=None, _INPUT=None, conf=None, **kwargs):
"""A url module that builds a url. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items or fields
conf : {
'PARAM': [
{'key': {'value': <'order'>}, 'value': {'value': <'desc'>}},
{'key': {'value': <'page'>}, 'value': {'value': <'2'>}}
]
'PATH': {'type': 'text', 'value': <''>},
'BASE': {'type': 'text', 'value': <'http://site.com/feed.xml'>},
}
Yields
------
_OUTPUT : url
"""
pkwargs = cdicts(opts, kwargs)
get_params = get_funcs(conf.get('PARAM', []), **kwargs)[0]
get_paths = get_funcs(conf.get('PATH', []), **pkwargs)[0]
get_base = get_funcs(conf['BASE'], listize=False, **pkwargs)[0]
parse_params = utils.parse_params
splits = get_splits(_INPUT, funcs=[get_params, get_paths, get_base])
parsed = utils.dispatch(splits, *get_dispatch_funcs('pass', parse_params))
_OUTPUT = starmap(parse_result, parsed)
return _OUTPUT | A url module that builds a url. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items or fields
conf : {
'PARAM': [
{'key': {'value': <'order'>}, 'value': {'value': <'desc'>}},
{'key': {'value': <'page'>}, 'value': {'value': <'2'>}}
]
'PATH': {'type': 'text', 'value': <''>},
'BASE': {'type': 'text', 'value': <'http://site.com/feed.xml'>},
}
Yields
------
_OUTPUT : url | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipeurlbuilder.py#L29-L57 |
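A sketch that mirrors the conf example from the docstring above. The placeholder item stands in for pipeforever; the exact ordering of query parameters in the resulting URL is not guaranteed here.

from pipe2py.modules.pipeurlbuilder import pipe_urlbuilder

conf = {
    'BASE': {'type': 'text', 'value': 'http://site.com/feed.xml'},
    'PATH': {'type': 'text', 'value': ''},
    'PARAM': [
        {'key': {'value': 'order'}, 'value': {'value': 'desc'}},
        {'key': {'value': 'page'}, 'value': {'value': '2'}},
    ],
}

urls = pipe_urlbuilder(context=None, _INPUT=[{'forever': True}], conf=conf)
print list(urls)  # expected: something like ['http://site.com/feed.xml?order=desc&page=2']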
ggaughan/pipe2py | pipe2py/modules/pipecsv.py | pipe_csv | def pipe_csv(context=None, _INPUT=None, conf=None, **kwargs):
"""A source that fetches and parses a csv file to yield items. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items or fields
conf : URL -- url
skip -- number of header rows to skip
col_mode -- column name source: row=header row(s),
custom=defined in col_name
col_name -- list of custom column names
col_row_start -- first column header row
col_row_end -- last column header row
separator -- column separator
Yields
------
_OUTPUT : items
Note:
Current restrictions:
separator must be 1 character
assumes every row has exactly the expected number of fields, as defined
in the header
"""
conf = DotDict(conf)
conf_sep = conf['separator']
conf_mode = conf['col_mode']
col_name = conf['col_name']
for item in _INPUT:
item = DotDict(item)
url = utils.get_value(conf['URL'], item, **kwargs)
url = utils.get_abspath(url)
separator = utils.get_value(conf_sep, item, encode=True, **kwargs)
skip = int(utils.get_value(conf['skip'], item, **kwargs))
col_mode = utils.get_value(conf_mode, item, **kwargs)
f = urlopen(url)
if context and context.verbose:
print "pipe_csv loading:", url
for i in xrange(skip):
f.next()
reader = csv.UnicodeReader(f, delimiter=separator)
fieldnames = []
if col_mode == 'custom':
fieldnames = [DotDict(x).get() for x in col_name]
else:
fieldnames = _gen_fieldnames(conf, reader, item, **kwargs)
for rows in reader:
yield dict(zip(fieldnames, rows))
f.close()
if item.get('forever'):
# _INPUT is pipeforever and not a loop,
# so we just yield our item once
break | python | def pipe_csv(context=None, _INPUT=None, conf=None, **kwargs):
"""A source that fetches and parses a csv file to yield items. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items or fields
conf : URL -- url
skip -- number of header rows to skip
col_mode -- column name source: row=header row(s),
custom=defined in col_name
col_name -- list of custom column names
col_row_start -- first column header row
col_row_end -- last column header row
separator -- column separator
Yields
------
_OUTPUT : items
Note:
Current restrictions:
separator must be 1 character
assumes every row has exactly the expected number of fields, as defined
in the header
"""
conf = DotDict(conf)
conf_sep = conf['separator']
conf_mode = conf['col_mode']
col_name = conf['col_name']
for item in _INPUT:
item = DotDict(item)
url = utils.get_value(conf['URL'], item, **kwargs)
url = utils.get_abspath(url)
separator = utils.get_value(conf_sep, item, encode=True, **kwargs)
skip = int(utils.get_value(conf['skip'], item, **kwargs))
col_mode = utils.get_value(conf_mode, item, **kwargs)
f = urlopen(url)
if context and context.verbose:
print "pipe_csv loading:", url
for i in xrange(skip):
f.next()
reader = csv.UnicodeReader(f, delimiter=separator)
fieldnames = []
if col_mode == 'custom':
fieldnames = [DotDict(x).get() for x in col_name]
else:
fieldnames = _gen_fieldnames(conf, reader, item, **kwargs)
for rows in reader:
yield dict(zip(fieldnames, rows))
f.close()
if item.get('forever'):
# _INPUT is pipeforever and not a loop,
# so we just yield our item once
break | A source that fetches and parses a csv file to yield items. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items or fields
conf : URL -- url
skip -- number of header rows to skip
col_mode -- column name source: row=header row(s),
custom=defined in col_name
col_name -- list of custom column names
col_row_start -- first column header row
col_row_end -- last column header row
separator -- column separator
Yields
------
_OUTPUT : items
Note:
Current restrictions:
separator must be 1 character
assumes every row has exactly the expected number of fields, as defined
in the header | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipecsv.py#L24-L87 |
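A hedged pipe_csv sketch. The CSV URL is hypothetical; the conf keys follow the docstring, but the values used for col_row_start and col_row_end (the header-row range) are an assumption, not confirmed by the source shown here.

from pipe2py.modules.pipecsv import pipe_csv

conf = {
    'URL': {'value': 'http://www.example.com/data.csv'},  # hypothetical file
    'separator': {'value': ','},
    'skip': {'value': '0'},            # header rows to skip before reading
    'col_mode': {'value': 'row'},      # take column names from the header row(s)
    'col_name': [],                    # only used when col_mode == 'custom'
    'col_row_start': {'value': '1'},   # assumed 1-based header row range
    'col_row_end': {'value': '1'},
}

for row in pipe_csv(context=None, _INPUT=[{'forever': True}], conf=conf):
    print row  # one dict per data row, keyed by the header names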
ggaughan/pipe2py | pipe2py/modules/piperename.py | asyncPipeRename | def asyncPipeRename(context=None, _INPUT=None, conf=None, **kwargs):
"""An operator that asynchronously renames or copies fields in the input
source. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : asyncPipe like object (twisted Deferred iterable of items)
conf : {
'RULE': [
{
'op': {'value': 'rename or copy'},
'field': {'value': 'old field'},
'newval': {'value': 'new field'}
}
]
}
kwargs : other inputs, e.g., to feed terminals for rule values
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of items
"""
splits = yield asyncGetSplits(_INPUT, conf['RULE'], **cdicts(opts, kwargs))
_OUTPUT = yield maybeDeferred(parse_results, splits, **kwargs)
returnValue(_OUTPUT) | python | def asyncPipeRename(context=None, _INPUT=None, conf=None, **kwargs):
"""An operator that asynchronously renames or copies fields in the input
source. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : asyncPipe like object (twisted Deferred iterable of items)
conf : {
'RULE': [
{
'op': {'value': 'rename or copy'},
'field': {'value': 'old field'},
'newval': {'value': 'new field'}
}
]
}
kwargs : other inputs, e.g., to feed terminals for rule values
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of items
"""
splits = yield asyncGetSplits(_INPUT, conf['RULE'], **cdicts(opts, kwargs))
_OUTPUT = yield maybeDeferred(parse_results, splits, **kwargs)
returnValue(_OUTPUT) | An operator that asynchronously renames or copies fields in the input
source. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : asyncPipe like object (twisted Deferred iterable of items)
conf : {
'RULE': [
{
'op': {'value': 'rename or copy'},
'field': {'value': 'old field'},
'newval': {'value': 'new field'}
}
]
}
kwargs : other inputs, e.g., to feed terminals for rule values
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of items | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/piperename.py#L44-L70 |
ggaughan/pipe2py | pipe2py/modules/piperename.py | pipe_rename | def pipe_rename(context=None, _INPUT=None, conf=None, **kwargs):
"""An operator that renames or copies fields in the input source.
Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
conf : {
'RULE': [
{
'op': {'value': 'rename or copy'},
'field': {'value': 'old field'},
'newval': {'value': 'new field'}
}
]
}
kwargs : other inputs, e.g., to feed terminals for rule values
Returns
-------
_OUTPUT : generator of items
"""
splits = get_splits(_INPUT, conf['RULE'], **cdicts(opts, kwargs))
_OUTPUT = parse_results(splits, **kwargs)
return _OUTPUT | python | def pipe_rename(context=None, _INPUT=None, conf=None, **kwargs):
"""An operator that renames or copies fields in the input source.
Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
conf : {
'RULE': [
{
'op': {'value': 'rename or copy'},
'field': {'value': 'old field'},
'newval': {'value': 'new field'}
}
]
}
kwargs : other inputs, e.g., to feed terminals for rule values
Returns
-------
_OUTPUT : generator of items
"""
splits = get_splits(_INPUT, conf['RULE'], **cdicts(opts, kwargs))
_OUTPUT = parse_results(splits, **kwargs)
return _OUTPUT | An operator that renames or copies fields in the input source.
Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
conf : {
'RULE': [
{
'op': {'value': 'rename or copy'},
'field': {'value': 'old field'},
'newval': {'value': 'new field'}
}
]
}
kwargs : other inputs, e.g., to feed terminals for rule values
Returns
-------
_OUTPUT : generator of items | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/piperename.py#L74-L100 |
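A sketch of pipe_rename with a single 'copy' rule, following the RULE shape given in the docstring. The input items are invented, and the expectation that 'copy' keeps the source field while adding the new one is inferred from the op name rather than confirmed from the source.

from pipe2py.modules.piperename import pipe_rename

items = [{'title': 'Hello world', 'link': 'http://www.example.com/'}]
conf = {
    'RULE': [{
        'op': {'value': 'copy'},
        'field': {'value': 'title'},
        'newval': {'value': 'summary'},
    }]
}

for item in pipe_rename(context=None, _INPUT=iter(items), conf=conf):
    print item  # expected: original fields plus 'summary' copied from 'title'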
ggaughan/pipe2py | pipe2py/modules/pipereverse.py | pipe_reverse | def pipe_reverse(context=None, _INPUT=None, conf=None, **kwargs):
"""An operator that reverses the order of source items. Not loopable. Not
lazy.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
conf : unused
Yields
------
_OUTPUT : items
"""
for item in reversed(list(_INPUT)):
yield item | python | def pipe_reverse(context=None, _INPUT=None, conf=None, **kwargs):
"""An operator that reverses the order of source items. Not loopable. Not
lazy.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
conf : unused
Yields
------
_OUTPUT : items
"""
for item in reversed(list(_INPUT)):
yield item | An operator that reverses the order of source items. Not loopable. Not
lazy.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
conf : unused
Yields
------
_OUTPUT : items | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipereverse.py#L11-L26 |
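A trivial sketch; the behaviour follows directly from the code above, which materialises _INPUT into a list and yields it in reverse.

from pipe2py.modules.pipereverse import pipe_reverse

items = [{'n': 1}, {'n': 2}, {'n': 3}]
print list(pipe_reverse(context=None, _INPUT=iter(items)))
# expected: [{'n': 3}, {'n': 2}, {'n': 1}]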
ggaughan/pipe2py | pipe2py/modules/pipecount.py | pipe_count | def pipe_count(context=None, _INPUT=None, conf=None, **kwargs):
"""An operator that counts the number of _INPUT items and yields it
forever. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
conf : not used
Yields
------
_OUTPUT : number of items in the feed
Examples
--------
>>> generator = (x for x in xrange(5))
>>> count = pipe_count(_INPUT=generator)
>>> count #doctest: +ELLIPSIS
<generator object pipe_count at 0x...>
>>> count.next()
5
"""
count = len(list(_INPUT))
# todo: check all operators (not placeable in loops)
while True:
yield count | python | def pipe_count(context=None, _INPUT=None, conf=None, **kwargs):
"""An operator that counts the number of _INPUT items and yields it
forever. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
conf : not used
Yields
------
_OUTPUT : number of items in the feed
Examples
--------
>>> generator = (x for x in xrange(5))
>>> count = pipe_count(_INPUT=generator)
>>> count #doctest: +ELLIPSIS
<generator object pipe_count at 0x...>
>>> count.next()
5
"""
count = len(list(_INPUT))
# todo: check all operators (not placeable in loops)
while True:
yield count | An operator that counts the number of _INPUT items and yields it
forever. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
conf : not used
Yields
------
_OUTPUT : number of items in the feed
Examples
--------
>>> generator = (x for x in xrange(5))
>>> count = pipe_count(_INPUT=generator)
>>> count #doctest: +ELLIPSIS
<generator object pipe_count at 0x...>
>>> count.next()
5 | https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipecount.py#L12-L39 |