def hash(ctx: click.Context, path: str) -> None:
"""
Recursively hash files in PATH and store the hashes in the database.
"""
path_pl: pathlib.Path = pathlib.Path(path).resolve()
t = mytimer()
dbname: pathlib.Path = ctx.obj['dbname']
hsh = Hasher(dbname)
hsh.clean()
number_scanned, number_hashed = hsh.hashdir(path_pl)
console.print(
f"[green] scanned {number_scanned} and hashed thereof {number_hashed} files in {t.get} sec") | def hash(ctx: click.Context, path: str) -> None:
"""
recursive hash files in PATH and store hash in database.
"""
path_pl: pathlib.Path = pathlib.Path(path).resolve()
t = mytimer()
dbname: pathlib.Path = ctx.obj['dbname']
hsh = Hasher(dbname)
hsh.clean()
number_scanned, number_hashed = hsh.hashdir(path_pl)
console.print(
f"[green] scanned {number_scanned} and hashed thereof {number_hashed} files in {t.get} sec") |
def help() -> None:
"""
Display some useful expressions for exiftool.
"""
click.echo("\nembedded exiftool help:")
click.echo(
"show dateTimeOriginal for all files:\texiftool -p '$filename $dateTimeOriginal' .")
click.echo(
"set dateTimeOrginial for all files: \texiftool -dateTimeOriginal='YYYY:mm:dd HH:MM:SS' .")
click.echo(
"rename files: \t\t\t\texiftool -filename=newname . {%f: filename %e: extension %c copynumber}")
click.echo("move file in dir to newdir/YEAR/MONTH: \texiftool -progress -recurse '-Directory<DateTimeOriginal' -d newdir/%Y/%m dir")
click.echo("\n\nembedded unix tool help:")
click.echo("find/delete empty dirs: \t\t\tfind . -type d -empty <-delete>") | def help() -> None:
"""
Display some useful expressions for exiftool.
"""
click.echo("\nembedded exiftool help:")
click.echo(
"show dateTimeOriginal for all files:\texiftool -p '$filename $dateTimeOriginal' .")
click.echo(
"set dateTimeOrginial for all files: \texiftool -dateTimeOriginal='YYYY:mm:dd HH:MM:SS' .")
click.echo(
"rename files: \t\t\t\texiftool -filename=newname . {%f: filename %e: extension %c copynumber}")
click.echo("move file in dir to newdir/YEAR/MONTH: \texiftool -progress -recurse '-Directory<DateTimeOriginal' -d newdir/%Y/%m dir")
click.echo("\n\nembedded unix tool help:")
click.echo("find/delete empty dirs: \t\t\tfind . -type d -empty <-delete>") |
def resnet56(**kwargs):
"""Constructs a ResNet-56 model.
"""
model = ResNet(Bottleneck, [9, 9, 9], **kwargs)
return model

def resnet44(**kwargs):
"""Constructs a ResNet-44 model.
"""
model = ResNet(Bottleneck, [7, 7, 7], **kwargs)
return model

def resnet110(**kwargs):
"""Constructs a ResNet-110 model.
"""
model = ResNet(Bottleneck, [18, 18, 18], **kwargs)
return model

def resnet1202(**kwargs):
"""Constructs a ResNet-1202 model.
"""
model = ResNet(Bottleneck, [200, 200, 200], **kwargs)
return model

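# Usage sketch for the constructors above (assumptions: PyTorch, a CIFAR-style
# ResNet that accepts a `num_classes` keyword and 32x32 RGB input; neither is
# confirmed by this snippet alone).
import torch

model = resnet56(num_classes=10)
x = torch.randn(1, 3, 32, 32)        # one CIFAR-sized image
logits = model(x)
print(logits.shape)                  # expected: torch.Size([1, 10])
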
def create_edit_exam(request, id=None):
"""
This is an inline formset view that creates or edits an exam entry along with its exam subjects, which can have multiple occurrences.
"""
user = request.user
if id:
exam_master = get_object_or_404(ExamMaster, id=id)
exam_subject = ExamSubject.objects.filter(exam_master=exam_master)
formset = ExamSubjectInlineFormSet(instance=exam_master)
if exam_master.created_by != request.user:
return HttpResponseForbidden()
else:
exam_master = ExamMaster(created_by=user)
formset = ExamSubjectInlineFormSet(instance=exam_master)
if request.POST:
form = ExamMasterForm(request.POST, instance=exam_master)
formset = ExamSubjectInlineFormSet(request.POST or None,prefix='exam_subject')
if form.is_valid():
exam_form = form.save(commit=False)
classe = exam_form.select_class
students = StudentAdmission.objects.filter(class_name=classe)
if id:
exam_form.last_user = user
formset = ExamSubjectInlineFormSet(request.POST,prefix='exam_subject',instance=exam_form)
if formset.is_valid():
exam_form.save()
exam_subject = formset.save(commit=False)
for e in exam_subject:
if id:
e.last_user = user
else: e.created_by = user
e.save()
exam_form_id = exam_form.id
exam_subject_id = e.id
for student in students:
exam_mark = ExamMarkEntry.objects.create(
exam_master_id = exam_form_id,
created_by = request.user,
student = student,
exam_subject_id = exam_subject_id,
)
return redirect('examination:exam_list')
else:
print("formset not valid")
print("error ", formset.errors)
print("non form error ", formset.non_form_errors())
else: print("form not valid")
else:
form = ExamMasterForm(instance=exam_master)
formset = ExamSubjectInlineFormSet(instance=exam_master, prefix='exam_subject')
variables = {
'form': form,
'formset': formset
}
template = 'examination/exam_subjects.html'
return render(request, template, variables)

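# Sketch (assumption): ExamSubjectInlineFormSet used above is typically produced
# by Django's inlineformset_factory, tying ExamSubject child rows to their
# ExamMaster parent. The field names listed here are illustrative, not taken
# from the source models.
from django.forms import inlineformset_factory

ExamSubjectInlineFormSet = inlineformset_factory(
    ExamMaster,                            # parent model (defined elsewhere in the project)
    ExamSubject,                           # child model (defined elsewhere in the project)
    fields=("subject_name", "max_marks"),  # hypothetical field names
    extra=1,
    can_delete=True,
)
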
def create_edit_class(request, id=None):
"""
This is an inline formset view that creates or edits a class entry along with its class syllabus, which can have multiple occurrences.
"""
user = request.user
if id:
classes = get_object_or_404(Class, id=id)
print('classes:', classes)
class_syllabus = ClassSyllabus.objects.filter(select_class=classes)
formset = ClassSyllabusInlineFormSet(instance=classes)
if classes.created_by != request.user:
return HttpResponseForbidden()
else:
classes = Class(created_by=user)
formset = ClassSyllabusInlineFormSet(instance=classes)
if request.POST:
form = ClassForm(request.POST, instance=classes)
formset = ClassSyllabusInlineFormSet(request.POST or None,prefix='class_syllabus')
if form.is_valid():
class_form = form.save(commit=False)
if id:
class_form.last_user = user
formset = ClassSyllabusInlineFormSet(request.POST,prefix='class_syllabus',instance=class_form)
if formset.is_valid():
class_form.save()
class_syllabus = formset.save(commit=False)
for e in class_syllabus:
if id:
e.last_user = user
else: e.created_by = user
e.save()
return redirect('classes:class_list')
else:
print("formset not valid")
print("error ", formset.errors)
print("non form error ", formset.non_form_errors())
else: print("form not valid")
else:
form = ClassForm(instance=classes)
formset = ClassSyllabusInlineFormSet(instance=classes, prefix='class_syllabus')
variables = {
'form': form,
'formset': formset
}
template = 'classes/class_syllabus.html'
return render(request, template, variables)

def create_edit_discipline(request, id=None):
"""
This is an inline formset view that creates or edits a discipline entry along with its discipline details, which can have multiple occurrences.
"""
user = request.user
if id:
discipline = get_object_or_404(Disciplines, id=id)
discipline_details = Disciplines_Details.objects.filter(discipline=discipline)
formset = DisciplineDetailsInlineFormSet(instance=discipline)
if discipline.created_by != request.user:
return HttpResponseForbidden()
else:
discipline = Disciplines(created_by=user)
formset = DisciplineDetailsInlineFormSet(instance=discipline)
if request.POST:
form = DisciplineForm(request.POST, instance=discipline)
formset = DisciplineDetailsInlineFormSet(request.POST,prefix='discipline_detail')
if form.is_valid():
discipline_form = form.save(commit=False)
if id:
discipline_form.last_user = user
formset = DisciplineDetailsInlineFormSet(request.POST,prefix='discipline_detail',instance=discipline_form)
if formset.is_valid():
discipline_form.save()
discipline_details = formset.save(commit=False)
for e in discipline_details:
if id:
e.last_user = user
else: e.created_by = user
e.save()
return redirect('students:disciplines_list')
else:
print("formset not valid")
print("error ", formset.errors)
print("non form error ", formset.non_form_errors())
else: print("form not valid")
else:
form = DisciplineForm(instance=discipline)
formset = DisciplineDetailsInlineFormSet(instance=discipline, prefix='discipline_detail')
variables = {
'form': form,
'formset': formset
}
template = 'students/disciplines/discipline_form.html'
return render(request, template, variables)

def decode_string(v, encoding="utf-8"):
"""Returns the given value as a Unicode string (if possible)."""
if isinstance(encoding, basestring):
encoding = ((encoding,),) + (("windows-1252",), ("utf-8", "ignore"))
if isinstance(v, str):
for e in encoding:
try:
return v.decode(*e)
except:
pass
return v
return unicode(v)

def encode_string(v, encoding="utf-8"):
"""Returns the given value as a Python byte string (if possible)."""
if isinstance(encoding, basestring):
encoding = ((encoding,),) + (("windows-1252",), ("utf-8", "ignore"))
if isinstance(v, unicode):
for e in encoding:
try:
return v.encode(*e)
except:
pass
return v
return str(v)

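# The two helpers above are Python 2 code (basestring, unicode, bytes-as-str).
# A minimal Python 3 sketch of the same fallback idea (assumption: the same
# encoding order; the function name is mine, not the library's):
def decode_string_py3(v, encoding="utf-8"):
    """Return v as str, trying utf-8, then windows-1252, then utf-8 ignoring errors."""
    if isinstance(v, bytes):
        for args in ((encoding,), ("windows-1252",), ("utf-8", "ignore")):
            try:
                return v.decode(*args)
            except UnicodeDecodeError:
                pass
    return str(v)

print(decode_string_py3(b"caf\xe9"))   # utf-8 fails, windows-1252 gives 'café'
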
def _fetch(self, function, *args, **kwargs):
"""Executes the function and sets AsynchronousRequest.response."""
try:
self._response = function(*args, **kwargs)
except Exception as e:
self._error = e

def _parse(self):
"""Parses all the parts of the URL string to a dictionary.
URL format: protocol://username:password@domain:port/path/page?querystring#anchor
For example: http://user:[email protected]:992/animal/bird?species=seagull&q#wings
This is a cached method that is only invoked when necessary, and only once.
"""
p = urlsplit(self._string)
P = {PROTOCOL: p[0], # http
USERNAME: u"", # user
PASSWORD: u"", # pass
DOMAIN: p[1], # example.com
PORT: u"", # 992
PATH: p[2], # [animal]
PAGE: u"", # bird
QUERY: urldecode(p[3]), # {"species": "seagull", "q": None}
ANCHOR: p[4] # wings
}
# Split the username and password from the domain.
if "@" in P[DOMAIN]:
P[USERNAME], \
P[PASSWORD] = (p[1].split("@")[0].split(":") + [u""])[:2]
P[DOMAIN] = p[1].split("@")[1]
# Split the port number from the domain.
if ":" in P[DOMAIN]:
P[DOMAIN], \
P[PORT] = P[DOMAIN].split(":")
P[PORT] = P[PORT].isdigit() and int(P[PORT]) or P[PORT]
# Split the base page from the path.
if "/" in P[PATH]:
P[PAGE] = p[2].split("/")[-1]
P[PATH] = p[2][:len(p[2]) - len(P[PAGE])].strip("/").split("/")
P[PATH] = filter(lambda v: v != "", P[PATH])
else:
P[PAGE] = p[2].strip("/")
P[PATH] = []
self.__dict__["_parts"] = P | def _parse(self):
"""Parses all the parts of the URL string to a dictionary.
URL format: protocal://username:password@domain:port/path/page?querystring#anchor
For example: http://user:[email protected]:992/animal/bird?species=seagull&q#wings
This is a cached method that is only invoked when necessary, and only once.
"""
p = urlsplit(self._string)
P = {PROTOCOL: p[0], # http
USERNAME: u"", # user
PASSWORD: u"", # pass
DOMAIN: p[1], # example.com
PORT: u"", # 992
PATH: p[2], # [animal]
PAGE: u"", # bird
QUERY: urldecode(p[3]), # {"species": "seagull", "q": None}
ANCHOR: p[4] # wings
}
# Split the username and password from the domain.
if "@" in P[DOMAIN]:
P[USERNAME], \
P[PASSWORD] = (p[1].split("@")[0].split(":") + [u""])[:2]
P[DOMAIN] = p[1].split("@")[1]
# Split the port number from the domain.
if ":" in P[DOMAIN]:
P[DOMAIN], \
P[PORT] = P[DOMAIN].split(":")
P[PORT] = P[PORT].isdigit() and int(P[PORT]) or P[PORT]
# Split the base page from the path.
if "/" in P[PATH]:
P[PAGE] = p[2].split("/")[-1]
P[PATH] = p[2][:len(p[2]) - len(P[PAGE])].strip("/").split("/")
P[PATH] = filter(lambda v: v != "", P[PATH])
else:
P[PAGE] = p[2].strip("/")
P[PATH] = []
self.__dict__["_parts"] = P |
def parts(self):
"""Yields a dictionary with the URL parts."""
if not self._parts:
self._parse()
return self._parts

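# Illustration of the decomposition that _parse() performs, using the standard
# library on the docstring's example URL (the module above targets Python 2's
# urlparse; urllib.parse is used here purely for demonstration).
from urllib.parse import urlsplit

p = urlsplit("http://user:[email protected]:992/animal/bird?species=seagull&q#wings")
print(p.scheme)                 # 'http'            -> PROTOCOL
print(p.username, p.password)   # 'user' 'pass'     -> USERNAME, PASSWORD
print(p.hostname, p.port)       # 'example.com' 992 -> DOMAIN, PORT
print(p.path)                   # '/animal/bird'    -> PATH ['animal'], PAGE 'bird'
print(p.query)                  # 'species=seagull&q'
print(p.fragment)               # 'wings'           -> ANCHOR
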
def open(self, timeout=10, proxy=None, user_agent=USER_AGENT, referrer=REFERRER, authentication=None):
"""Returns a connection to the url from which data can be retrieved
with connection.read().
When the timeout (in seconds) is exceeded, raises a URLTimeout.
When another error occurs, raises a URLError subclass (e.g. HTTP404NotFound).
"""
url = self.string
# Handle local files with urllib.urlopen() instead of
# urllib2.urlopen().
if os.path.exists(url):
return urlopen(url)
# Handle method=POST with query string as a separate parameter.
post = self.method == POST and self.querystring or None
socket.setdefaulttimeout(timeout)
# Handle proxies and cookies.
handlers = []
if proxy:
handlers.append(urllib2.ProxyHandler({proxy[1]: proxy[0]}))
handlers.append(urllib2.HTTPCookieProcessor(CookieJar()))
handlers.append(urllib2.HTTPHandler)
urllib2.install_opener(urllib2.build_opener(*handlers))
# Send request.
try:
request = urllib2.Request(bytestring(url), post, {
"User-Agent": user_agent,
"Referer": referrer
})
# Basic authentication is established with
# authentication=(username, password).
if authentication is not None:
request.add_header("Authorization", "Basic %s" %
base64.encodestring('%s:%s' % authentication))
return urllib2.urlopen(request)
except urllib2.HTTPError as e:
if e.code == 301:
raise HTTP301Redirect(src=e, url=url)
if e.code == 400:
raise HTTP400BadRequest(src=e, url=url)
if e.code == 401:
raise HTTP401Authentication(src=e, url=url)
if e.code == 403:
raise HTTP403Forbidden(src=e, url=url)
if e.code == 404:
raise HTTP404NotFound(src=e, url=url)
if e.code == 420:
raise HTTP420Error(src=e, url=url)
if e.code == 429:
raise HTTP429TooMayRequests(src=e, url=url)
if e.code == 500:
raise HTTP500InternalServerError(src=e, url=url)
if e.code == 503:
raise HTTP503ServiceUnavailable(src=e, url=url)
raise HTTPError(str(e), src=e, url=url)
except BadStatusLine as e:
raise HTTPError(str(e), src=e, url=url)
except socket.timeout as e:
raise URLTimeout(src=e, url=url)
except socket.error as e:
if "timed out" in str((e.args + ("", ""))[0]) \
or "timed out" in str((e.args + ("", ""))[1]):
raise URLTimeout(src=e, url=url)
raise URLError(str(e), src=e, url=url)
except urllib2.URLError as e:
if "timed out" in str(e.reason):
raise URLTimeout(src=e, url=url)
raise URLError(str(e), src=e, url=url)
except ValueError as e:
raise URLError(str(e), src=e, url=url)

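# Usage sketch for the method above: read a page and fall back to None on the
# exceptions that open() raises (requires network access; the address is
# illustrative).
url = URL("http://www.example.com/")
try:
    data = url.open(timeout=5).read()
except (HTTP404NotFound, HTTPError, URLTimeout, URLError):
    data = None
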
def download(self, timeout=10, cached=True, throttle=0, proxy=None, user_agent=USER_AGENT, referrer=REFERRER, authentication=None, unicode=False, **kwargs):
"""Downloads the content at the given URL (by default it will be cached
locally).
With unicode=True, the content is returned as a Unicode string.
"""
# Filter OAuth parameters from cache id (they will be unique for each
# request).
if self._parts is None and self.method == GET and "oauth_" not in self._string:
id = self._string
else:
id = repr(self.parts)
id = re.sub("u{0,1}'oauth_.*?': u{0,1}'.*?', ", "", id)
# Keep a separate cache of unicode and raw download for same URL.
if unicode is True:
id = "u" + id
if cached and id in CACHE:
if isinstance(CACHE, dict): # Not a Cache object.
return CACHE[id]
if unicode is True:
return CACHE[id]
if unicode is False:
return CACHE.get(id, unicode=False)
t = time.time()
# Open a connection with the given settings, read it and (by default)
# cache the data.
try:
data = self.open(
timeout, proxy, user_agent, referrer, authentication).read()
except socket.timeout as e:
raise URLTimeout(src=e, url=self.string)
if unicode is True:
data = u(data)
if cached:
CACHE[id] = data
if throttle:
time.sleep(max(throttle - (time.time() - t), 0))
return data

def exists(self, timeout=10):
"""Yields False if the URL generates a HTTP404NotFound error."""
try:
self.open(timeout)
except HTTP404NotFound:
return False
except HTTPError:
return True
except URLTimeout:
return True
except URLError:
return False
except:
return True
return True

def mimetype(self, timeout=10):
""" Yields the MIME-type of the document at the URL, or None.
MIME is more reliable than simply checking the document extension.
You can then do: URL.mimetype in MIMETYPE_IMAGE.
"""
try:
return self.headers["content-type"].split(";")[0]
except KeyError:
return None

def download(url=u"", method=GET, query={}, timeout=10, cached=True, throttle=0, proxy=None, user_agent=USER_AGENT, referrer=REFERRER, authentication=None, unicode=False):
"""Downloads the content at the given URL (by default it will be cached
locally).
With unicode=True, the content is returned as a Unicode string.
"""
return URL(url, method, query).download(timeout, cached, throttle, proxy, user_agent, referrer, authentication, unicode)

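# Usage sketch for the module-level download() above: fetch a page as a Unicode
# string, cache it locally, and sleep so that consecutive uncached requests are
# at least one second apart.
html = download("http://www.example.com/", unicode=True, cached=True, throttle=1)
print(html[:80])
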
def bind(obj, method, function):
"""Attaches the function as a method with the given name to the given
object."""
try:
import types
setattr(obj, method, types.MethodType(function, obj))
except ImportError:
import new
setattr(obj, method, new.instancemethod(function, obj))

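# Usage sketch for bind(): attach a function to an existing object as a bound
# method (the class and method names here are illustrative).
class Bag(object):
    pass

bag = Bag()
bind(bag, "describe", lambda self: "a %s instance" % type(self).__name__)
print(bag.describe())            # -> 'a Bag instance'
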
def update(self, bytes=1024):
"""Reads a number of bytes from the stream.
If a delimiter is encountered, calls Stream.parse() on the
packet.
"""
packets = []
self.buffer += self.socket.read(bytes)
self.buffer = self.buffer.split(self.delimiter, 1)
while len(self.buffer) > 1:
data = self.buffer[0]
data = self.parse(data)
if data is not None:
packets.append(data)
self.buffer = self.buffer[-1]
self.buffer = self.buffer.split(self.delimiter, 1)
self.buffer = self.buffer[-1]
self.extend(packets)
return packets

def stream(url, delimiter="\n", parse=lambda data: data, **kwargs):
"""Returns a new Stream with the given parse method."""
stream = Stream(url, delimiter, **kwargs)
bind(stream, "parse", lambda stream, data: parse(data))
return stream

def strip(self, html, exclude=[], replace=blocks):
"""Returns the HTML string with all element tags (e.g. <p>) removed.
- exclude : a list of tags to keep. Element attributes are stripped.
To preserve attributes a dict of (tag name, [attribute])-items can be given.
- replace : a dictionary of (tag name, (replace_before, replace_after))-items.
By default, block-level elements are separated with linebreaks.
"""
if html is None:
return None
self._exclude = isinstance(
exclude, dict) and exclude or dict.fromkeys(exclude, [])
self._replace = replace
self._data = []
self.feed(self.clean(html))
self.close()
self.reset()
return "".join(self._data) | def strip(self, html, exclude=[], replace=blocks):
"""Returns the HTML string with all element tags (e.g. <p>) removed.
- exclude : a list of tags to keep. Element attributes are stripped.
To preserve attributes a dict of (tag name, [attribute])-items can be given.
- replace : a dictionary of (tag name, (replace_before, replace_after))-items.
By default, block-level elements are separated with linebreaks.
"""
if html is None:
return None
self._exclude = isinstance(
exclude, dict) and exclude or dict.fromkeys(exclude, [])
self._replace = replace
self._data = []
self.feed(self.clean(html))
self.close()
self.reset()
return "".join(self._data) |
def strip_element(string, tag, attributes=""):
"""Removes all elements with the given tagname and attributes from the
string.
Open and close tags are kept in balance.
No HTML parser is used: strip_element(s, "a", 'class="x"') matches
'<a class="x">' or '<a href="x" class="x">' but not "<a class='x'>".
"""
s = string.lower() # Case-insensitive.
t = tag.strip("</>")
a = (" " + attributes.lower().strip()).rstrip()
i = 0
j = 0
while j >= 0:
#i = s.find("<%s%s" % (t, a), i)
m = re.search(r"<%s[^\>]*?%s" % (t, a), s[i:])
i = i + m.start() if m else -1
j = s.find("</%s>" % t, i + 1)
opened, closed = s[i:j].count("<%s" % t), 1
while opened > closed and j >= 0:
k = s.find("</%s>" % t, j + 1)
opened += s[j:k].count("<%s" % t)
closed += 1
j = k
if i < 0:
return string
if j < 0:
return string[:i]
string = string[:i] + string[j + len(t) + 3:]
s = string.lower()
return string

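# Usage sketch for strip_element(): the tag is removed together with its
# contents while keeping open/close tags balanced.
html = '<div>a<script>alert(1)</script>b</div>'
print(strip_element(html, "script"))   # -> '<div>ab</div>'
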
def collapse_spaces(string, indentation=False, replace=" "):
"""Returns a string with consecutive spaces collapsed to a single space.
Whitespace on empty lines and at the end of each line is removed.
With indentation=True, retains leading whitespace on each line.
"""
p = []
for x in string.splitlines():
n = indentation and len(x) - len(x.lstrip()) or 0
p.append(x[:n] + RE_SPACES.sub(replace, x[n:]).strip())
return "\n".join(p) | def collapse_spaces(string, indentation=False, replace=" "):
"""Returns a string with consecutive spaces collapsed to a single space.
Whitespace on empty lines and at the end of each line is removed.
With indentation=True, retains leading whitespace on each line.
"""
p = []
for x in string.splitlines():
n = indentation and len(x) - len(x.lstrip()) or 0
p.append(x[:n] + RE_SPACES.sub(replace, x[n:]).strip())
return "\n".join(p) |
def collapse_tabs(string, indentation=False, replace=" "):
"""Returns a string with (consecutive) tabs replaced by a single space.
Whitespace on empty lines and at the end of each line is removed.
With indentation=True, retains leading whitespace on each line.
"""
p = []
for x in string.splitlines():
n = indentation and len(x) - len(x.lstrip()) or 0
p.append(x[:n] + RE_TABS.sub(replace, x[n:]).strip())
return "\n".join(p) | def collapse_tabs(string, indentation=False, replace=" "):
"""Returns a string with (consecutive) tabs replaced by a single space.
Whitespace on empty lines and at the end of each line is removed.
With indentation=True, retains leading whitespace on each line.
"""
p = []
for x in string.splitlines():
n = indentation and len(x) - len(x.lstrip()) or 0
p.append(x[:n] + RE_TABS.sub(replace, x[n:]).strip())
return "\n".join(p) |
def collapse_linebreaks(string, threshold=1):
"""Returns a string with consecutive linebreaks collapsed to at most the
given threshold.
Whitespace on empty lines and at the end of each line is removed.
"""
n = "\n" * threshold
p = [s.rstrip() for s in string.splitlines()]
string = "\n".join(p)
string = re.sub(n + r"+", n, string)
return string

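# Usage sketch for the collapse_* helpers above (assumption: RE_SPACES and
# RE_TABS are module-level patterns roughly equivalent to re.compile(r" +")
# and re.compile(r"\t+"), which is how the functions use them).
messy = "one  two\t\tthree  \n\n\n\nfour   "
s = collapse_spaces(messy)       # runs of spaces -> single space, lines stripped
s = collapse_tabs(s)             # runs of tabs   -> single space
s = collapse_linebreaks(s, 2)    # at most two consecutive linebreaks
print(repr(s))                   # expected under the assumption: 'one two three\n\nfour'
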
def search(self, query, type=SEARCH, start=1, count=10, sort=RELEVANCY, size=None, cached=True, **kwargs):
"""Returns a list of results from Google for the given query.
- type : SEARCH,
- start: maximum 100 results => start 1-10 with count=10,
- count: maximum 10,
There is a daily limit of 10,000 queries. Google Custom Search is a paid service.
"""
if type != SEARCH:
raise SearchEngineTypeError
if not query or count < 1 or start < 1 or start > (100 / count):
return Results(GOOGLE, query, type)
# 1) Create request URL.
url = URL(GOOGLE, query={
"key": self.license or GOOGLE_LICENSE,
"cx": GOOGLE_CUSTOM_SEARCH_ENGINE,
"q": query,
"start": 1 + (start - 1) * count,
"num": min(count, 10),
"alt": "json"
})
# 2) Restrict language.
if self.language is not None:
url.query["lr"] = "lang_" + self.language
# 3) Parse JSON response.
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
data = url.download(cached=cached, **kwargs)
data = json.loads(data)
if data.get("error", {}).get("code") == 403:
raise SearchEngineLimitError
results = Results(GOOGLE, query, type)
results.total = int(
data.get("queries", {}).get("request", [{}])[0].get("totalResults") or 0)
for x in data.get("items", []):
r = Result(url=None)
r.url = self.format(x.get("link"))
r.title = self.format(x.get("title"))
r.text = self.format(
x.get("htmlSnippet").replace("<br> ", "").replace("<b>...</b>", "..."))
r.language = self.language or ""
r.date = ""
if not r.date:
# Google Search results can start with a date (parsed from the
# content):
m = RE_GOOGLE_DATE.match(r.text)
if m:
r.date = m.group(1)
r.text = "..." + r.text[len(m.group(0)):]
results.append(r)
return results

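# Usage sketch for the method above (assumptions: it belongs to a Google engine
# class as in pattern.web, and a valid Custom Search API key is supplied as
# `license`).
engine = Google(license="YOUR-API-KEY", language="en")
for result in engine.search("web mining", start=1, count=10, cached=False):
    print(result.title, result.url)
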
def translate(self, string, input="en", output="fr", **kwargs):
"""Returns the translation of the given string in the desired output
language.
Google Translate is a paid service; a license without billing raises HTTP401Authentication.
"""
url = URL("https://www.googleapis.com/language/translate/v2?", method=GET, query={
"key": self.license or GOOGLE_LICENSE,
"q": string, # 1000 characters maximum
"source": input,
"target": output
})
kwargs.setdefault("cached", False)
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
if input == output:
return string
try:
data = url.download(**kwargs)
except HTTP403Forbidden:
raise HTTP401Authentication(
"Google translate API is a paid service")
data = json.loads(data)
data = data.get("data", {}).get(
"translations", [{}])[0].get("translatedText", "")
data = decode_entities(data)
return u(data)

def identify(self, string, **kwargs):
""" Returns a (language, confidence)-tuple for the given string.
Google Translate is a paid service; a license without billing raises HTTP401Authentication.
"""
url = URL("https://www.googleapis.com/language/translate/v2/detect?", method=GET, query={
"key": self.license or GOOGLE_LICENSE,
"q": string[:1000]
})
kwargs.setdefault("cached", False)
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
try:
data = url.download(**kwargs)
except HTTP403Forbidden:
raise HTTP401Authentication(
"Google translate API is a paid service")
data = json.loads(data)
data = data.get("data", {}).get("detections", [[{}]])[0][0]
data = u(data.get("language")), float(data.get("confidence"))
return data

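# Usage sketch for translate() and identify() above (assumption: both are
# methods of the same Google engine object, and the API key has billing
# enabled, otherwise HTTP401Authentication is raised).
engine = Google(license="YOUR-API-KEY")
print(engine.translate("hello world", input="en", output="fr"))
print(engine.identify("Bonjour tout le monde"))     # e.g. ('fr', 0.97)
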
def search(self, query, type=SEARCH, start=1, count=10, sort=RELEVANCY, size=None, cached=True, **kwargs):
"""Returns a list of results from Yahoo for the given query.
- type : SEARCH, IMAGE or NEWS,
- start: maximum 1000 results => start 1-100 with count=10, 1000/count,
- count: maximum 50, or 35 for images.
There is no daily limit, however Yahoo BOSS is a paid service.
"""
if type not in (SEARCH, IMAGE, NEWS):
raise SearchEngineTypeError
if type == SEARCH:
url = YAHOO + "web"
if type == IMAGE:
url = YAHOO + "images"
if type == NEWS:
url = YAHOO + "news"
if not query or count < 1 or start < 1 or start > 1000 / count:
return Results(YAHOO, query, type)
# 1) Create request URL.
url = URL(url, method=GET, query={
"q": query.replace(" ", "+"),
"start": 1 + (start - 1) * count,
"count": min(count, type == IMAGE and 35 or 50),
"format": "json"
})
# 2) Restrict language.
if self.language is not None:
market = locale.market(self.language)
if market:
url.query["market"] = market.lower()
# 3) Authenticate.
url = self._authenticate(url)
# 4) Parse JSON response.
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
try:
data = url.download(cached=cached, **kwargs)
except HTTP401Authentication:
raise HTTP401Authentication(
"Yahoo %s API is a paid service" % type)
except HTTP403Forbidden:
raise SearchEngineLimitError
data = json.loads(data)
data = data.get("bossresponse") or {}
data = data.get(
{SEARCH: "web", IMAGE: "images", NEWS: "news"}[type], {})
results = Results(YAHOO, query, type)
results.total = int(data.get("totalresults") or 0)
for x in data.get("results", []):
r = Result(url=None)
r.url = self.format(x.get("url", x.get("clickurl")))
r.title = self.format(x.get("title"))
r.text = self.format(x.get("abstract"))
r.date = self.format(x.get("date"))
r.author = self.format(x.get("source"))
r.language = self.format(x.get("language") and
x.get("language").split(" ")[0] or self.language or "")
results.append(r)
return results

def search(self, query, type=SEARCH, start=None, count=None, sort=RELEVANCY, size=None, cached=True, **kwargs):
"""" Returns a list of results from DuckDuckGo for the given query."""
if type != SEARCH:
raise SearchEngineTypeError
# 1) Construct request URL.
url = URL(DUCKDUCKGO, method=GET, query={
"q": query,
"o": "json"
})
# 2) Restrict language.
if type == SEARCH and self.language is not None:
url.query["kl"] = self.language
# 3) Parse JSON response.
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
data = url.download(cached=cached, **kwargs)
data = json.loads(data)
results = Results(DUCKDUCKGO, query, type)
results.total = None
for x in data.get("Results", []):
if x.get("FirstURL"):
r = Result(url=None)
# Parse official website link.
r.url = self.format(x.get("FirstURL"))
r.title = self.format(data.get("Heading"))
r.text = self.format(data.get("Abstract"))
r.author = self.format(data.get("AbstractSource"))
r.type = self.format(REFERENCE)
results.append(r)
for topic in data.get("RelatedTopics", []):
for x in topic.get("Topics", [topic]):
r = Result(url=None)
r.url = x.get("FirstURL")
# Parse title and type from URL (e.g.,
# http://duckduckgo.com/d/Cats?kl=en).
m = re.match(
r"^http://duckduckgo.com/([a-z]/)?(.*?)(\?|$)", r.url)
# Parse title: "Cats".
s1 = m and m.group(2) or "" # Title: "Cats"
s1 = u(decode_url(s1.encode("utf-8")))
s1 = s1.strip().replace("_", " ")
s1 = s1[:1].upper() + s1[1:]
# Parse description; the part before the first "-" or "," was
# the link.
s2 = x.get("Text", "").strip()
s2 = re.sub(r" +", " ", s2)
s2 = s2[:1].upper() + s2[1:] or ""
s2 = s2.startswith(s1) \
and "<a href=\"%s\">%s</a>%s" % (r.url, s1, s2[len(s1):]) \
or re.sub(r"^(.*?)( - | or |, )(.*?)", "<a href=\"%s\">\\1</a>\\2\\3" % r.url, s2)
# Parse type: "d/" => "definition".
s3 = m and m.group(1) or ""
s3 = {"c": CATEGORY, "d": DEFINITION}.get(s3.rstrip("/"), "")
s3 = topic.get("Name", "").lower() or s3
s3 = re.sub("^in ", "", s3)
# Format result.
r.url = self.format(r.url)
r.title = self.format(s1)
r.text = self.format(s2)
r.type = self.format(s3)
results.append(r)
return results

def answer(self, string, **kwargs):
"""Returns a DuckDuckGo answer for the given string (e.g., math,
spelling, ...)"""
url = URL(DUCKDUCKGO, method=GET, query={
"q": string,
"o": "json"
})
kwargs.setdefault("cached", False)
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
data = url.download(**kwargs)
data = json.loads(data)
data = data.get(kwargs.get("field", "Answer"))
return u(data)

def spelling(self, string):
"""Returns a list of spelling suggestions for the given string."""
s = self.answer("spell " + string, cached=True)
s = re.findall(r"<a.*?>(.*?)</a>", s)
return s

def definition(self, string):
"""Returns a dictionary definition for the given string."""
s = self.answer(string, field="Definition", cached=True)
s = re.sub(r"^.*? definition: ", "", s)
s = re.sub(r"(^'''.*?''' |^)(.)(.*?)$",
lambda m: m.group(1) + m.group(2).upper() + m.group(3), s)
return s

def search(self, query, type=SEARCH, start=1, count=10, sort=RELEVANCY, size=None, cached=False, **kwargs):
"""Returns a list of results from Twitter for the given query.
- type : SEARCH,
- start: Result.id or int,
- count: maximum 100.
There is a limit of 150+ queries per 15 minutes.
"""
if type != SEARCH:
raise SearchEngineTypeError
if not query or count < 1 or (isinstance(start, (int, long, float)) and start < 1):
return Results(TWITTER, query, type)
if not isinstance(start, (int, long, float)):
id = int(start) - 1 if start and start.isdigit() else ""
else:
if start == 1:
self._pagination = {}
if start <= 10000:
id = (query, kwargs.get("geo"), kwargs.get(
"date"), int(start) - 1, count)
id = self._pagination.get(id, "")
else:
id = int(start) - 1
# 1) Construct request URL.
url = URL(TWITTER + "search/tweets.json?", method=GET)
url.query = {
"q": query,
"max_id": id,
"count": min(count, 100)
}
# 2) Restrict location with geo=(latitude, longitude, radius).
# It can also be a (latitude, longitude)-tuple with default radius
# "10km".
if "geo" in kwargs:
url.query["geocode"] = ",".join(
(map(str, kwargs.pop("geo")) + ["10km"])[:3])
# 3) Restrict most recent with date="YYYY-MM-DD".
# Only older tweets are returned.
if "date" in kwargs:
url.query["until"] = kwargs.pop("date")
# 4) Restrict language.
url.query["lang"] = self.language or ""
# 5) Authenticate.
url = self._authenticate(url)
# 6) Parse JSON response.
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
try:
data = url.download(cached=cached, **kwargs)
except HTTP420Error:
raise SearchEngineLimitError
except HTTP429TooMayRequests:
raise SearchEngineLimitError
data = json.loads(data)
results = Results(TWITTER, query, type)
results.total = None
for x in data.get("statuses", []):
r = Result(url=None)
r.id = self.format(x.get("id_str"))
r.url = self.format(
TWITTER_STATUS % (x.get("user", {}).get("screen_name"), x.get("id_str")))
r.text = self.format(x.get("text"))
r.date = self.format(x.get("created_at"))
r.author = self.format(x.get("user", {}).get("screen_name"))
r.language = self.format(
x.get("metadata", {}).get("iso_language_code"))
r.shares = self.format(x.get("retweet_count", 0))
# Profile picture URL.
r.profile = self.format(x.get("user", {}).get("profile_image_url"))
# Fetch original status if retweet is truncated (i.e., ends with
# "...").
rt = x.get("retweeted_status", None)
if rt:
comment = re.search(r"^(.*? )RT", r.text)
comment = comment.group(1) if comment else ""
r.text = self.format(
"RT @%s: %s" % (rt["user"]["screen_name"], rt["text"]))
results.append(r)
# Twitter.search(start=id, count=10) takes a tweet.id,
# and returns 10 results that are older than this id.
# In the past, start took an int used for classic pagination.
# However, new tweets may arrive quickly,
# so that by the time Twitter.search(start=2) is called,
# it will yield results from page 1 (or even newer results).
# For backward compatibility, we keep page cache,
# that remembers the last id for a "page" for a given query,
# when called in a loop.
#
# Store the last id retrieved.
# If search() is called again with start+1, start from this id.
if isinstance(start, (int, long, float)):
k = (query, kwargs.get("geo"), kwargs.get(
"date"), int(start), count)
if results:
self._pagination[k] = str(int(results[-1].id) - 1)
else:
self._pagination[k] = id
return results | def search(self, query, type=SEARCH, start=1, count=10, sort=RELEVANCY, size=None, cached=False, **kwargs):
"""Returns a list of results from Twitter for the given query.
- type : SEARCH,
- start: Result.id or int,
- count: maximum 100.
There is a limit of 150+ queries per 15 minutes.
"""
if type != SEARCH:
raise SearchEngineTypeError
if not query or count < 1 or (isinstance(start, (int, long, float)) and start < 1):
return Results(TWITTER, query, type)
if not isinstance(start, (int, long, float)):
id = int(start) - 1 if start and start.isdigit() else ""
else:
if start == 1:
self._pagination = {}
if start <= 10000:
id = (query, kwargs.get("geo"), kwargs.get(
"date"), int(start) - 1, count)
id = self._pagination.get(id, "")
else:
id = int(start) - 1
# 1) Construct request URL.
url = URL(TWITTER + "search/tweets.json?", method=GET)
url.query = {
"q": query,
"max_id": id,
"count": min(count, 100)
}
# 2) Restrict location with geo=(latitude, longitude, radius).
# It can also be a (latitude, longitude)-tuple with default radius
# "10km".
if "geo" in kwargs:
url.query["geocode"] = ",".join(
(map(str, kwargs.pop("geo")) + ["10km"])[:3])
# 3) Restrict most recent with date="YYYY-MM-DD".
# Only older tweets are returned.
if "date" in kwargs:
url.query["until"] = kwargs.pop("date")
# 4) Restrict language.
url.query["lang"] = self.language or ""
# 5) Authenticate.
url = self._authenticate(url)
# 6) Parse JSON response.
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
try:
data = url.download(cached=cached, **kwargs)
except HTTP420Error:
raise SearchEngineLimitError
except HTTP429TooMayRequests:
raise SearchEngineLimitError
data = json.loads(data)
results = Results(TWITTER, query, type)
results.total = None
for x in data.get("statuses", []):
r = Result(url=None)
r.id = self.format(x.get("id_str"))
r.url = self.format(
TWITTER_STATUS % (x.get("user", {}).get("screen_name"), x.get("id_str")))
r.text = self.format(x.get("text"))
r.date = self.format(x.get("created_at"))
r.author = self.format(x.get("user", {}).get("screen_name"))
r.language = self.format(
x.get("metadata", {}).get("iso_language_code"))
r.shares = self.format(x.get("retweet_count", 0))
# Profile picture URL.
r.profile = self.format(x.get("user", {}).get("profile_image_url"))
# Fetch original status if retweet is truncated (i.e., ends with
# "...").
rt = x.get("retweeted_status", None)
if rt:
comment = re.search(r"^(.*? )RT", r.text)
comment = comment.group(1) if comment else ""
r.text = self.format(
"RT @%s: %s" % (rt["user"]["screen_name"], rt["text"]))
results.append(r)
# Twitter.search(start=id, count=10) takes a tweet.id,
# and returns 10 results that are older than this id.
# In the past, start took an int used for classic pagination.
# However, new tweets may arrive quickly,
# so that by the time Twitter.search(start=2) is called,
# it will yield results from page 1 (or even newer results).
        # For backward compatibility, we keep a page cache
        # that remembers the last id for a given "page" of a query
        # when search() is called in a loop.
#
# Store the last id retrieved.
# If search() is called again with start+1, start from this id.
if isinstance(start, (int, long, float)):
k = (query, kwargs.get("geo"), kwargs.get(
"date"), int(start), count)
if results:
self._pagination[k] = str(int(results[-1].id) - 1)
else:
self._pagination[k] = id
return results |
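A minimal usage sketch for the search() method above. The import path and constructor are assumptions not shown in this excerpt; in practice license must carry the OAuth credentials that _authenticate() expects.

# Hypothetical import path and constructor; only search() itself is defined above.
from pattern.web import Twitter

twitter = Twitter(license=None, language="en")
# start=1 fetches the newest batch; start=2 reuses the cached max_id of the
# previous call (see the pagination comments above), and so on.
for page in (1, 2, 3):
    for result in twitter.search("#python", start=page, count=10, cached=False):
        print("%s @%s: %s" % (result.id, result.author, result.text))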
Python | def profile(self, query, start=1, count=10, **kwargs):
"""Returns a list of results for the given author id, alias or search
query."""
# 1) Construct request URL.
url = URL(TWITTER + "users/search.json?", method=GET, query={
"q": query,
"page": start,
"count": count
})
url = self._authenticate(url)
# 2) Parse JSON response.
kwargs.setdefault("cached", True)
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
try:
data = URL(url).download(**kwargs)
data = json.loads(data)
except HTTP400BadRequest:
return []
return [
Result(url="https://www.twitter.com/" + x.get("screen_name", ""),
id=x.get("id_str", ""), # 14898655
handle=x.get("screen_name", ""), # tom_de_smedt
name=x.get("name", ""), # Tom De Smedt
# Artist, scientist, software engineer
text=x.get("description", ""),
language=x.get("lang", ""), # en
date=x.get("created_at"), # Sun May 10 10:00:00
locale=x.get("location", ""), # Belgium
# http://pbs.twimg.com/...
picture=x.get("profile_image_url", ""),
friends=int(x.get("followers_count", 0)), # 100
posts=int(x.get("statuses_count", 0)) # 100
) for x in data
] | def profile(self, query, start=1, count=10, **kwargs):
"""Returns a list of results for the given author id, alias or search
query."""
# 1) Construct request URL.
url = URL(TWITTER + "users/search.json?", method=GET, query={
"q": query,
"page": start,
"count": count
})
url = self._authenticate(url)
# 2) Parse JSON response.
kwargs.setdefault("cached", True)
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
try:
data = URL(url).download(**kwargs)
data = json.loads(data)
except HTTP400BadRequest:
return []
return [
Result(url="https://www.twitter.com/" + x.get("screen_name", ""),
id=x.get("id_str", ""), # 14898655
handle=x.get("screen_name", ""), # tom_de_smedt
name=x.get("name", ""), # Tom De Smedt
# Artist, scientist, software engineer
text=x.get("description", ""),
language=x.get("lang", ""), # en
date=x.get("created_at"), # Sun May 10 10:00:00
locale=x.get("location", ""), # Belgium
# http://pbs.twimg.com/...
picture=x.get("profile_image_url", ""),
friends=int(x.get("followers_count", 0)), # 100
posts=int(x.get("statuses_count", 0)) # 100
) for x in data
] |
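A short sketch for profile(); the constructor is an assumption, and the printed fields are the Result attributes filled in the list comprehension above.

twitter = Twitter(license=None)  # hypothetical constructor; needs valid credentials in practice
for p in twitter.profile("tom_de_smedt", start=1, count=5):
    print("@%s (%s): %s friends, %s posts" % (p.handle, p.name, p.friends, p.posts))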
Python | def trends(self, **kwargs):
"""Returns a list with 10 trending topics on Twitter."""
# 1) Construct request URL.
url = URL("https://api.twitter.com/1.1/trends/place.json?id=1")
url = self._authenticate(url)
# 2) Parse JSON response.
kwargs.setdefault("cached", False)
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
try:
data = url.download(**kwargs)
data = json.loads(data)
except HTTP400BadRequest:
return []
return [u(x.get("name")) for x in data[0].get("trends", [])] | def trends(self, **kwargs):
"""Returns a list with 10 trending topics on Twitter."""
# 1) Construct request URL.
url = URL("https://api.twitter.com/1.1/trends/place.json?id=1")
url = self._authenticate(url)
# 2) Parse JSON response.
kwargs.setdefault("cached", False)
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
try:
data = url.download(**kwargs)
data = json.loads(data)
except HTTP400BadRequest:
return []
return [u(x.get("name")) for x in data[0].get("trends", [])] |
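trends() takes no query; a one-line sketch (instance construction assumed as before):

twitter = Twitter(license=None)      # hypothetical; authentication is still required
print(twitter.trends(cached=False))  # e.g. [u"#worldcup", u"#python", ...]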
Python | def stream(self, query, **kwargs):
"""Returns a live stream of Result objects for the given query."""
url = URL(TWITTER_STREAM)
url.query["track"] = query
url = self._authenticate(url)
return TwitterStream(url, delimiter="\n", format=self.format, **kwargs) | def stream(self, query, **kwargs):
"""Returns a live stream of Result objects for the given query."""
url = URL(TWITTER_STREAM)
url.query["track"] = query
url = self._authenticate(url)
return TwitterStream(url, delimiter="\n", format=self.format, **kwargs) |
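stream() hands back a TwitterStream that is polled in a loop; a sketch of that loop, relying on the queue/update() behaviour described in the parse() docstring below (the update() signature and the sleep interval are assumptions).

import time

twitter = Twitter(license=None)         # hypothetical authenticated instance
stream = twitter.stream("rain OR sun")
for _ in range(10):
    stream.update()                     # fills stream.queue with parsed Result objects (see parse() below)
    while stream.queue:
        print(stream.queue.pop(0).text)
    time.sleep(15)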
Python | def parse(self, data):
"""TwitterStream.queue will populate with Result objects as
TwitterStream.update() is called iteratively."""
if data.strip():
x = json.loads(data)
r = Result(url=None)
r.id = self.format(x.get("id_str"))
r.url = self.format(
TWITTER_STATUS % (x.get("user", {}).get("screen_name"), x.get("id_str")))
r.text = self.format(x.get("text"))
r.date = self.format(x.get("created_at"))
r.author = self.format(x.get("user", {}).get("screen_name"))
r.language = self.format(
x.get("metadata", {}).get("iso_language_code"))
r.shares = self.format(x.get("retweet_count", 0))
# Profile picture URL.
r.profile = self.format(x.get("user", {}).get("profile_image_url"))
# Fetch original status if retweet is truncated (i.e., ends with
# "...").
rt = x.get("retweeted_status", None)
if rt:
comment = re.search(r"^(.*? )RT", r.text)
comment = comment.group(1) if comment else ""
r.text = self.format(
"RT @%s: %s" % (rt["user"]["screen_name"], rt["text"]))
return r | def parse(self, data):
"""TwitterStream.queue will populate with Result objects as
TwitterStream.update() is called iteratively."""
if data.strip():
x = json.loads(data)
r = Result(url=None)
r.id = self.format(x.get("id_str"))
r.url = self.format(
TWITTER_STATUS % (x.get("user", {}).get("screen_name"), x.get("id_str")))
r.text = self.format(x.get("text"))
r.date = self.format(x.get("created_at"))
r.author = self.format(x.get("user", {}).get("screen_name"))
r.language = self.format(
x.get("metadata", {}).get("iso_language_code"))
r.shares = self.format(x.get("retweet_count", 0))
# Profile picture URL.
r.profile = self.format(x.get("user", {}).get("profile_image_url"))
# Fetch original status if retweet is truncated (i.e., ends with
# "...").
rt = x.get("retweeted_status", None)
if rt:
comment = re.search(r"^(.*? )RT", r.text)
comment = comment.group(1) if comment else ""
r.text = self.format(
"RT @%s: %s" % (rt["user"]["screen_name"], rt["text"]))
return r |
Python | def articles(self, **kwargs):
"""Returns an iterator over all MediaWikiArticle objects.
Optional parameters can include those passed to
MediaWiki.index(), MediaWiki.search() and URL.download().
"""
for title in self.index(**kwargs):
yield self.search(title, **kwargs) | def articles(self, **kwargs):
"""Returns an iterator over all MediaWikiArticle objects.
Optional parameters can include those passed to
MediaWiki.index(), MediaWiki.search() and URL.download().
"""
for title in self.index(**kwargs):
yield self.search(title, **kwargs) |
Python | def index(self, namespace=0, start=None, count=100, cached=True, **kwargs):
"""Returns an iterator over all article titles (for a given namespace
id)."""
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
# Fetch article titles (default) or a custom id.
id = kwargs.pop("_id", "title")
# Loop endlessly (= until the last request no longer yields an "apcontinue").
# See: http://www.mediawiki.org/wiki/API:Allpages
while start != -1:
url = URL(self._url, method=GET, query={
"action": "query",
"list": "allpages",
"apnamespace": namespace,
"apfrom": start or "",
"aplimit": min(count, 500),
"apfilterredir": "nonredirects",
"format": "json"
})
data = url.download(cached=cached, **kwargs)
data = json.loads(data)
for x in data.get("query", {}).get("allpages", {}):
if x.get(id):
yield x[id]
start = data.get("query-continue", {}).get("allpages", {})
start = start.get("apcontinue", start.get("apfrom", -1))
        # Generators must not raise StopIteration explicitly (PEP 479); simply return.
        return
"""Returns an iterator over all article titles (for a given namespace
id)."""
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
# Fetch article titles (default) or a custom id.
id = kwargs.pop("_id", "title")
# Loop endlessly (= until the last request no longer yields an "apcontinue").
# See: http://www.mediawiki.org/wiki/API:Allpages
while start != -1:
url = URL(self._url, method=GET, query={
"action": "query",
"list": "allpages",
"apnamespace": namespace,
"apfrom": start or "",
"aplimit": min(count, 500),
"apfilterredir": "nonredirects",
"format": "json"
})
data = url.download(cached=cached, **kwargs)
data = json.loads(data)
for x in data.get("query", {}).get("allpages", {}):
if x.get(id):
yield x[id]
start = data.get("query-continue", {}).get("allpages", {})
start = start.get("apcontinue", start.get("apfrom", -1))
        # Generators must not raise StopIteration explicitly (PEP 479); simply return.
        return
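Because index() is a generator that pages through the MediaWiki Allpages API, it can be iterated lazily; a hedged sketch using the Wikipedia engine referenced elsewhere in this module (its constructor is an assumption).

wiki = Wikipedia(language="en")   # hypothetical subclass constructor; index() is defined above
for i, title in enumerate(wiki.index(start="A", count=100, cached=True)):
    print(title)
    if i >= 500:                  # stop early; the full index runs to millions of titles
        break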
Python | def search(self, query, type=SEARCH, start=1, count=10, sort=RELEVANCY, size=None, cached=True, **kwargs):
""" With type=SEARCH, returns a MediaWikiArticle for the given query (case-sensitive).
With type=ALL, returns a list of results.
Each result.title is the title of an article that contains the given query.
"""
if type not in (SEARCH, ALL, "*"):
raise SearchEngineTypeError
if type == SEARCH: # Backwards compatibility.
return self.article(query, cached=cached, **kwargs)
if not query or start < 1 or count < 1:
return Results(self._url, query, type)
# 1) Construct request URL (e.g., Wikipedia for a given language).
url = URL(self._url, method=GET, query={
"action": "query",
"list": "search",
"srsearch": query,
"sroffset": (start - 1) * count,
"srlimit": min(count, 100),
"srprop": "snippet",
"format": "json"
})
# 2) Parse JSON response.
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
data = url.download(cached=cached, **kwargs)
data = json.loads(data)
data = data.get("query", {})
results = Results(self._url, query, type)
results.total = int(data.get("searchinfo", {}).get("totalhits", 0))
for x in data.get("search", []):
u = "http://%s/wiki/%s" % (URL(self._url).domain,
x.get("title").replace(" ", "_"))
r = Result(url=u)
r.id = self.format(x.get("title"))
r.title = self.format(x.get("title"))
r.text = self.format(plaintext(x.get("snippet")))
results.append(r)
return results | def search(self, query, type=SEARCH, start=1, count=10, sort=RELEVANCY, size=None, cached=True, **kwargs):
""" With type=SEARCH, returns a MediaWikiArticle for the given query (case-sensitive).
With type=ALL, returns a list of results.
Each result.title is the title of an article that contains the given query.
"""
if type not in (SEARCH, ALL, "*"):
raise SearchEngineTypeError
if type == SEARCH: # Backwards compatibility.
return self.article(query, cached=cached, **kwargs)
if not query or start < 1 or count < 1:
return Results(self._url, query, type)
# 1) Construct request URL (e.g., Wikipedia for a given language).
url = URL(self._url, method=GET, query={
"action": "query",
"list": "search",
"srsearch": query,
"sroffset": (start - 1) * count,
"srlimit": min(count, 100),
"srprop": "snippet",
"format": "json"
})
# 2) Parse JSON response.
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
data = url.download(cached=cached, **kwargs)
data = json.loads(data)
data = data.get("query", {})
results = Results(self._url, query, type)
results.total = int(data.get("searchinfo", {}).get("totalhits", 0))
for x in data.get("search", []):
u = "http://%s/wiki/%s" % (URL(self._url).domain,
x.get("title").replace(" ", "_"))
r = Result(url=u)
r.id = self.format(x.get("title"))
r.title = self.format(x.get("title"))
r.text = self.format(plaintext(x.get("snippet")))
results.append(r)
return results |
Python | def article(self, query, cached=True, **kwargs):
""" Returns a MediaWikiArticle for the given query.
The query is case-sensitive, for example on Wikipedia:
- "tiger" = Panthera tigris,
- "TIGER" = Topologically Integrated Geographic Encoding and Referencing.
"""
url = URL(self._url, method=GET, query={
"action": "parse",
"page": query.replace(" ", "_"),
"redirects": 1,
"format": "json"
})
kwargs.setdefault("unicode", True)
# Parsing the article takes some time.
kwargs.setdefault("timeout", 30)
kwargs.setdefault("throttle", self.throttle)
data = url.download(cached=cached, **kwargs)
data = json.loads(data)
data = data.get("parse", {})
a = self._parse_article(data, query=query)
a = self._parse_article_sections(a, data)
a = self._parse_article_section_structure(a)
if not a.html or "id=\"noarticletext\"" in a.html:
return None
return a | def article(self, query, cached=True, **kwargs):
""" Returns a MediaWikiArticle for the given query.
The query is case-sensitive, for example on Wikipedia:
- "tiger" = Panthera tigris,
- "TIGER" = Topologically Integrated Geographic Encoding and Referencing.
"""
url = URL(self._url, method=GET, query={
"action": "parse",
"page": query.replace(" ", "_"),
"redirects": 1,
"format": "json"
})
kwargs.setdefault("unicode", True)
# Parsing the article takes some time.
kwargs.setdefault("timeout", 30)
kwargs.setdefault("throttle", self.throttle)
data = url.download(cached=cached, **kwargs)
data = json.loads(data)
data = data.get("parse", {})
a = self._parse_article(data, query=query)
a = self._parse_article_sections(a, data)
a = self._parse_article_section_structure(a)
if not a.html or "id=\"noarticletext\"" in a.html:
return None
return a |
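A sketch of fetching a single article with article() and walking its parsed sections; the Wikipedia constructor and the MediaWikiArticle/section attributes used below are assumptions based on the surrounding parser code.

wiki = Wikipedia(language="en")               # hypothetical constructor
article = wiki.article("Tiger", cached=True)  # returns a MediaWikiArticle, or None if missing
if article is not None:
    print(article.title)                      # attribute assumed from _parse_article()
    for section in article.sections:          # filled by _parse_article_sections()
        print("- " + section.title)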
Python | def _plaintext(self, string, **kwargs):
""" Strips HTML tags, whitespace and wiki markup from the HTML source, including:
metadata, info box, table of contents, annotations, thumbnails, disambiguation link.
This is called internally from MediaWikiArticle.string.
"""
s = string
# Strip meta <table> elements.
# Table of contents.
s = strip_element(s, "table", "id=\"toc")
s = strip_element(s, "table", "class=\"infobox") # Infobox.
s = strip_element(s, "table", "class=\"navbox") # Navbox.
s = strip_element(s, "table", "class=\"mbox") # Message.
s = strip_element(s, "table", "class=\"metadata") # Metadata.
s = strip_element(s, "table", "class=\".*?wikitable") # Table.
# Table (usually footer).
s = strip_element(s, "table", "class=\"toc")
# Strip meta <div> elements.
# Table of contents.
s = strip_element(s, "div", "id=\"toc")
s = strip_element(s, "div", "class=\"infobox") # Infobox.
s = strip_element(s, "div", "class=\"navbox") # Navbox.
s = strip_element(s, "div", "class=\"mbox") # Message.
s = strip_element(s, "div", "class=\"metadata") # Metadata.
s = strip_element(s, "div", "id=\"annotation") # Annotations.
# Disambiguation message.
s = strip_element(s, "div", "class=\"dablink")
s = strip_element(s, "div", "class=\"magnify") # Thumbnails.
# Thumbnail captions.
s = strip_element(s, "div", "class=\"thumb ")
s = strip_element(s, "div", "class=\"barbox") # Bar charts.
# Hidden from print.
s = strip_element(s, "div", "class=\"noprint")
s = strip_element(s, "sup", "class=\"noprint")
# Strip absolute elements (don't know their position).
s = strip_element(s, "div", "style=\"position:absolute")
# Strip meta <span> elements.
s = strip_element(s, "span", "class=\"error")
# Strip math formulas, add [math] placeholder.
        # LaTeX math images.
        s = re.sub(r"<img class=\"tex\".*?/>", "[math]", s)
        s = plaintext(s, **kwargs)
        # Strip the [edit] link (language-dependent).
s = re.sub(r"\[edit\]\s*", "", s)
s = re.sub(r"\[%s\]\s*" % {
"en": "edit",
"es": u"editar código",
"de": "Bearbeiten",
"fr": "modifier le code",
"it": "modifica sorgente",
"nl": "bewerken",
}.get(self.language, "edit"), "", s)
# Insert space before inline references.
s = s.replace("[", " [").replace(" [", " [")
# Strip inline references.
#s = re.sub(r" \[[0-9]+\]", "", s)
return s | def _plaintext(self, string, **kwargs):
""" Strips HTML tags, whitespace and wiki markup from the HTML source, including:
metadata, info box, table of contents, annotations, thumbnails, disambiguation link.
This is called internally from MediaWikiArticle.string.
"""
s = string
# Strip meta <table> elements.
# Table of contents.
s = strip_element(s, "table", "id=\"toc")
s = strip_element(s, "table", "class=\"infobox") # Infobox.
s = strip_element(s, "table", "class=\"navbox") # Navbox.
s = strip_element(s, "table", "class=\"mbox") # Message.
s = strip_element(s, "table", "class=\"metadata") # Metadata.
s = strip_element(s, "table", "class=\".*?wikitable") # Table.
# Table (usually footer).
s = strip_element(s, "table", "class=\"toc")
# Strip meta <div> elements.
# Table of contents.
s = strip_element(s, "div", "id=\"toc")
s = strip_element(s, "div", "class=\"infobox") # Infobox.
s = strip_element(s, "div", "class=\"navbox") # Navbox.
s = strip_element(s, "div", "class=\"mbox") # Message.
s = strip_element(s, "div", "class=\"metadata") # Metadata.
s = strip_element(s, "div", "id=\"annotation") # Annotations.
# Disambiguation message.
s = strip_element(s, "div", "class=\"dablink")
s = strip_element(s, "div", "class=\"magnify") # Thumbnails.
# Thumbnail captions.
s = strip_element(s, "div", "class=\"thumb ")
s = strip_element(s, "div", "class=\"barbox") # Bar charts.
# Hidden from print.
s = strip_element(s, "div", "class=\"noprint")
s = strip_element(s, "sup", "class=\"noprint")
# Strip absolute elements (don't know their position).
s = strip_element(s, "div", "style=\"position:absolute")
# Strip meta <span> elements.
s = strip_element(s, "span", "class=\"error")
# Strip math formulas, add [math] placeholder.
        # LaTeX math images.
        s = re.sub(r"<img class=\"tex\".*?/>", "[math]", s)
        s = plaintext(s, **kwargs)
        # Strip the [edit] link (language-dependent).
s = re.sub(r"\[edit\]\s*", "", s)
s = re.sub(r"\[%s\]\s*" % {
"en": "edit",
"es": u"editar código",
"de": "Bearbeiten",
"fr": "modifier le code",
"it": "modifica sorgente",
"nl": "bewerken",
}.get(self.language, "edit"), "", s)
# Insert space before inline references.
s = s.replace("[", " [").replace(" [", " [")
# Strip inline references.
#s = re.sub(r" \[[0-9]+\]", "", s)
return s |
Python | def links(self, path="/wiki/"):
"""Yields a list of Wikipedia links in this section.
Similar in functionality to MediaWikiArticle.links.
"""
if self._links is None:
a = HTMLLinkParser().parse(self.source)
a = (decode_url(a.url) for a in a)
a = (a[len(path):].replace("_", " ")
for a in a if a.startswith(path))
a = (a for a in a if not _mediawiki_namespace.match(a))
self._links = sorted(set(a))
return self._links | def links(self, path="/wiki/"):
"""Yields a list of Wikipedia links in this section.
Similar in functionality to MediaWikiArticle.links.
"""
if self._links is None:
a = HTMLLinkParser().parse(self.source)
a = (decode_url(a.url) for a in a)
a = (a[len(path):].replace("_", " ")
for a in a if a.startswith(path))
a = (a for a in a if not _mediawiki_namespace.match(a))
self._links = sorted(set(a))
return self._links |
Python | def tables(self):
"""Yields a list of MediaWikiTable objects in the section."""
if self._tables is None:
self._tables = []
for style in ("wikitable", "sortable wikitable"):
b = "<table class=\"%s\"" % style, "</table>"
p = self.article._plaintext
f = find_between
for s in f(b[0], b[1], self.source):
t = self.article.parser.MediaWikiTable(self,
title=p(
(f(r"<caption.*?>", "</caption>", s) + [""])[0]),
source=b[0] + s + b[1])
# 1) Parse <td> and <th> content and format it as plain text.
# 2) Parse <td colspan=""> attribute, duplicate spanning cells.
# 3) For <th> in the first row, update
# MediaWikiTable.headers.
for i, row in enumerate(f(r"<tr", "</tr>", s)):
r1 = f(r"<t[d|h]", r"</t[d|h]>", row)
r1 = (
((f(r'colspan="', r'"', v) + [1])[0], v[v.find(">") + 1:]) for v in r1)
r1 = ((int(n), v) for n, v in r1)
r2 = []
[[r2.append(p(v)) for j in range(n)] for n, v in r1]
if i == 0 and "</th>" in row:
t.headers = r2
else:
t.rows.append(r2)
self._tables.append(t)
return self._tables | def tables(self):
"""Yields a list of MediaWikiTable objects in the section."""
if self._tables is None:
self._tables = []
for style in ("wikitable", "sortable wikitable"):
b = "<table class=\"%s\"" % style, "</table>"
p = self.article._plaintext
f = find_between
for s in f(b[0], b[1], self.source):
t = self.article.parser.MediaWikiTable(self,
title=p(
(f(r"<caption.*?>", "</caption>", s) + [""])[0]),
source=b[0] + s + b[1])
# 1) Parse <td> and <th> content and format it as plain text.
# 2) Parse <td colspan=""> attribute, duplicate spanning cells.
# 3) For <th> in the first row, update
# MediaWikiTable.headers.
for i, row in enumerate(f(r"<tr", "</tr>", s)):
r1 = f(r"<t[d|h]", r"</t[d|h]>", row)
r1 = (
((f(r'colspan="', r'"', v) + [1])[0], v[v.find(">") + 1:]) for v in r1)
r1 = ((int(n), v) for n, v in r1)
r2 = []
[[r2.append(p(v)) for j in range(n)] for n, v in r1]
if i == 0 and "</th>" in row:
t.headers = r2
else:
t.rows.append(r2)
self._tables.append(t)
return self._tables |
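The tables property turns each wikitable into a MediaWikiTable with title, headers and rows (attribute names taken from the code above); a sketch that dumps them as pipe-separated lines, assuming an article fetched as in the earlier sketch.

wiki = Wikipedia(language="en")  # hypothetical constructor, as before
article = wiki.article("List of countries and dependencies by population")  # illustrative query
for section in article.sections:
    for table in section.tables:
        print(table.title)
        print(" | ".join(table.headers))
        for row in table.rows:
            print(" | ".join(row))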
Python | def download(self, media, **kwargs):
"""Downloads an item from MediaWikiArticle.media and returns the
content.
Note: images on Wikipedia can be quite large, and this method uses screen-scraping,
so Wikipedia might not like it that you download media in this way.
To save the media in a file:
data = article.download(media)
open(filename+extension(media),"w").write(data)
"""
url = "http://%s.wikipedia.org/wiki/File:%s" % (
self.__dict__.get("language", "en"), media)
if url not in CACHE:
time.sleep(1)
data = URL(url).download(**kwargs)
data = re.search(r"upload.wikimedia.org/.*?/%s" % media, data)
data = data and URL(
"http://" + data.group(0)).download(**kwargs) or None
return data | def download(self, media, **kwargs):
"""Downloads an item from MediaWikiArticle.media and returns the
content.
Note: images on Wikipedia can be quite large, and this method uses screen-scraping,
so Wikipedia might not like it that you download media in this way.
To save the media in a file:
data = article.download(media)
open(filename+extension(media),"w").write(data)
"""
url = "http://%s.wikipedia.org/wiki/File:%s" % (
self.__dict__.get("language", "en"), media)
if url not in CACHE:
time.sleep(1)
data = URL(url).download(**kwargs)
data = re.search(r"upload.wikimedia.org/.*?/%s" % media, data)
data = data and URL(
"http://" + data.group(0)).download(**kwargs) or None
return data |
Python | def search(self, query, type=SPARQL, start=1, count=10, sort=RELEVANCY, size=None, cached=False, **kwargs):
"""Returns a list of results from DBPedia for the given SPARQL query.
- type : SPARQL,
- start: no maximum,
- count: maximum 1000,
There is a limit of 10 requests/second.
Maximum query execution time is 120 seconds.
"""
if type not in (SPARQL,):
raise SearchEngineTypeError
if not query or count < 1 or start < 1:
return Results(DBPEDIA, query, type)
# 1) Construct request URL.
url = URL(DBPEDIA, method=GET)
url.query = {
"format": "json",
"query": "%s OFFSET %s LIMIT %s" % (query,
(start - 1) * min(count, 1000),
(start - 0) * min(count, 1000)
)
}
# 2) Parse JSON response.
try:
data = URL(url).download(cached=cached, timeout=30, **kwargs)
data = json.loads(data)
except HTTP400BadRequest as e:
raise DBPediaQueryError(e.src.read().splitlines()[0])
except HTTP403Forbidden:
raise SearchEngineLimitError
results = Results(DBPEDIA, url.query, type)
results.total = None
for x in data["results"]["bindings"]:
r = Result(url=None)
for k in data["head"]["vars"]:
# uri | literal | typed-literal
t1 = x[k].get("type", "literal")
# http://www.w3.org/2001/XMLSchema#float | int | date
t2 = x[k].get("datatype", "?")
v = x[k].get("value")
v = self.format(v)
if t1 == "uri":
v = DBPediaResource(v)
if t2.endswith("float"):
v = float(v)
if t2.endswith("int"):
v = int(v)
dict.__setitem__(r, k, v)
results.append(r)
return results | def search(self, query, type=SPARQL, start=1, count=10, sort=RELEVANCY, size=None, cached=False, **kwargs):
"""Returns a list of results from DBPedia for the given SPARQL query.
- type : SPARQL,
- start: no maximum,
- count: maximum 1000,
There is a limit of 10 requests/second.
Maximum query execution time is 120 seconds.
"""
if type not in (SPARQL,):
raise SearchEngineTypeError
if not query or count < 1 or start < 1:
return Results(DBPEDIA, query, type)
# 1) Construct request URL.
url = URL(DBPEDIA, method=GET)
url.query = {
"format": "json",
"query": "%s OFFSET %s LIMIT %s" % (query,
(start - 1) * min(count, 1000),
(start - 0) * min(count, 1000)
)
}
# 2) Parse JSON response.
try:
data = URL(url).download(cached=cached, timeout=30, **kwargs)
data = json.loads(data)
except HTTP400BadRequest as e:
raise DBPediaQueryError(e.src.read().splitlines()[0])
except HTTP403Forbidden:
raise SearchEngineLimitError
results = Results(DBPEDIA, url.query, type)
results.total = None
for x in data["results"]["bindings"]:
r = Result(url=None)
for k in data["head"]["vars"]:
# uri | literal | typed-literal
t1 = x[k].get("type", "literal")
# http://www.w3.org/2001/XMLSchema#float | int | date
t2 = x[k].get("datatype", "?")
v = x[k].get("value")
v = self.format(v)
if t1 == "uri":
v = DBPediaResource(v)
if t2.endswith("float"):
v = float(v)
if t2.endswith("int"):
v = int(v)
dict.__setitem__(r, k, v)
results.append(r)
return results |
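A hedged SPARQL sketch for DBPedia.search(). The query string is an illustration and the constructor is an assumption; each Result maps the SELECT variables to plain values or DBPediaResource URIs, exactly as the parsing loop above builds them.

dbpedia = DBPedia(license=None)  # hypothetical; the public endpoint needs no key
sparql = "\n".join((
    "prefix dbo: <http://dbpedia.org/ontology/>",
    "prefix foaf: <http://xmlns.com/foaf/0.1/>",
    "select ?person ?name where {",
    "    ?person a dbo:Person.",
    "    ?person foaf:name ?name.",
    "} order by ?name",
))
for r in dbpedia.search(sparql, start=1, count=10):
    print("%s -> %s" % (r["name"], r["person"]))  # r["person"] is a DBPediaResource (a URI)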
Python | def search(self, query, type=IMAGE, start=1, count=10, sort=RELEVANCY, size=None, cached=True, **kwargs):
"""Returns a list of results from Flickr for the given query.
Retrieving the URL of a result (i.e. image) requires an additional
query.
- type : SEARCH, IMAGE,
- start: maximum undefined,
- count: maximum 500,
- sort : RELEVANCY, LATEST or INTERESTING.
There is no daily limit.
"""
if type not in (SEARCH, IMAGE):
raise SearchEngineTypeError
if not query or count < 1 or start < 1 or start > 500 / count:
return Results(FLICKR, query, IMAGE)
# 1) Construct request URL.
url = FLICKR + "?"
url = URL(url, method=GET, query={
"api_key": self.license or "",
"method": "flickr.photos.search",
"text": query.replace(" ", "_"),
"page": start,
"per_page": min(count, 500),
"sort": {RELEVANCY: "relevance",
LATEST: "date-posted-desc",
INTERESTING: "interestingness-desc"}.get(sort)
})
if kwargs.get("copyright", True) is False:
# With copyright=False, only returns Public Domain and Creative Commons images.
# http://www.flickr.com/services/api/flickr.photos.licenses.getInfo.html
# 5: "Attribution-ShareAlike License"
# 7: "No known copyright restriction"
url.query["license"] = "5,7"
# 2) Parse XML response.
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
data = url.download(cached=cached, **kwargs)
data = xml.dom.minidom.parseString(bytestring(data))
results = Results(FLICKR, query, IMAGE)
results.total = int(
data.getElementsByTagName("photos")[0].getAttribute("total"))
for x in data.getElementsByTagName("photo"):
r = FlickrResult(url=None)
r.__dict__["_id"] = x.getAttribute("id")
r.__dict__["_size"] = size
r.__dict__["_license"] = self.license
r.__dict__["_throttle"] = self.throttle
r.text = self.format(x.getAttribute("title"))
r.author = self.format(x.getAttribute("owner"))
results.append(r)
return results | def search(self, query, type=IMAGE, start=1, count=10, sort=RELEVANCY, size=None, cached=True, **kwargs):
"""Returns a list of results from Flickr for the given query.
Retrieving the URL of a result (i.e. image) requires an additional
query.
- type : SEARCH, IMAGE,
- start: maximum undefined,
- count: maximum 500,
- sort : RELEVANCY, LATEST or INTERESTING.
There is no daily limit.
"""
if type not in (SEARCH, IMAGE):
raise SearchEngineTypeError
if not query or count < 1 or start < 1 or start > 500 / count:
return Results(FLICKR, query, IMAGE)
# 1) Construct request URL.
url = FLICKR + "?"
url = URL(url, method=GET, query={
"api_key": self.license or "",
"method": "flickr.photos.search",
"text": query.replace(" ", "_"),
"page": start,
"per_page": min(count, 500),
"sort": {RELEVANCY: "relevance",
LATEST: "date-posted-desc",
INTERESTING: "interestingness-desc"}.get(sort)
})
if kwargs.get("copyright", True) is False:
# With copyright=False, only returns Public Domain and Creative Commons images.
# http://www.flickr.com/services/api/flickr.photos.licenses.getInfo.html
# 5: "Attribution-ShareAlike License"
# 7: "No known copyright restriction"
url.query["license"] = "5,7"
# 2) Parse XML response.
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
data = url.download(cached=cached, **kwargs)
data = xml.dom.minidom.parseString(bytestring(data))
results = Results(FLICKR, query, IMAGE)
results.total = int(
data.getElementsByTagName("photos")[0].getAttribute("total"))
for x in data.getElementsByTagName("photo"):
r = FlickrResult(url=None)
r.__dict__["_id"] = x.getAttribute("id")
r.__dict__["_size"] = size
r.__dict__["_license"] = self.license
r.__dict__["_throttle"] = self.throttle
r.text = self.format(x.getAttribute("title"))
r.author = self.format(x.getAttribute("owner"))
results.append(r)
return results |
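An image-search sketch for Flickr. The API-key handling and constructor are assumptions; note that reading FlickrResult.url triggers the extra query mentioned in the docstring, so it is only touched when needed.

flickr = Flickr(license="your-flickr-api-key")  # hypothetical key
results = flickr.search("kittens", count=5, sort=RELEVANCY, copyright=False, cached=True)
for img in results:
    print("%s (by %s)" % (img.text, img.author))
    # img.url would be resolved lazily with an additional API call; defer it
    # until the image location is actually needed.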
Python | def search(self, query, type=SEARCH, start=1, count=10, cached=False, **kwargs):
"""Returns a list of results from Facebook public status updates for
the given query.
- query: string, or Result.id for NEWS and COMMENTS,
- type : SEARCH,
- start: 1,
- count: maximum 100 for SEARCH and NEWS, 1000 for COMMENTS and LIKES.
There is an hourly limit of +-600 queries (actual amount undisclosed).
"""
# Facebook.search(type=SEARCH) returns public posts + author.
# Facebook.search(type=NEWS) returns posts for the given author (id | alias | "me").
# Facebook.search(type=COMMENTS) returns comments for the given post id.
# Facebook.search(type=LIKES) returns authors for the given author, post or comments.
# Facebook.search(type=FRIENDS) returns authors for the given author.
# An author is a Facebook user or other entity (e.g., a product page).
if type not in (SEARCH, NEWS, COMMENTS, LIKES, FRIENDS):
raise SearchEngineTypeError
if type in (SEARCH, NEWS):
max = 100
if type in (COMMENTS, LIKES):
max = 1000
if type in (FRIENDS,):
max = 10000
if not query or start < 1 or count < 1:
return Results(FACEBOOK, query, SEARCH)
if isinstance(query, FacebookResult):
query = query.id
# 1) Construct request URL.
if type == SEARCH:
url = FACEBOOK + type
url = URL(url, method=GET, query={
"q": query,
"type": "post",
"access_token": self.license,
"offset": (start - 1) * min(count, max),
"limit": (start - 0) * min(count, max)
})
if type in (NEWS, FEED, COMMENTS, LIKES, FRIENDS):
url = FACEBOOK + \
(u(query) or "me").replace(FACEBOOK, "") + \
"/" + type.replace("news", "feed")
url = URL(url, method=GET, query={
"access_token": self.license,
"offset": (start - 1) * min(count, max),
"limit": (start - 0) * min(count, max),
})
if type in (SEARCH, NEWS, FEED):
url.query["fields"] = ",".join((
"id", "from", "name", "story", "message", "link", "picture", "created_time", "shares",
"comments.limit(1).summary(true)",
"likes.limit(1).summary(true)"
))
# 2) Parse JSON response.
kwargs.setdefault("cached", cached)
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
try:
data = URL(url).download(**kwargs)
except HTTP400BadRequest:
raise HTTP401Authentication
data = json.loads(data)
results = Results(FACEBOOK, query, SEARCH)
results.total = None
for x in data.get("data", []):
r = FacebookResult(url=None)
r.id = self.format(x.get("id"))
r.url = self.format(x.get("link"))
r.text = self.format(
x.get("story", x.get("message", x.get("name"))))
r.date = self.format(x.get("created_time"))
r.votes = self.format(x.get(
"like_count", x.get("likes", {}).get("summary", {}).get("total_count", 0)) + 0)
r.shares = self.format(x.get("shares", {}).get("count", 0))
r.comments = self.format(
x.get("comments", {}).get("summary", {}).get("total_count", 0) + 0)
r.author = self.format(x.get("from", {}).get("id", "")), \
self.format(x.get("from", {}).get("name", ""))
# Set Result.text to author name for likes.
if type in (LIKES, FRIENDS):
r.author = \
self.format(x.get("id", "")), \
self.format(x.get("name", ""))
r.text = self.format(x.get("name"))
# Set Result.url to full-size image.
if re.match(r"^http(s?)://www\.facebook\.com/photo", r.url) is not None:
r.url = x.get("picture", "").replace("_s", "_b") or r.url
# Set Result.title to object id.
if re.match(r"^http(s?)://www\.facebook\.com/", r.url) is not None:
r.title = r.url.split("/")[-1].split("?")[0]
results.append(r)
return results | def search(self, query, type=SEARCH, start=1, count=10, cached=False, **kwargs):
"""Returns a list of results from Facebook public status updates for
the given query.
- query: string, or Result.id for NEWS and COMMENTS,
- type : SEARCH,
- start: 1,
- count: maximum 100 for SEARCH and NEWS, 1000 for COMMENTS and LIKES.
There is an hourly limit of +-600 queries (actual amount undisclosed).
"""
# Facebook.search(type=SEARCH) returns public posts + author.
# Facebook.search(type=NEWS) returns posts for the given author (id | alias | "me").
# Facebook.search(type=COMMENTS) returns comments for the given post id.
# Facebook.search(type=LIKES) returns authors for the given author, post or comments.
# Facebook.search(type=FRIENDS) returns authors for the given author.
# An author is a Facebook user or other entity (e.g., a product page).
if type not in (SEARCH, NEWS, COMMENTS, LIKES, FRIENDS):
raise SearchEngineTypeError
if type in (SEARCH, NEWS):
max = 100
if type in (COMMENTS, LIKES):
max = 1000
if type in (FRIENDS,):
max = 10000
if not query or start < 1 or count < 1:
return Results(FACEBOOK, query, SEARCH)
if isinstance(query, FacebookResult):
query = query.id
# 1) Construct request URL.
if type == SEARCH:
url = FACEBOOK + type
url = URL(url, method=GET, query={
"q": query,
"type": "post",
"access_token": self.license,
"offset": (start - 1) * min(count, max),
"limit": (start - 0) * min(count, max)
})
if type in (NEWS, FEED, COMMENTS, LIKES, FRIENDS):
url = FACEBOOK + \
(u(query) or "me").replace(FACEBOOK, "") + \
"/" + type.replace("news", "feed")
url = URL(url, method=GET, query={
"access_token": self.license,
"offset": (start - 1) * min(count, max),
"limit": (start - 0) * min(count, max),
})
if type in (SEARCH, NEWS, FEED):
url.query["fields"] = ",".join((
"id", "from", "name", "story", "message", "link", "picture", "created_time", "shares",
"comments.limit(1).summary(true)",
"likes.limit(1).summary(true)"
))
# 2) Parse JSON response.
kwargs.setdefault("cached", cached)
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
try:
data = URL(url).download(**kwargs)
except HTTP400BadRequest:
raise HTTP401Authentication
data = json.loads(data)
results = Results(FACEBOOK, query, SEARCH)
results.total = None
for x in data.get("data", []):
r = FacebookResult(url=None)
r.id = self.format(x.get("id"))
r.url = self.format(x.get("link"))
r.text = self.format(
x.get("story", x.get("message", x.get("name"))))
r.date = self.format(x.get("created_time"))
r.votes = self.format(x.get(
"like_count", x.get("likes", {}).get("summary", {}).get("total_count", 0)) + 0)
r.shares = self.format(x.get("shares", {}).get("count", 0))
r.comments = self.format(
x.get("comments", {}).get("summary", {}).get("total_count", 0) + 0)
r.author = self.format(x.get("from", {}).get("id", "")), \
self.format(x.get("from", {}).get("name", ""))
# Set Result.text to author name for likes.
if type in (LIKES, FRIENDS):
r.author = \
self.format(x.get("id", "")), \
self.format(x.get("name", ""))
r.text = self.format(x.get("name"))
# Set Result.url to full-size image.
if re.match(r"^http(s?)://www\.facebook\.com/photo", r.url) is not None:
r.url = x.get("picture", "").replace("_s", "_b") or r.url
# Set Result.title to object id.
if re.match(r"^http(s?)://www\.facebook\.com/", r.url) is not None:
r.title = r.url.split("/")[-1].split("?")[0]
results.append(r)
return results |
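A sketch of the Facebook search types. SEARCH, NEWS, COMMENTS, LIKES and FRIENDS are the constants checked above; the access-token handling and constructor are assumptions.

fb = Facebook(license="your-access-token")  # hypothetical token
# Public posts matching a keyword.
for post in fb.search("python", type=SEARCH, count=10):
    print("%s %s: %s" % (post.date, post.author, post.text))
# Comments on a specific post (a FacebookResult is accepted as the query):
#   for comment in fb.search(post, type=COMMENTS, count=100):
#       print(comment.text)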
Python | def profile(self, id=None, **kwargs):
"""Returns a Result for the given author id or alias."""
# 1) Construct request URL.
url = FACEBOOK + (u(id or "me")).replace(FACEBOOK, "")
url = URL(url, method=GET, query={"access_token": self.license})
kwargs.setdefault("cached", True)
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
# 2) Parse JSON response.
try:
data = URL(url).download(**kwargs)
data = json.loads(data)
except HTTP400BadRequest:
raise HTTP401Authentication
return Result(
id=data.get("id", ""), # 123456...
# https://www.facebook.com/tomdesmedt
url=data.get("link", ""),
handle=data.get("username", ""), # tomdesmedt
name=data.get("name"), # Tom De Smedt
# Artist, scientist, software engineer
text=data.get("description", ""),
language=data.get("locale", "").split("_")[0], # en_US
date=data.get("birthday", ""), # 10/10/1000
gender=data.get("gender", "")[:1], # m
locale=data.get("hometown", {}).get("name", ""),
votes=int(data.get("likes", 0)) # (for product pages)
) | def profile(self, id=None, **kwargs):
"""Returns a Result for the given author id or alias."""
# 1) Construct request URL.
url = FACEBOOK + (u(id or "me")).replace(FACEBOOK, "")
url = URL(url, method=GET, query={"access_token": self.license})
kwargs.setdefault("cached", True)
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
# 2) Parse JSON response.
try:
data = URL(url).download(**kwargs)
data = json.loads(data)
except HTTP400BadRequest:
raise HTTP401Authentication
return Result(
id=data.get("id", ""), # 123456...
# https://www.facebook.com/tomdesmedt
url=data.get("link", ""),
handle=data.get("username", ""), # tomdesmedt
name=data.get("name"), # Tom De Smedt
# Artist, scientist, software engineer
text=data.get("description", ""),
language=data.get("locale", "").split("_")[0], # en_US
date=data.get("birthday", ""), # 10/10/1000
gender=data.get("gender", "")[:1], # m
locale=data.get("hometown", {}).get("name", ""),
votes=int(data.get("likes", 0)) # (for product pages)
) |
Python | def search(self, query, type=NEWS, start=1, count=10, sort=LATEST, size=SMALL, cached=True, **kwargs):
"""Returns a list of results from the given RSS or Atom newsfeed
URL."""
if type != NEWS:
raise SearchEngineTypeError
if not query or start < 1 or count < 1:
return Results(query, query, NEWS)
# 1) Construct request URL.
# 2) Parse RSS/Atom response.
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
tags = kwargs.pop("tags", [])
data = URL(query).download(cached=cached, **kwargs)
data = feedparser.parse(bytestring(data))
results = Results(query, query, NEWS)
results.total = None
for x in data["entries"][:count]:
s = "\n\n".join([v.get("value")
for v in x.get("content", [])]) or x.get("summary")
r = Result(url=None)
r.id = self.format(x.get("id"))
r.url = self.format(x.get("link"))
r.title = self.format(x.get("title"))
r.text = self.format(s)
r.date = self.format(x.get("updated"))
r.author = self.format(x.get("author"))
r.language = self.format(x.get("content") and
x.get("content")[0].get("language") or
data.get("language"))
for tag in tags:
# Parse custom tags.
# Newsfeed.search(tags=["dc:identifier"]) =>
# Result.dc_identifier.
tag = tag.replace(":", "_")
r[tag] = self.format(x.get(tag))
results.append(r)
return results | def search(self, query, type=NEWS, start=1, count=10, sort=LATEST, size=SMALL, cached=True, **kwargs):
"""Returns a list of results from the given RSS or Atom newsfeed
URL."""
if type != NEWS:
raise SearchEngineTypeError
if not query or start < 1 or count < 1:
return Results(query, query, NEWS)
# 1) Construct request URL.
# 2) Parse RSS/Atom response.
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
tags = kwargs.pop("tags", [])
data = URL(query).download(cached=cached, **kwargs)
data = feedparser.parse(bytestring(data))
results = Results(query, query, NEWS)
results.total = None
for x in data["entries"][:count]:
s = "\n\n".join([v.get("value")
for v in x.get("content", [])]) or x.get("summary")
r = Result(url=None)
r.id = self.format(x.get("id"))
r.url = self.format(x.get("link"))
r.title = self.format(x.get("title"))
r.text = self.format(s)
r.date = self.format(x.get("updated"))
r.author = self.format(x.get("author"))
r.language = self.format(x.get("content") and
x.get("content")[0].get("language") or
data.get("language"))
for tag in tags:
# Parse custom tags.
# Newsfeed.search(tags=["dc:identifier"]) =>
# Result.dc_identifier.
tag = tag.replace(":", "_")
r[tag] = self.format(x.get(tag))
results.append(r)
return results |
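Newsfeed.search() takes the feed URL itself as the query; a sketch that also extracts a custom Dublin Core tag, mirroring the tags example in the inline comment (the feed URL is just an illustration, and the bare constructor is an assumption).

feed = Newsfeed()  # hypothetical constructor; no license is needed for plain RSS/Atom
url = "http://feeds.bbci.co.uk/news/world/rss.xml"
for item in feed.search(url, count=5, cached=False, tags=["dc:identifier"]):
    print(item.title)
    print(item.url)
    print(item.dc_identifier)  # custom tag parsed into the Result, as documented above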
Python | def query(string, service=GOOGLE, **kwargs):
"""Returns the list of search query results from the given service.
For service=WIKIPEDIA, this is a single WikipediaArticle or None.
"""
service = service.lower()
if service in (GOOGLE, "google", "g"):
engine = Google
if service in (YAHOO, "yahoo", "y!"):
engine = Yahoo
if service in (BING, "bing"):
engine = Bing
if service in (DUCKDUCKGO, "duckduckgo", "ddg"):
engine = DuckDuckGo
if service in (TWITTER, "twitter", "tw"):
engine = Twitter
if service in (FACEBOOK, "facebook", "fb"):
engine = Facebook
if service in (WIKIPEDIA, "wikipedia", "wp"):
engine = Wikipedia
if service in (WIKIA, "wikia"):
engine = Wikia
if service in (DBPEDIA, "dbpedia", "dbp"):
engine = DBPedia
if service in (FLICKR, "flickr"):
engine = Flickr
try:
kw = {}
for a in ("license", "throttle", "language"):
if a in kwargs:
kw[a] = kwargs.pop(a)
return engine(kw).search(string, **kwargs)
except UnboundLocalError:
raise SearchEngineError("unknown search engine '%s'" % service) | def query(string, service=GOOGLE, **kwargs):
"""Returns the list of search query results from the given service.
For service=WIKIPEDIA, this is a single WikipediaArticle or None.
"""
service = service.lower()
if service in (GOOGLE, "google", "g"):
engine = Google
if service in (YAHOO, "yahoo", "y!"):
engine = Yahoo
if service in (BING, "bing"):
engine = Bing
if service in (DUCKDUCKGO, "duckduckgo", "ddg"):
engine = DuckDuckGo
if service in (TWITTER, "twitter", "tw"):
engine = Twitter
if service in (FACEBOOK, "facebook", "fb"):
engine = Facebook
if service in (WIKIPEDIA, "wikipedia", "wp"):
engine = Wikipedia
if service in (WIKIA, "wikia"):
engine = Wikia
if service in (DBPEDIA, "dbpedia", "dbp"):
engine = DBPedia
if service in (FLICKR, "flickr"):
engine = Flickr
try:
kw = {}
for a in ("license", "throttle", "language"):
if a in kwargs:
kw[a] = kwargs.pop(a)
return engine(kw).search(string, **kwargs)
except UnboundLocalError:
raise SearchEngineError("unknown search engine '%s'" % service) |
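query() hides the individual engine classes behind a service name; a short sketch (the license/throttle/language keywords are routed to the engine constructor, as the loop above shows).

for r in query("web mining", service="bing", count=10, throttle=1.0, license=None):
    print("%s %s" % (r.url, r.title))
# For Wikipedia the return value is a single article (or None), not a list:
article = query("Web mining", service="wikipedia")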
Python | def sort(terms=[], context="", service=GOOGLE, license=None, strict=True, prefix=False, **kwargs):
""" Returns a list of (percentage, term)-tuples for the given list of terms.
Sorts the terms in the list according to search result count.
When a context is defined, sorts according to relevancy to the context, e.g.:
sort(terms=["black", "green", "red"], context="Darth Vader") =>
yields "black" as the best candidate, because "black Darth Vader" is more common in search results.
- terms : list of search terms,
- context : term used for sorting,
- service : web service name (GOOGLE, YAHOO, BING),
- license : web service license id,
- strict : when True the query constructed from term + context is wrapped in quotes.
"""
service = SERVICES.get(service, SearchEngine)(
license, language=kwargs.pop("language", None))
R = []
for word in terms:
q = prefix and (context + " " + word) or (word + " " + context)
        q = q.strip()
q = strict and "\"%s\"" % q or q
t = service in (WIKIPEDIA, WIKIA) and "*" or SEARCH
r = service.search(q, type=t, count=1, **kwargs)
R.append(r)
s = float(sum([r.total or 1 for r in R])) or 1.0
R = [((r.total or 1) / s, r.query) for r in R]
R = sorted(R, reverse=kwargs.pop("reverse", True))
return R | def sort(terms=[], context="", service=GOOGLE, license=None, strict=True, prefix=False, **kwargs):
""" Returns a list of (percentage, term)-tuples for the given list of terms.
Sorts the terms in the list according to search result count.
When a context is defined, sorts according to relevancy to the context, e.g.:
sort(terms=["black", "green", "red"], context="Darth Vader") =>
yields "black" as the best candidate, because "black Darth Vader" is more common in search results.
- terms : list of search terms,
- context : term used for sorting,
- service : web service name (GOOGLE, YAHOO, BING),
- license : web service license id,
- strict : when True the query constructed from term + context is wrapped in quotes.
"""
service = SERVICES.get(service, SearchEngine)(
license, language=kwargs.pop("language", None))
R = []
for word in terms:
q = prefix and (context + " " + word) or (word + " " + context)
        q = q.strip()
q = strict and "\"%s\"" % q or q
t = service in (WIKIPEDIA, WIKIA) and "*" or SEARCH
r = service.search(q, type=t, count=1, **kwargs)
R.append(r)
s = float(sum([r.total or 1 for r in R])) or 1.0
R = [((r.total or 1) / s, r.query) for r in R]
R = sorted(R, reverse=kwargs.pop("reverse", True))
return R |
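A sketch of the context-sorting idea from the docstring; the return value is a list of (percentage, query)-tuples whose first elements sum to roughly 1.0.

ranked = sort(
    terms=["black", "white", "red", "green"],
    context="Darth Vader",
    service="bing",   # assumes "bing" is a valid SERVICES key; any supported engine name works
    license=None,
    strict=True)
for weight, term in ranked:
    print("%.2f %s" % (weight, term))  # e.g. 0.45 "black Darth Vader"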
Python | def traverse(self, visit=lambda node: None):
"""Executes the visit function on this node and each of its child
nodes."""
visit(self)
[node.traverse(visit) for node in self.children] | def traverse(self, visit=lambda node: None):
"""Executes the visit function on this node and each of its child
nodes."""
visit(self)
[node.traverse(visit) for node in self.children] |
Python | def declaration(self):
"""Yields the <!doctype> declaration, as a TEXT Node or None."""
for child in self.children:
if isinstance(child._p, bs4.Doctype): # previously Declaration
return child | def declaration(self):
"""Yields the <!doctype> declaration, as a TEXT Node or None."""
for child in self.children:
if isinstance(child._p, bs4.Doctype): # previously Declaration
return child |
Python | def _parse_attribute(self, s):
""" Returns an (attribute, value)-tuple for the given attribute selector.
"""
s = s.strip("[]")
s = s.replace("'", "")
s = s.replace('"', "")
s = _decode_space(s)
s = re.sub(r"(\~|\||\^|\$|\*)\=", "=\\1", s)
s = s.split("=") + [True]
s = s[:2]
if s[1] is not True:
r = r"^%s$"
if s[1].startswith(("~", "|", "^", "$", "*")):
p, s[1] = s[1][0], s[1][1:]
if p == "~":
r = r"(^|\s)%s(\s|$)"
if p == "|":
r = r"^%s(-|$)" # XXX doesn't work with spaces.
if p == "^":
r = r"^%s"
if p == "$":
r = r"%s$"
if p == "*":
r = r"%s"
s[1] = re.compile(r % s[1], re.I)
return s[:2] | def _parse_attribute(self, s):
""" Returns an (attribute, value)-tuple for the given attribute selector.
"""
s = s.strip("[]")
s = s.replace("'", "")
s = s.replace('"', "")
s = _decode_space(s)
s = re.sub(r"(\~|\||\^|\$|\*)\=", "=\\1", s)
s = s.split("=") + [True]
s = s[:2]
if s[1] is not True:
r = r"^%s$"
if s[1].startswith(("~", "|", "^", "$", "*")):
p, s[1] = s[1][0], s[1][1:]
if p == "~":
r = r"(^|\s)%s(\s|$)"
if p == "|":
r = r"^%s(-|$)" # XXX doesn't work with spaces.
if p == "^":
r = r"^%s"
if p == "$":
r = r"%s$"
if p == "*":
r = r"%s"
s[1] = re.compile(r % s[1], re.I)
return s[:2] |
Python | def _first_child(self, e):
"""Returns the first child Element of the given element."""
if isinstance(e, Node):
for e in e.children:
if isinstance(e, Element):
return e | def _first_child(self, e):
"""Returns the first child Element of the given element."""
if isinstance(e, Node):
for e in e.children:
if isinstance(e, Element):
return e |
Python | def _next_sibling(self, e):
"""Returns the first next sibling Element of the given element."""
while isinstance(e, Node):
e = e.next
if isinstance(e, Element):
return e | def _next_sibling(self, e):
"""Returns the first next sibling Element of the given element."""
while isinstance(e, Node):
e = e.next
if isinstance(e, Element):
return e |
Python | def _previous_sibling(self, e):
"""Returns the last previous sibling Element of the given element."""
while isinstance(e, Node):
e = e.previous
if isinstance(e, Element):
return e | def _previous_sibling(self, e):
"""Returns the last previous sibling Element of the given element."""
while isinstance(e, Node):
e = e.previous
if isinstance(e, Element):
return e |
Python | def _contains(self, e, s):
""" Returns True if string s occurs in the given element (case-insensitive).
"""
s = re.sub(r"^contains\((.*?)\)$", "\\1", s)
s = re.sub(r"^[\"']|[\"']$", "", s)
s = _decode_space(s)
return re.search(s.lower(), e.content.lower()) is not None | def _contains(self, e, s):
""" Returns True if string s occurs in the given element (case-insensitive).
"""
s = re.sub(r"^contains\((.*?)\)$", "\\1", s)
s = re.sub(r"^[\"']|[\"']$", "", s)
s = _decode_space(s)
return re.search(s.lower(), e.content.lower()) is not None |
Python | def match(self, e):
"""Returns True if the given element matches the simple CSS
selector."""
if not isinstance(e, Element):
return False
if self.tag not in (e.tag, "*"):
return False
if self.id not in ((e.id or "").lower(), "", None):
return False
if self.classes.issubset(set(map(lambda s: s.lower(), e.attr.get("class", [])))) is False:
return False
if "first-child" in self.pseudo and self._first_child(e.parent) != e:
return False
if any(x.startswith("contains") and not self._contains(e, x) for x in self.pseudo):
return False # jQuery :contains("...") selector.
for k, v in self.attributes.items():
            # TODO: is ' '.join(e.attrs[k]) correct?
if k not in e.attrs or not (v is True or re.search(v, ' '.join(e.attrs[k])) is not None):
return False
return True | def match(self, e):
"""Returns True if the given element matches the simple CSS
selector."""
if not isinstance(e, Element):
return False
if self.tag not in (e.tag, "*"):
return False
if self.id not in ((e.id or "").lower(), "", None):
return False
if self.classes.issubset(set(map(lambda s: s.lower(), e.attr.get("class", [])))) is False:
return False
if "first-child" in self.pseudo and self._first_child(e.parent) != e:
return False
if any(x.startswith("contains") and not self._contains(e, x) for x in self.pseudo):
return False # jQuery :contains("...") selector.
for k, v in self.attributes.items():
            # TODO: is ' '.join(e.attrs[k]) correct?
if k not in e.attrs or not (v is True or re.search(v, ' '.join(e.attrs[k])) is not None):
return False
return True |
Python | def search(self, e):
"""Returns the nested elements that match the simple CSS selector."""
# Map tag to True if it is "*".
tag = self.tag == "*" or self.tag
# Map id into a case-insensitive **kwargs dict.
i = lambda s: re.compile(r"\b%s(?=$|\s)" % s, re.I)
a = {"id": i(self.id)} if self.id else {}
a.update(map(lambda kv: (kv[0], kv[1]), self.attributes.items()))
# Match tag + id + all classes + relevant pseudo-elements.
if not isinstance(e, Element):
return []
if len(self.classes) == 0 or len(self.classes) >= 2:
e = map(Element, e._p.findAll(tag, attrs=a))
if len(self.classes) == 1:
e = map(Element,
                    e._p.findAll(tag, attrs=dict(a, **{"class": i(list(self.classes)[0])})))
if len(self.classes) >= 2:
# e = filter(lambda e: self.classes.issubset(set(e.attr.get("class", "").lower().split())), e)
e = filter(lambda e: self.classes.issubset(
set([c.lower() for c in e.attr.get("class", "")])), e)
if "first-child" in self.pseudo:
e = filter(lambda e: e == self._first_child(e.parent), e)
if any(x.startswith("contains") for x in self.pseudo):
e = filter(lambda e: all(
not x.startswith("contains") or self._contains(e, x) for x in self.pseudo), e)
return e | def search(self, e):
"""Returns the nested elements that match the simple CSS selector."""
# Map tag to True if it is "*".
tag = self.tag == "*" or self.tag
# Map id into a case-insensitive **kwargs dict.
i = lambda s: re.compile(r"\b%s(?=$|\s)" % s, re.I)
a = {"id": i(self.id)} if self.id else {}
a.update(map(lambda kv: (kv[0], kv[1]), self.attributes.items()))
# Match tag + id + all classes + relevant pseudo-elements.
if not isinstance(e, Element):
return []
if len(self.classes) == 0 or len(self.classes) >= 2:
e = map(Element, e._p.findAll(tag, attrs=a))
if len(self.classes) == 1:
e = map(Element,
                    e._p.findAll(tag, attrs=dict(a, **{"class": i(list(self.classes)[0])})))
if len(self.classes) >= 2:
# e = filter(lambda e: self.classes.issubset(set(e.attr.get("class", "").lower().split())), e)
e = filter(lambda e: self.classes.issubset(
set([c.lower() for c in e.attr.get("class", "")])), e)
if "first-child" in self.pseudo:
e = filter(lambda e: e == self._first_child(e.parent), e)
if any(x.startswith("contains") for x in self.pseudo):
e = filter(lambda e: all(
not x.startswith("contains") or self._contains(e, x) for x in self.pseudo), e)
return e |
Python | def search(self, e):
"""Returns the nested elements that match the CSS selector chain."""
m, root = [], e
for chain in self:
e = [root]
for combinator, s in chain:
# Search Y, where:
if combinator == " ":
# X Y => X is ancestor of Y
e = map(s.search, e)
e = list(itertools.chain(*e))
if combinator == ">":
# X > Y => X is parent of Y
e = map(lambda e: filter(s.match, e.children), e)
e = list(itertools.chain(*e))
if combinator == "<":
# X < Y => X is child of Y
e = map(lambda e: e.parent, e)
e = filter(s.match, e)
if combinator == "+":
# X + Y => X directly precedes Y
e = map(s._next_sibling, e)
e = filter(s.match, e)
m.extend(e)
return m | def search(self, e):
"""Returns the nested elements that match the CSS selector chain."""
m, root = [], e
for chain in self:
e = [root]
for combinator, s in chain:
# Search Y, where:
if combinator == " ":
# X Y => X is ancestor of Y
e = map(s.search, e)
e = list(itertools.chain(*e))
if combinator == ">":
# X > Y => X is parent of Y
e = map(lambda e: filter(s.match, e.children), e)
e = list(itertools.chain(*e))
if combinator == "<":
# X < Y => X is child of Y
e = map(lambda e: e.parent, e)
e = filter(s.match, e)
if combinator == "+":
# X + Y => X directly precedes Y
e = map(s._next_sibling, e)
e = filter(s.match, e)
m.extend(e)
return m |
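These Selector/SelectorChain classes back a jQuery-like lookup on the parsed element tree. A hedged sketch, assuming the module exposes a DOM class whose call syntax delegates to SelectorChain.search(); DOM and its call form are not shown in this excerpt, while Element.content and Element.attrs are used by the matching code above.

# Hypothetical: DOM(html) builds the Element tree that match()/search() walk.
html = URL("http://www.reddit.com/top/").download(cached=True)
dom = DOM(html)
for a in dom("div.entry a.title[href^='http']")[:5]:
    print(plaintext(a.content))
    print(a.attrs.get("href"))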
Python | def parse(self, html, url=""):
"""Returns a list of Links parsed from the given HTML string."""
if html is None:
return None
self._url = url
self._data = []
self.feed(self.clean(html))
self.close()
self.reset()
return self._data | def parse(self, html, url=""):
"""Returns a list of Links parsed from the given HTML string."""
if html is None:
return None
self._url = url
self._data = []
self.feed(self.clean(html))
self.close()
self.reset()
return self._data |
Python | def push(self, link, priority=1.0, sort=FILO):
"""Pushes the given link to the queue.
Position in the queue is determined by priority.
Equal ranks are sorted FIFO or FILO.
With priority=1.0 and FILO, the link is inserted to the queue.
With priority=0.0 and FIFO, the link is appended to the queue.
"""
if not isinstance(link, Link):
link = Link(url=link)
dt = time.time()
dt = sort == FIFO and dt or 1 / dt
bisect.insort(self._queue, (1 - priority, dt, link))
self._queued[link.url] = True | def push(self, link, priority=1.0, sort=FILO):
"""Pushes the given link to the queue.
Position in the queue is determined by priority.
Equal ranks are sorted FIFO or FILO.
With priority=1.0 and FILO, the link is inserted to the queue.
With priority=0.0 and FIFO, the link is appended to the queue.
"""
if not isinstance(link, Link):
link = Link(url=link)
dt = time.time()
dt = sort == FIFO and dt or 1 / dt
bisect.insort(self._queue, (1 - priority, dt, link))
self._queued[link.url] = True |
Python | def pop(self, remove=True):
"""Returns the next Link queued to visit and removes it from the queue.
Links on a recently visited (sub)domain are skipped until
Crawler.delay has elapsed.
"""
now = time.time()
for i, (priority, dt, link) in enumerate(self._queue):
if self.delay <= now - self.history.get(base(link.url), 0):
if remove is True:
self._queue.pop(i)
self._queued.pop(link.url, None)
return link | def pop(self, remove=True):
"""Returns the next Link queued to visit and removes it from the queue.
Links on a recently visited (sub)domain are skipped until
Crawler.delay has elapsed.
"""
now = time.time()
for i, (priority, dt, link) in enumerate(self._queue):
if self.delay <= now - self.history.get(base(link.url), 0):
if remove is True:
self._queue.pop(i)
self._queued.pop(link.url, None)
return link |
Python | def crawl(self, method=DEPTH, **kwargs):
"""Visits the next link in Crawler._queue.
If the link is on a domain recently visited (< Crawler.delay) it
is skipped. Parses the content at the link for new links and
adds them to the queue, according to their Crawler.priority().
Visited links (and content) are passed to Crawler.visit().
"""
link = self.pop()
if link is None:
return False
if link.url not in self.visited:
t = time.time()
url = URL(link.url)
if url.mimetype == "text/html":
try:
kwargs.setdefault("unicode", True)
html = url.download(**kwargs)
for new in self.parse(html, url=link.url):
new.url = abs(new.url, base=url.redirect or link.url)
new.url = self.normalize(new.url)
# 1) Parse new links from HTML web pages.
# 2) Schedule unknown links for a visit.
# 3) Only links that are not already queued are queued.
# 4) Only links for which Crawler.follow() is True are queued.
# 5) Only links on Crawler.domains are queued.
if new.url == link.url:
continue
if new.url in self.visited:
continue
if new.url in self._queued:
continue
if self.follow(new) is False:
continue
if self.domains and not base(new.url).endswith(tuple(self.domains)):
continue
# 6) Limit the queue (remove tail), unless you are
# Google.
if self.QUEUE is not None and \
self.QUEUE * 1.25 < len(self._queue):
self._queue = self._queue[:self.QUEUE]
self._queued.clear()
self._queued.update(
dict((q[2].url, True) for q in self._queue))
# 7) Position in the queue is determined by Crawler.priority().
# 8) Equal ranks are sorted FIFO or FILO.
self.push(
new, priority=self.priority(new, method=method), sort=self.sort)
self.visit(link, source=html)
except URLError:
# URL can not be reached (HTTP404NotFound, URLTimeout).
self.fail(link)
else:
# URL MIME-type is not HTML, don't know how to handle.
self.fail(link)
# Log the current time visited for the domain (see Crawler.pop()).
# Log the URL as visited.
self.history[base(link.url)] = t
self.visited[link.url] = True
return True
# Nothing happened, we already visited this link.
return False | def crawl(self, method=DEPTH, **kwargs):
"""Visits the next link in Crawler._queue.
If the link is on a domain recently visited (< Crawler.delay) it
is skipped. Parses the content at the link for new links and
adds them to the queue, according to their Crawler.priority().
Visited links (and content) are passed to Crawler.visit().
"""
link = self.pop()
if link is None:
return False
if link.url not in self.visited:
t = time.time()
url = URL(link.url)
if url.mimetype == "text/html":
try:
kwargs.setdefault("unicode", True)
html = url.download(**kwargs)
for new in self.parse(html, url=link.url):
new.url = abs(new.url, base=url.redirect or link.url)
new.url = self.normalize(new.url)
# 1) Parse new links from HTML web pages.
# 2) Schedule unknown links for a visit.
# 3) Only links that are not already queued are queued.
# 4) Only links for which Crawler.follow() is True are queued.
# 5) Only links on Crawler.domains are queued.
if new.url == link.url:
continue
if new.url in self.visited:
continue
if new.url in self._queued:
continue
if self.follow(new) is False:
continue
if self.domains and not base(new.url).endswith(tuple(self.domains)):
continue
# 6) Limit the queue (remove tail), unless you are
# Google.
if self.QUEUE is not None and \
self.QUEUE * 1.25 < len(self._queue):
self._queue = self._queue[:self.QUEUE]
self._queued.clear()
self._queued.update(
dict((q[2].url, True) for q in self._queue))
# 7) Position in the queue is determined by Crawler.priority().
# 8) Equal ranks are sorted FIFO or FILO.
self.push(
new, priority=self.priority(new, method=method), sort=self.sort)
self.visit(link, source=html)
except URLError:
# URL can not be reached (HTTP404NotFound, URLTimeout).
self.fail(link)
else:
# URL MIME-type is not HTML, don't know how to handle.
self.fail(link)
# Log the current time visited for the domain (see Crawler.pop()).
# Log the URL as visited.
self.history[base(link.url)] = t
self.visited[link.url] = True
return True
# Nothing happened, we already visited this link.
return False |
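A hedged usage sketch in the style of the pattern.web documentation; the start URL, delay and download keywords are illustrative, and it assumes Crawler, DEPTH and the done property behave as in that library:

class SimpleCrawler(Crawler):
    def visit(self, link, source=None):
        print("visited:", link.url, "from:", link.referrer)
    def fail(self, link):
        print("failed:", link.url)

crawler = SimpleCrawler(links=["https://www.example.com/"], delay=3)
while not crawler.done:
    # cached/throttle are forwarded to URL.download() through **kwargs
    crawler.crawl(method=DEPTH, cached=False, throttle=3)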
Python | def priority(self, link, method=DEPTH):
""" Called from Crawler.crawl() to determine the priority of this link,
as a number between 0.0-1.0. Links with higher priority are visited first.
"""
# Depth-first search dislikes external links to other (sub)domains.
external = base(link.url) != base(link.referrer)
if external is True:
if method == DEPTH:
return 0.75
if method == BREADTH:
return 0.85
return 0.80 | def priority(self, link, method=DEPTH):
""" Called from Crawler.crawl() to determine the priority of this link,
as a number between 0.0-1.0. Links with higher priority are visited first.
"""
# Depth-first search dislikes external links to other (sub)domains.
external = base(link.url) != base(link.referrer)
if external is True:
if method == DEPTH:
return 0.75
if method == BREADTH:
return 0.85
return 0.80 |
Python | def visit(self, link, source=None):
"""Called from Crawler.crawl() when the link is crawled.
When source=None, the link is not a web page (and was not parsed),
or possibly a URLTimeout occurred (content size too big).
"""
pass | def visit(self, link, source=None):
"""Called from Crawler.crawl() when the link is crawled.
When source=None, the link is not a web page (and was not parsed),
or possibly a URLTimeout occurred (content size too big).
"""
pass |
Python | def fail(self, link):
""" Called from Crawler.crawl() for link whose MIME-type could not be determined,
or which raised a URLError on download.
"""
pass | def fail(self, link):
""" Called from Crawler.crawl() for link whose MIME-type could not be determined,
or which raised a URLError on download.
"""
pass |
Python | def _open(self, path):
""" Returns a file-like object with a read() method,
from the given file path or string.
"""
if isinstance(path, basestring) and os.path.exists(path):
return open(path, "rb")
if hasattr(path, "read"):
return path
return StringIO(path) | def _open(self, path):
""" Returns a file-like object with a read() method,
from the given file path or string.
"""
if isinstance(path, basestring) and os.path.exists(path):
return open(path, "rb")
if hasattr(path, "read"):
return path
return StringIO(path) |
Python | def process_pdf(rsrcmgr, device, fp, pagenos=None, maxpages=0,
password='', caching=True, check_extractable=True):
"""This function is depreciated in pdfminer."""
interpreter = PDFPageInterpreter(rsrcmgr, device)
for page in PDFPage.get_pages(fp, pagenos, maxpages=maxpages,
password=password, caching=caching,
check_extractable=check_extractable):
interpreter.process_page(page)
return | def process_pdf(rsrcmgr, device, fp, pagenos=None, maxpages=0,
password='', caching=True, check_extractable=True):
"""This function is depreciated in pdfminer."""
interpreter = PDFPageInterpreter(rsrcmgr, device)
for page in PDFPage.get_pages(fp, pagenos, maxpages=maxpages,
password=password, caching=caching,
check_extractable=check_extractable):
interpreter.process_page(page)
return |
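A hedged sketch of driving the shim above with pdfminer.six; it assumes PDFResourceManager, TextConverter and LAParams are importable as shown and uses a placeholder file name:

from io import StringIO

from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfinterp import PDFResourceManager

rsrcmgr = PDFResourceManager()
output = StringIO()
device = TextConverter(rsrcmgr, output, laparams=LAParams())
with open("document.pdf", "rb") as fp:      # placeholder path
    process_pdf(rsrcmgr, device, fp)
device.close()
print(output.getvalue())                    # extracted text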
Python | def _stream_cb(
self, in_data: bytes, frame_count: int,
time_info: dict, status_flags: int
) -> tuple:
"""
Callback for the stream listener.
:param in_data: recorded data
:param frame_count: number of frames read
:param time_info: dictionary with the following keys:
input_buffer_adc_time, current_time, and output_buffer_dac_time
:param status_flags: one of PortAudio Callback flags
:return: tuple of (out_data, flag)
"""
# Just store the data frames received and continue,
# so the callback finishes as fast as possible.
self._buf += in_data
return None, pyaudio.paContinue | def _stream_cb(
self, in_data: bytes, frame_count: int,
time_info: dict, status_flags: int
) -> tuple:
"""
Callback for the stream listener.
:param in_data: recorded data
:param frame_count: number of frames read
:param time_info: dictionary with the following keys:
input_buffer_adc_time, current_time, and output_buffer_dac_time
:param status_flags: one of PortAudio Callback flags
:return: tuple of (out_data, flag)
"""
# Just store the data frames received and continue,
# so the callback finishes as fast as possible.
self._buf += in_data
return None, pyaudio.paContinue |
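A standalone, hedged sketch of the PyAudio callback protocol the method above implements; the sample format, rate and buffer size are illustrative and not taken from the original class:

import pyaudio

buf = bytearray()

def stream_cb(in_data, frame_count, time_info, status_flags):
    buf.extend(in_data)                 # store the raw bytes and return quickly
    return None, pyaudio.paContinue

pa = pyaudio.PyAudio()
stream = pa.open(format=pyaudio.paInt32,    # 32-bit samples, matching the 'i' struct format used later
                 channels=1, rate=44100, input=True,
                 frames_per_buffer=1024, stream_callback=stream_cb)
stream.start_stream()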
Python | def sample_size(self) -> int:
"""
Get size of a single audio sample.
:return: Size in bytes
"""
return pyaudio.get_sample_size(self._sample_fmt) | def sample_size(self) -> int:
"""
Get size of a single audio sample.
:return: Size in bytes
"""
return pyaudio.get_sample_size(self._sample_fmt) |
Python | def _frames(self) -> Generator[int, Any, None]:
"""
Split and unpack the received data into sample frames
:return: iterator of audio sample frames as integers
"""
sample_size = self.sample_size
sample_count = ceil(len(self._buf) / sample_size)
return (self._unpack_frame(
self._buf[i * sample_size:(i + 1) * sample_size]
) for i in range(sample_count)) | def _frames(self) -> Generator[int, Any, None]:
"""
Split and unpack the received data into sample frames
:return: iterator of audio sample frames as integers
"""
sample_size = self.sample_size
sample_count = ceil(len(self._buf) / sample_size)
return (self._unpack_frame(
self._buf[i * sample_size:(i + 1) * sample_size]
) for i in range(sample_count)) |
Python | def frames(self) -> list:
"""
Return audio frames and clear buffer
:return: recorded frames as unpacked integers
"""
rv = list(self._frames)
self._buf = b''
return rv | def frames(self) -> list:
"""
Return audio frames and clear buffer
:return: recorded frames as unpacked integers
"""
rv = list(self._frames)
self._buf = b''
return rv |
Python | def _unpack_frame(frame: bytes) -> int:
"""
Convert an audio frame to a 32-bit integer
:param frame: Audio frame
:return: Unpacked audio frame
"""
return struct.unpack('i', frame)[0] | def _unpack_frame(frame: bytes) -> int:
"""
Convert an audio frame to a 32-bit integer
:param frame: Audio frame
:return: Unpacked audio frame
"""
return struct.unpack('i', frame)[0] |
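A small standalone check of the frame format that _unpack_frame assumes: 'i' is a 4-byte signed integer, so every frame slice must be exactly sample_size bytes long:

import struct

frame = struct.pack('i', -123456)     # one synthetic 32-bit sample
print(struct.unpack('i', frame)[0])   # -123456
print(struct.calcsize('i'))           # 4 on common platforms, i.e. the slice width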
Python | def quit(self):
"""
Close stream and terminate connection to audio input.
"""
self._stream.stop_stream()
self._stream.close()
self._sys.terminate() | def quit(self):
"""
Close stream and terminate connection to audio input.
"""
self._stream.stop_stream()
self._stream.close()
self._sys.terminate() |
Python | def check_health():
"""
Service health check
---
responses:
200:
description: Service is running OK
"""
message = 'Service running OK'
logging.info(message)
return message | def check_health():
"""
Service health check
---
responses:
200:
description: Service is running OK
"""
message = 'Service running OK'
logging.info(message)
return message |
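A hedged sketch of how such a view is typically wired up; the route path and app object are assumptions, and flasgger's Swagger(app) would render the YAML docstring:

from flask import Flask
from flasgger import Swagger

app = Flask(__name__)
Swagger(app)                                               # serves the docstring as an OpenAPI spec
app.add_url_rule('/health', 'check_health', check_health)  # illustrative route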
Python | def write_recursive_hdf5(dataset: Dataset, group: Group) -> None:
"""Utility function to write a Group recursively in an HDF5 file.
Parameters
----------
dataset
Dataset in the HDF5 file.
group
Group to write in the HDF5 file.
Returns
-------
:
Warning
-------
Only :class:`str`, :class:`float`, :class:`int` and :class:`~numpy.ndarray`
types are currently supported for recursive writing in an HDF5 :class:`~h5py.Dataset`.
:class:`dict` and :class:`list` types will be converted to :class:`str`, which is in
turn saved as :class:`bytes` in the HDF5 database.
If read with :func:`read_hdf5`, such records will be automatically converted to their
original type in the group.
"""
# accepted type variables for recursive writing
accepted_types = (str, float, int, ndarray)
converted_types = (dict, list)
for key in dir(group):
if '__' not in key:
record = getattr(group, key)
#vtype = type(record).__name__
if isinstance(record, accepted_types):
dataset.create_dataset(key, data=record)
elif isinstance(record, converted_types):
dataset.create_dataset(key, data=str(record))
return | def write_recursive_hdf5(dataset: Dataset, group: Group) -> None:
"""Utility function to write a Group recursively in an HDF5 file.
Parameters
----------
dataset
Dataset in the HDF5 file.
group
Group to write in the HDF5 file.
Returns
-------
:
Warning
-------
Only :class:`str`, :class:`float`, :class:`int` and :class:`~numpy.ndarray`
types are currently supported for recursive writing in an HDF5 :class:`~h5py.Dataset`.
:class:`dict` and :class:`list` types will be converted to :class:`str`, which is in
turn saved as :class:`bytes` in the HDF5 database.
If read with :func:`read_hdf5`, such records will be automatically converted to their
original type in the group.
"""
# accepted type variables for recursive writing
accepted_types = (str, float, int, ndarray)
converted_types = (dict, list)
for key in dir(group):
if '__' not in key:
record = getattr(group, key)
#vtype = type(record).__name__
if isinstance(record, accepted_types):
dataset.create_dataset(key, data=record)
elif isinstance(record, converted_types):
dataset.create_dataset(key, data=str(record))
return |
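A hedged usage sketch, assuming h5py and the araucaria Group class are available; the file name and attribute values are illustrative only:

from h5py import File
from numpy import linspace
from araucaria import Group

group = Group(**{'energy': linspace(8900, 9000, 11), 'temp': 10.0})
with File('example.h5', 'w') as f:
    target = f.create_group('sample1')    # h5py groups also expose create_dataset()
    write_recursive_hdf5(target, group)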
Python | def summary_hdf5(fpath: Path, regex: str=None, optional: Optional[list]=None,
**pre_edge_kws:dict) -> Report:
"""Returns a summary report of datasets in an HDF5 file.
Parameters
----------
fpath
Path to HDF5 file.
regex
Search string to filter results by dataset name. See Notes for details.
The default is None.
optional
List with optional parameters. See Notes for details.
The default is None.
pre_edge_kws
Dictionary with arguments for :func:`~araucaria.xas.normalize.pre_edge`.
Returns
-------
:
Report for datasets in the HDF5 file.
Raises
------
IOError
If the HDF5 file does not exist in the specified path.
Notes
-----
Summary data includes the following:
1. Dataset index.
2. Dataset name.
3. Measurement mode.
4. Numbers of scans.
5. Absorption edge step :math:`\Delta\mu(E_0)`, if ``optional=['edge_step']``.
6. Absorption threshold energy :math:`E_0`, if ``optional=['e0']``.
7. Merged scans, if ``optional=['merged_scans']``.
8. Optional parameters if they exist as attributes in the dataset.
A ``regex`` value can be used to filter dataset names based
on a regular expression (regex). For valid regex syntax, please
check the documentation of the module :mod:`re`.
The number of scans and names of merged files are retrieved
from the ``merged_scans`` attribute of the HDF5 dataset.
The absorption threshold and the edge step are retrieved by
calling the function :func:`~araucaria.xas.normalize.pre_edge`.
Optional parameters will be retrieved from the dataset as
attributes. Currently only :class:`str`, :class:`float` or
:class:`int` will be retrieved. Otherwise an empty character
will be printed in the report.
See also
--------
:func:`read_hdf5`
:class:`~araucaria.main.report.Report`
Examples
--------
>>> from araucaria.testdata import get_testpath
>>> from araucaria.io import summary_hdf5
>>> fpath = get_testpath('Fe_database.h5')
>>> # printing default summary
>>> report = summary_hdf5(fpath)
>>> report.show()
=================================
id dataset mode n
=================================
1 FeIISO4_20K mu 5
2 Fe_Foil mu_ref 5
3 Ferrihydrite_20K mu 5
4 Goethite_20K mu 5
=================================
>>> # printing summary with merged scans of Goethite groups
>>> report = summary_hdf5(fpath, regex='Goe', optional=['merged_scans'])
>>> report.show()
=======================================================
id dataset mode n merged_scans
=======================================================
1 Goethite_20K mu 5 20K_GOE_Fe_K_240.00000.xdi
20K_GOE_Fe_K_240.00001.xdi
20K_GOE_Fe_K_240.00002.xdi
20K_GOE_Fe_K_240.00003.xdi
20K_GOE_Fe_K_240.00004.xdi
=======================================================
>>> # printing custom parameters
>>> from araucaria.testdata import get_testpath
>>> from araucaria.io import read_xmu, write_hdf5
>>> fpath = get_testpath('xmu_testfile.xmu')
>>> # extracting mu and mu_ref scans
>>> group_mu = read_xmu(fpath, scan='mu')
>>> # adding additional attributes
>>> group_mu.symbol = 'Zn'
>>> group_mu.temp = 25.0
>>> # saving a new hdf5 file
>>> write_hdf5('database2.h5', group_mu, name='xmu_testfile', replace=True)
xmu_testfile written to database2.h5.
>>> report = summary_hdf5('database2.h5', optional=['symbol','temp'])
>>> report.show()
=========================================
id dataset mode n symbol temp
=========================================
1 xmu_testfile mu 1 Zn 25
=========================================
"""
# verifying existence of path:
if isfile(fpath):
hdf5 = File(fpath, "r")
else:
raise IOError("file %s does not exists." % fpath)
# list with parameter names
field_names = ['id', 'dataset', 'mode', 'n']
opt_list = ['merged_scans', 'edge_step', 'e0']
if pre_edge_kws == {}:
# default values
pre_edge_kws={'pre_range':[-150,-50], 'nnorm':3, 'post_range':[150, inf]}
# verifying optional values
if optional is not None:
for opt_val in optional:
field_names.append(opt_val)
# instantiating report class
report = Report()
report.set_columns(field_names)
# number of records
keys = list(hdf5.keys())
if regex is None:
pass
else:
index = []
for i, key in enumerate(keys):
if search(regex, key) is None:
pass
else:
index.append(i)
keys = [keys[i] for i in index]
nkeys = len(keys)
for i, key in enumerate(keys):
data = read_hdf5(fpath, str(key))
scanval = data.get_mode()
extra_content = False # aux variable for 'merged_scans'
try:
# merged_scans is saved as string, so we count the number of commas
nscans = hdf5[key]['merged_scans'].asstr()[()].count(',') + 1
except:
nscans = 1
field_vals = [i+1, key, scanval, nscans]
if optional is not None:
for j, opt_val in enumerate(optional):
if opt_val == 'merged_scans':
if i == 0:
# storing the col merge_index
merge_index = len(field_vals)
try:
list_scans = literal_eval(hdf5[key]['merged_scans'].asstr()[()] )
field_vals.append(list_scans[0])
extra_content = True
except:
field_vals.append('None')
elif opt_val in opt_list[1:]:
out = pre_edge(data, **pre_edge_kws)
field_vals.append(out[opt_val])
else:
# custom optional field
try:
val = hdf5[key][opt_val]
if isinstance(val[()], (int, float)):
# if val is int or float print it
field_vals.append(val[()])
elif isinstance(val[()], bytes):
# if val is bytes we convert it and check
val = convert_bytes_hdf5(val)
if isinstance(val, str):
field_vals.append(val)
else:
field_vals.append('')
else:
field_vals.append('')
except:
field_vals.append('')
report.add_row(field_vals)
if extra_content:
for item in list_scans[1:]:
field_vals = []
for j,index in enumerate(field_names):
if j != merge_index:
field_vals.append('')
else:
field_vals.append(item)
report.add_row(field_vals)
if i < (nkeys - 1):
report.add_midrule()
hdf5.close()
return report | def summary_hdf5(fpath: Path, regex: str=None, optional: Optional[list]=None,
**pre_edge_kws:dict) -> Report:
"""Returns a summary report of datasets in an HDF5 file.
Parameters
----------
fpath
Path to HDF5 file.
regex
Search string to filter results by dataset name. See Notes for details.
The default is None.
optional
List with optional parameters. See Notes for details.
The default is None.
pre_edge_kws
Dictionary with arguments for :func:`~araucaria.xas.normalize.pre_edge`.
Returns
-------
:
Report for datasets in the HDF5 file.
Raises
------
IOError
If the HDF5 file does not exist in the specified path.
Notes
-----
Summary data includes the following:
1. Dataset index.
2. Dataset name.
3. Measurement mode.
4. Numbers of scans.
5. Absorption edge step :math:`\Delta\mu(E_0)`, if ``optional=['edge_step']``.
6. Absorption threshold energy :math:`E_0`, if ``optional=['e0']``.
7. Merged scans, if ``optional=['merged_scans']``.
8. Optional parameters if they exist as attributes in the dataset.
A ``regex`` value can be used to filter dataset names based
on a regular expression (regex). For valid regex syntax, please
check the documentation of the module :mod:`re`.
The number of scans and names of merged files are retrieved
from the ``merged_scans`` attribute of the HDF5 dataset.
The absorption threshold and the edge step are retrieved by
calling the function :func:`~araucaria.xas.normalize.pre_edge`.
Optional parameters will be retrieved from the dataset as
attributes. Currently only :class:`str`, :class:`float` or
:class:`int` will be retrieved. Otherwise an empty character
will be printed in the report.
See also
--------
:func:`read_hdf5`
:class:`~araucaria.main.report.Report`
Examples
--------
>>> from araucaria.testdata import get_testpath
>>> from araucaria.io import summary_hdf5
>>> fpath = get_testpath('Fe_database.h5')
>>> # printing default summary
>>> report = summary_hdf5(fpath)
>>> report.show()
=================================
id dataset mode n
=================================
1 FeIISO4_20K mu 5
2 Fe_Foil mu_ref 5
3 Ferrihydrite_20K mu 5
4 Goethite_20K mu 5
=================================
>>> # printing summary with merged scans of Goethite groups
>>> report = summary_hdf5(fpath, regex='Goe', optional=['merged_scans'])
>>> report.show()
=======================================================
id dataset mode n merged_scans
=======================================================
1 Goethite_20K mu 5 20K_GOE_Fe_K_240.00000.xdi
20K_GOE_Fe_K_240.00001.xdi
20K_GOE_Fe_K_240.00002.xdi
20K_GOE_Fe_K_240.00003.xdi
20K_GOE_Fe_K_240.00004.xdi
=======================================================
>>> # printing custom parameters
>>> from araucaria.testdata import get_testpath
>>> from araucaria.io import read_xmu, write_hdf5
>>> fpath = get_testpath('xmu_testfile.xmu')
>>> # extracting mu and mu_ref scans
>>> group_mu = read_xmu(fpath, scan='mu')
>>> # adding additional attributes
>>> group_mu.symbol = 'Zn'
>>> group_mu.temp = 25.0
>>> # saving a new hdf5 file
>>> write_hdf5('database2.h5', group_mu, name='xmu_testfile', replace=True)
xmu_testfile written to database2.h5.
>>> report = summary_hdf5('database2.h5', optional=['symbol','temp'])
>>> report.show()
=========================================
id dataset mode n symbol temp
=========================================
1 xmu_testfile mu 1 Zn 25
=========================================
"""
# verifying existence of path:
if isfile(fpath):
hdf5 = File(fpath, "r")
else:
raise IOError("file %s does not exists." % fpath)
# list with parameter names
field_names = ['id', 'dataset', 'mode', 'n']
opt_list = ['merged_scans', 'edge_step', 'e0']
if pre_edge_kws == {}:
# default values
pre_edge_kws={'pre_range':[-150,-50], 'nnorm':3, 'post_range':[150, inf]}
# verifying optional values
if optional is not None:
for opt_val in optional:
field_names.append(opt_val)
# instantiating report class
report = Report()
report.set_columns(field_names)
# number of records
keys = list(hdf5.keys())
if regex is None:
pass
else:
index = []
for i, key in enumerate(keys):
if search(regex, key) is None:
pass
else:
index.append(i)
keys = [keys[i] for i in index]
nkeys = len(keys)
for i, key in enumerate(keys):
data = read_hdf5(fpath, str(key))
scanval = data.get_mode()
extra_content = False # aux variable for 'merged_scans'
try:
# merged_scans is saved as string, so we count the number of commas
nscans = hdf5[key]['merged_scans'].asstr()[()].count(',') + 1
except:
nscans = 1
field_vals = [i+1, key, scanval, nscans]
if optional is not None:
for j, opt_val in enumerate(optional):
if opt_val == 'merged_scans':
if i == 0:
# storing the col merge_index
merge_index = len(field_vals)
try:
list_scans = literal_eval(hdf5[key]['merged_scans'].asstr()[()] )
field_vals.append(list_scans[0])
extra_content = True
except:
field_vals.append('None')
elif opt_val in opt_list[1:]:
out = pre_edge(data, **pre_edge_kws)
field_vals.append(out[opt_val])
else:
# custom optional field
try:
val = hdf5[key][opt_val]
if isinstance(val[()], (int, float)):
# if val is int or float print it
field_vals.append(val[()])
elif isinstance(val[()], bytes):
# if val is bytes we convert it and check
val = convert_bytes_hdf5(val)
if isinstance(val, str):
field_vals.append(val)
else:
field_vals.append('')
else:
field_vals.append('')
except:
field_vals.append('')
report.add_row(field_vals)
if extra_content:
for item in list_scans[1:]:
field_vals = []
for j,index in enumerate(field_names):
if j != merge_index:
field_vals.append('')
else:
field_vals.append(item)
report.add_row(field_vals)
if i < (nkeys - 1):
report.add_midrule()
hdf5.close()
return report |
Python | def deglitch(group: Group, e_window: Union[str,list]='xas', sg_window_length: int=9,
sg_polyorder:int =3, alpha: float=.025,
max_glitches: Union[int,str]='default', max_glitch_length: int=4,
update: bool=False) -> dict:
"""Algorithm to deglitch a XAFS spectrum.
Parameters
----------
group
Group containing the spectrum to deglitch.
e_window
Energy window to search for outliers.
Options are 'xas', 'xanes' and 'exafs'.
Alternatively a list with 2 floats can be provided for
the start and end energy for the search.
See the Notes for further details. The default is 'xas'.
sg_window_length
Window length for the Savitzky-Golay filter on the normalized spectrum.
Must be an odd value. The default is 9.
sg_polyorder
Polynomial order for the Savitzky-Golay filter on the normalized spectrum.
The default is 3.
alpha
Significance level for generalized ESD test for outliers.
The default is 0.025.
max_glitches
Maximum number of outliers to remove.
The default is the floor division of the array length by 10.
max_glitch_length
Maximum length of glitch in energy points. The default is 4.
update
Indicates if the group should be updated with the deglitch attributes.
The default is False.
Returns
-------
:
Dictionary with the following arguments:
- ``index_glitches`` : indices of glitches in the original energy array.
- ``energy_glitches``: glitches in the original energy array.
- ``energy`` : deglitched energy array.
- ``mu`` : deglitched array. Returned if ``group.get_mode() = 'mu'``.
- ``fluo`` : deglitched array. Returned if ``group.get_mode() = 'fluo'``.
- ``mu_ref`` : deglitched array. Returned if ``group.get_mode() = 'mu_ref'``.
- ``deglitch_pars`` : dictionary with deglitch parameters.
Raises
------
TypeError
If ``group`` is not a valid Group instance.
AttributeError
If attribute ``energy`` does not exist in ``group``.
Warning
-------
Running :func:`~araucaria.xas.deglitch.deglitch` with ``update=True`` will overwrite
the ``energy`` and the absorption attribute of ``group``.
Notes
-----
This function deglitches a XAFS spectrum through a
two-step fitting with Savitzky-Golay filter and outlier
identification with a generalized extreme Studentized deviate (ESD)
test [1]_.
- ``e_window='xas'`` considers the full spectrum for deglitching.
- ``e_window='xanes'`` considers the beginning of the energy array
up to 150 eV above :math:`E_0`.
- ``e_window='exafs'`` considers from 150 eV above :math:`E_0` to the
end of the energy array.
- ``e_window=[float,float]`` provides start and end energies in eV.
References
----------
.. [1] Wallace, S. M., Alsina, M. A., & Gaillard, J. F. (2021)
"An algorithm for the automatic deglitching of x-ray absorption
spectroscopy data". J. Synchrotron Rad. 28, https://doi.org/10.1107/S1600577521003611
Example
-------
.. plot::
:context: reset
>>> from numpy import allclose
>>> from araucaria.testdata import get_testpath
>>> from araucaria import Group
>>> from araucaria.io import read_dnd
>>> from araucaria.xas import deglitch, pre_edge, autobk
>>> from araucaria.utils import check_objattrs
>>> fpath = get_testpath('dnd_glitchfile.dat')
>>> group = read_dnd(fpath, scan='fluo') # extracting fluo and mu_ref scans
>>> cgroup = group.copy()
>>> degli = deglitch(cgroup, update=True)
>>> attrs = ['index_glitches', 'energy_glitches', 'deglitch_pars']
>>> check_objattrs(cgroup, Group, attrs)
[True, True, True]
>>> allclose(cgroup.energy_glitches, group.energy[cgroup.index_glitches])
True
>>> print(cgroup.energy_glitches)
[7552.2789 7548.1747 7390.512 7387.2613]
>>> # plotting original and deglitched spectrum
>>> from araucaria.plot import fig_xas_template
>>> import matplotlib.pyplot as plt
>>> for g in [group, cgroup]:
... pre = pre_edge(g, update=True)
... autbk = autobk(g, update=True)
>>> fig, ax = fig_xas_template(panels='xe')
>>> line = ax[0].plot(group.energy, group.norm, label='original', color='tab:red')
>>> line = ax[0].plot(cgroup.energy, cgroup.norm, label='deglitched', color='k')
>>> line = ax[1].plot(group.k, group.k**2 * group.chi, color='tab:red')
>>> line = ax[1].plot(cgroup.k, cgroup.k**2 * cgroup.chi, color='k')
>>> leg = ax[0].legend()
>>> fig.tight_layout()
>>> plt.show(block=False)
"""
# checking class and attributes
check_objattrs(group, Group, attrlist=['energy'], exceptions=True)
# extracting data and mu as independent arrays
energy = group.energy
mode = group.get_mode()
mu = getattr(group, mode)
# computing the energy window to perform the deglitch:
e_lim = 150 # energy limit that separates xanes from exafs
e_windows = ['xas', 'xanes', 'exafs']
if e_window in e_windows:
if e_window =='xas':
e_window = [energy[0], energy[-1]]
else:
if 'e0' not in dir(group):
e0 = find_e0(group)
else:
e0 = getattr(group, 'e0')
if e_window =='xanes':
e_window = [energy[0], e0 + e_lim]
else: # exafs
e_window = [e0 + e_lim, energy[-1]]
# energy indexes to perform deglitch
index = where((energy >= e_window[0]) & (energy <= e_window[1]))[0]
# savitzky-golay filter applied to entire mu array
sg_init = savgol_filter(mu, sg_window_length, sg_polyorder)
# computing difference between normalized spectrum and savitzky-golay
res1 = mu - sg_init
# computing window size and rolling median
win_size = 2 * (sg_window_length + (max_glitch_length - 1)) + 1
roll_mad1 = roll_med(abs(res1), window = win_size, edgemethod='calc')
res_norm = res1 / roll_mad1
# if max_glitches is not set to an int, it will be set to the default
if type(max_glitches) != int or max_glitches == 'default':
max_glitches = len(res1)//10
# finds outliers in residuals between data and savitzky-golay filter
report, out1 = genesd(res_norm[index], max_glitches, alpha)
# compensating for nonzero starting index in e_window
if index[0] != 0:
out1 = out1 + index[0]
# deglitching ends here if no outliers are found in this first stage
if len(out1) == 0:
index_glitches = None
energy_glitches = None
else:
# creating additional copy of mu
mu_copy = copy(mu)
# removes points that are poorly fitted by the S-G filter
e2 = delete(energy, out1)
n2 = delete(mu, out1)
#interpolates mu at the removed energy points
f = interp1d(e2, n2, kind='cubic')
interp_pts = f(energy[out1])
# inserts interpolated points into normalized data
for i, point in enumerate(out1):
mu_copy[point] = interp_pts[i]
# fits mu with the interpolated points
sg_final = savgol_filter(mu_copy, sg_window_length, sg_polyorder)
res2 = mu - sg_final
win_size = (2*max_glitch_length) + 1
roll_mad2 = roll_med(abs(res2), window = win_size, edgemethod='calc')
res_norm2 = res2 / roll_mad2
# normalizing the standard deviation to the same window as the savitzky-golay filter
# allows tackling the full spectrum, accounting for the data noise.
report, glitches_init = genesd(res_norm2[index], max_glitches, alpha)
# compensating for nonzero starting index in e_window
if index[0] != 0:
glitches_init = glitches_init + index[0]
glitches = array([])
for glitch in glitches_init:
if True in where(abs(glitch-out1) < (sg_window_length//2) + 1, True, False):
glitches = append(glitches, glitch)
glitches[::-1].sort()
index_glitches = glitches.astype(int)
energy_glitches = energy[index_glitches]
if len(glitches) == 0:
index_glitches = None
energy_glitches = None
else:
# deglitching arrays
energy = delete(energy, index_glitches)
mu = delete(mu, index_glitches)
deglitch_pars = { 'e_window' : e_window,
'sg_window_length' : sg_window_length,
'sg_polyorder' : sg_polyorder,
'alpha' : alpha,
'max_glitches' : max_glitches,
'max_glitch_length' : max_glitch_length
}
content = { 'index_glitches' : index_glitches,
'energy_glitches': energy_glitches,
'energy' : energy,
mode : mu,
'deglitch_pars' : deglitch_pars,
}
if update:
group.add_content(content)
return content | def deglitch(group: Group, e_window: Union[str,list]='xas', sg_window_length: int=9,
sg_polyorder:int =3, alpha: float=.025,
max_glitches: Union[int,str]='default', max_glitch_length: int=4,
update: bool=False) -> dict:
"""Algorithm to deglitch a XAFS spectrum.
Parameters
----------
group
Group containing the spectrum to deglitch.
e_window
Energy window to search for outliers.
Options are 'xas', 'xanes' and 'exafs'.
Alternatively a list with 2 floats can be provided for
the start and end energy for the search.
See the Notes for further details. The default is 'xas'.
sg_window_length
Window length for the Savitzky-Golay filter on the normalized spectrum.
Must be an odd value. The default is 9.
sg_polyorder
Polynomial order for the Savitzky-Golay filter on the normalized spectrum.
The default is 3.
alpha
Significance level for generalized ESD test for outliers.
The default is 0.025.
max_glitches
Maximum number of outliers to remove.
The default is the floor division of the array length by 10.
max_glitch_length
Maximum length of glitch in energy points. The default is 4.
update
Indicates if the group should be updated with the deglitch attributes.
The default is False.
Returns
-------
:
Dictionary with the following arguments:
- ``index_glitches`` : indices of glitches in the original energy array.
- ``energy_glitches``: glitches in the original energy array.
- ``energy`` : deglitched energy array.
- ``mu`` : deglitched array. Returned if ``group.get_mode() = 'mu'``.
- ``fluo`` : deglitched array. Returned if ``group.get_mode() = 'fluo'``.
- ``mu_ref`` : deglitched array. Returned if ``group.get_mode() = 'mu_ref'``.
- ``deglitch_pars`` : dictionary with deglitch parameters.
Raises
------
TypeError
If ``group`` is not a valid Group instance.
AttributeError
If attribute ``energy`` does not exist in ``group``.
Warning
-------
Running :func:`~araucaria.xas.deglitch.deglitch` with ``update=True`` will overwrite
the ``energy`` and the absorption attribute of ``group``.
Notes
-----
This function deglitches a XAFS spectrum through a
two-step fitting with Savitzky-Golay filter and outlier
identification with a generalized extreme Studentized deviate (ESD)
test [1]_.
- ``e_window='xas'`` considers the full spectrum for deglitching.
- ``e_window='xanes'`` considers the beginning of the energy array
up to 150 eV above :math:`E_0`.
- ``e_window='exafs'`` considers from 150 eV above :math:`E_0` to the
end of the energy array.
- ``e_window=[float,float]`` provides start and end energies in eV.
References
----------
.. [1] Wallace, S. M., Alsina, M. A., & Gaillard, J. F. (2021)
"An algorithm for the automatic deglitching of x-ray absorption
spectroscopy data". J. Synchrotron Rad. 28, https://doi.org/10.1107/S1600577521003611
Example
-------
.. plot::
:context: reset
>>> from numpy import allclose
>>> from araucaria.testdata import get_testpath
>>> from araucaria import Group
>>> from araucaria.io import read_dnd
>>> from araucaria.xas import deglitch, pre_edge, autobk
>>> from araucaria.utils import check_objattrs
>>> fpath = get_testpath('dnd_glitchfile.dat')
>>> group = read_dnd(fpath, scan='fluo') # extracting fluo and mu_ref scans
>>> cgroup = group.copy()
>>> degli = deglitch(cgroup, update=True)
>>> attrs = ['index_glitches', 'energy_glitches', 'deglitch_pars']
>>> check_objattrs(cgroup, Group, attrs)
[True, True, True]
>>> allclose(cgroup.energy_glitches, group.energy[cgroup.index_glitches])
True
>>> print(cgroup.energy_glitches)
[7552.2789 7548.1747 7390.512 7387.2613]
>>> # plotting original and deglitched spectrum
>>> from araucaria.plot import fig_xas_template
>>> import matplotlib.pyplot as plt
>>> for g in [group, cgroup]:
... pre = pre_edge(g, update=True)
... autbk = autobk(g, update=True)
>>> fig, ax = fig_xas_template(panels='xe')
>>> line = ax[0].plot(group.energy, group.norm, label='original', color='tab:red')
>>> line = ax[0].plot(cgroup.energy, cgroup.norm, label='deglitched', color='k')
>>> line = ax[1].plot(group.k, group.k**2 * group.chi, color='tab:red')
>>> line = ax[1].plot(cgroup.k, cgroup.k**2 * cgroup.chi, color='k')
>>> leg = ax[0].legend()
>>> fig.tight_layout()
>>> plt.show(block=False)
"""
# checking class and attributes
check_objattrs(group, Group, attrlist=['energy'], exceptions=True)
# extracting data and mu as independent arrays
energy = group.energy
mode = group.get_mode()
mu = getattr(group, mode)
# computing the energy window to perform the deglitch:
e_lim = 150 # energy limit that separates xanes from exafs
e_windows = ['xas', 'xanes', 'exafs']
if e_window in e_windows:
if e_window =='xas':
e_window = [energy[0], energy[-1]]
else:
if 'e0' not in dir(group):
e0 = find_e0(group)
else:
e0 = getattr(group, 'e0')
if e_window =='xanes':
e_window = [energy[0], e0 + e_lim]
else: # exafs
e_window = [e0 + e_lim, energy[-1]]
# energy indexes to perform deglitch
index = where((energy >= e_window[0]) & (energy <= e_window[1]))[0]
# savitzky-golay filter applied to entire mu array
sg_init = savgol_filter(mu, sg_window_length, sg_polyorder)
# computing difference between normalized spectrum and savitzky-golay
res1 = mu - sg_init
# computing window size and rolling median
win_size = 2 * (sg_window_length + (max_glitch_length - 1)) + 1
roll_mad1 = roll_med(abs(res1), window = win_size, edgemethod='calc')
res_norm = res1 / roll_mad1
# if max_glitches is not set to an int, it will be set to the default
if type(max_glitches) != int or max_glitches == 'default':
max_glitches = len(res1)//10
# finds outliers in residuals between data and savitzky-golay filter
report, out1 = genesd(res_norm[index], max_glitches, alpha)
# compensating for nonzero starting index in e_window
if index[0] != 0:
out1 = out1 + index[0]
# deglitching ends here if no outliers are found in this first stage
if len(out1) == 0:
index_glitches = None
energy_glitches = None
else:
# creating additional copy of mu
mu_copy = copy(mu)
# removes points that are poorly fitted by the S-G filter
e2 = delete(energy, out1)
n2 = delete(mu, out1)
#interpolates mu at the removed energy points
f = interp1d(e2, n2, kind='cubic')
interp_pts = f(energy[out1])
# inserts interpolated points into normalized data
for i, point in enumerate(out1):
mu_copy[point] = interp_pts[i]
# fits mu with the interpolated points
sg_final = savgol_filter(mu_copy, sg_window_length, sg_polyorder)
res2 = mu - sg_final
win_size = (2*max_glitch_length) + 1
roll_mad2 = roll_med(abs(res2), window = win_size, edgemethod='calc')
res_norm2 = res2 / roll_mad2
# normalizing the standard deviation to the same window as the savitzky-golay filter
# allows tackling the full spectrum, accounting for the data noise.
report, glitches_init = genesd(res_norm2[index], max_glitches, alpha)
# compensating for nonzero starting index in e_window
if index[0] != 0:
glitches_init = glitches_init + index[0]
glitches = array([])
for glitch in glitches_init:
if True in where(abs(glitch-out1) < (sg_window_length//2) + 1, True, False):
glitches = append(glitches, glitch)
glitches[::-1].sort()
index_glitches = glitches.astype(int)
energy_glitches = energy[index_glitches]
if len(glitches) == 0:
index_glitches = None
energy_glitches = None
else:
# deglitching arrays
energy = delete(energy, index_glitches)
mu = delete(mu, index_glitches)
deglitch_pars = { 'e_window' : e_window,
'sg_window_length' : sg_window_length,
'sg_polyorder' : sg_polyorder,
'alpha' : alpha,
'max_glitches' : max_glitches,
'max_glitch_length' : max_glitch_length
}
content = { 'index_glitches' : index_glitches,
'energy_glitches': energy_glitches,
'energy' : energy,
mode : mu,
'deglitch_pars' : deglitch_pars,
}
if update:
group.add_content(content)
return content |
Python | def calibrate(group: Group, e0: float, update: bool=True) -> float:
"""Calibrates the absorption threshold energy of the reference scan.
Parameters
----------
group
Group containing the spectrum to calibrate.
e0
Arbitrary value for the absorption threshold.
update
Indicates if the group should be updated following calibration.
The default is True.
Returns
-------
:
Energy difference between ``e0`` and the initial energy threshold.
Raises
------
TypeError
If ``group`` is not a valid Group instance.
AttributeError
If attribute ``energy`` does not exist in ``group``.
AttributeError
If attribute ``mu_ref`` does not exist in ``group``.
Notes
-----
Calibration is performed by offsetting the ``group.energy`` attribute in order
to match the absorption threshold energy of ``group.mu_ref`` with the given ``e0``
value.
If ``update=True`` the following attributes of ``group`` will be modified or created:
- ``group.energy``: modified by the ``e_offset`` value.
- ``group.e_offset``: difference between ``e0`` and the initial threshold energy.
If ``update=False`` the ``e_offset`` value will be returned but not stored
in ``group``.
Warning
-------
If ``e_offset`` already exists in the provided ``group``, the ``group.energy`` array will be
reverted to its original values before performing calibration with the new ``e0`` value.
See also
--------
~araucaria.xas.normalize.find_e0 : Finds the absorption threshold value.
Example
-------
>>> from araucaria.testdata import get_testpath
>>> from araucaria.io import read_dnd
>>> from araucaria.xas import calibrate
>>> fpath = get_testpath('dnd_testfile1.dat')
>>> group_mu = read_dnd(fpath, scan='mu') # extracting mu and mu_ref scans
>>> e0 = 29200 # threshold energy for calibration
>>> e_offset = calibrate(group_mu, e0, update=False) # energy offset
>>> print('%1.3f' % e_offset)
-3.249
"""
# checking class and attributes
check_objattrs(group, Group, attrlist=['energy', 'mu_ref'], exceptions=True)
# auxiliary energy array
energy = group.energy
if hasattr(group, 'e_offset'):
warn('group was already aligned or calibrated: resetting energy to original value.')
energy = group.energy - group.e_offset
# auxiliary group
calgroup = Group(**{'energy': energy, 'mu_ref': group.mu_ref})
e_offset = e0-find_e0(calgroup)
if update:
# updating the group
group.e_offset = e_offset
group.energy = energy + e_offset
return e_offset | def calibrate(group: Group, e0: float, update: bool=True) -> float:
"""Calibrates the absorption threshold energy of the reference scan.
Parameters
----------
group
Group containing the spectrum to calibrate.
e0
Arbitrary value for the absorption threshold.
update
Indicates if the group should be updated following calibration.
The default is True.
Returns
-------
:
Energy difference between ``e0`` and the initial energy threshold.
Raises
------
TypeError
If ``group`` is not a valid Group instance.
AttributeError
If attribute ``energy`` does not exist in ``group``.
AttributeError
If attribute ``mu_ref`` does not exist in ``group``.
Notes
-----
Calibration is performed by offsetting the ``group.energy`` attribute in order
to match the absorption threshold energy of ``group.mu_ref`` with the given ``e0``
value.
If ``update=True`` the following attributes of ``group`` will be modified or created:
- ``group.energy``: modified by the ``e_offset`` value.
- ``group.e_offset``: difference between ``e0`` and the initial threshold energy.
If ``update=False`` the ``e_offset`` value will be returned but not stored
in ``group``.
Warning
-------
If ``e_offset`` already exists in the provided ``group``, the ``group.energy`` array will be
reverted to its original values before performing calibration with the new ``e0`` value.
See also
--------
~araucaria.xas.normalize.find_e0 : Finds the absorption threshold value.
Example
-------
>>> from araucaria.testdata import get_testpath
>>> from araucaria.io import read_dnd
>>> from araucaria.xas import calibrate
>>> fpath = get_testpath('dnd_testfile1.dat')
>>> group_mu = read_dnd(fpath, scan='mu') # extracting mu and mu_ref scans
>>> e0 = 29200 # threshold energy for calibration
>>> e_offset = calibrate(group_mu, e0, update=False) # energy offset
>>> print('%1.3f' % e_offset)
-3.249
"""
# checking class and attributes
check_objattrs(group, Group, attrlist=['energy', 'mu_ref'], exceptions=True)
# auxiliary energy array
energy = group.energy
if hasattr(group, 'e_offset'):
warn('group was already aligned or calibrated: resetting energy to original value.')
energy = group.energy - group.e_offset
# auxiliary group
calgroup = Group(**{'energy': energy, 'mu_ref': group.mu_ref})
e_offset = e0-find_e0(calgroup)
if update:
# updating the group
group.e_offset = e_offset
group.energy = energy + e_offset
return e_offset |
Python | def add_content(self, content: dict) -> None:
"""Adds content to the group.
Parameters
----------
content
Dictionary with content for the group.
Returns
-------
:
Raises
------
TypeError
If ``content`` is not a dictionary.
Example
-------
>>> from araucaria import Group
>>> from araucaria.utils import check_objattrs
>>> content = {'var': 'xas'}
>>> group = Group()
>>> group.add_content(content)
>>> check_objattrs(group, Group, attrlist=['name', 'var'])
[True, True]
"""
if not isinstance(content, dict):
raise TypeError('content is not a valid dictionary.')
else:
for key, val in content.items():
setattr(self, key, val) | def add_content(self, content: dict) -> None:
"""Adds content to the group.
Parameters
----------
content
Dictionary with content for the group.
Returns
-------
:
Raises
------
TypeError
If ``content`` is not a dictionary.
Example
-------
>>> from araucaria import Group
>>> from araucaria.utils import check_objattrs
>>> content = {'var': 'xas'}
>>> group = Group()
>>> group.add_content(content)
>>> check_objattrs(group, Group, attrlist=['name', 'var'])
[True, True]
"""
if not isinstance(content, dict):
raise TypeError('content is not a valid dictionary.')
else:
for key, val in content.items():
setattr(self, key, val) |
Python | def read_lcf_coefs(fpaths: List[Path], refgroup: str,
error: bool=True) -> Union[Tuple[List], list]:
"""Returns amplitude coefficients for a given LCF reference.
Amplitude coefficients are read directly from a list of paths
to LCF report files generated by :func:`~araucaria.io.io_write.write_lcf_report`.
Parameters
----------
fpaths
List of paths to valid LCF report files.
refgroup
Name of the reference group.
error
If True the error of the fit will also be returned.
The default is True.
Returns
-------
:
Amplitude coefficients and error for the reference in the LCF.
Raises
------
IOError
If a file does not exist in the specified path.
TypeError
If a file is not a valid LCF report.
ValueError
If ``refgroup`` was fitted during the LCF analysis (i.e. not a reference).
Examples
--------
>>> from araucaria.testdata import get_testpath
>>> from araucaria.io import read_lcf_coefs
>>> fpath = get_testpath('test_lcf_report.log')
>>> read_lcf_coefs([fpath], 'group1')
([0.40034377], [0.01195335])
>>> read_lcf_coefs([fpath], 'group2', error=False)
[0.59428689]
"""
# testing that the file exists
for fpath in fpaths:
if not isfile(fpath):
raise IOError('file %s does not exist.' % fpath)
vallist = [] # container for values
errlist = [] # container for errors
for fpath in fpaths:
getref = True # reference is always searched
getval = False # value is retrieved only if reference was used during the lcf
f = open(fpath, 'r')
fline = f.readline()
if 'lcf report' not in fline:
raise TypeError('%s is not a valid LCF report file.' % fpath)
while getref:
line = f.readline()
if refgroup in line:
# reference found in line
if 'scan' in line:
raise ValueError('%s was fitted in %s.' %(refgroup, fpath))
else:
# we extract the standard index
index = line.split()[0][-1]
stdval = "amp"+index
getref = False
getval = True
elif "[[Fit Statistics]]" in line:
# This line indicates that we already passed the [[Group]] section
# There is nothing else to search so return zeroes instead
vallist.append(0.00)
errlist.append(0.00)
getref = False
break
while getval:
line = f.readline()
if stdval in line:
val = float(line.split()[1])
err = float(line.split()[3])
vallist.append(val)
errlist.append(err)
getval = False
f.close()
if error:
return (vallist, errlist)
else:
return (vallist) | def read_lcf_coefs(fpaths: List[Path], refgroup: str,
error: bool=True) -> Union[Tuple[List], list]:
"""Returns amplitude coefficients for a given LCF reference.
Amplitude coefficients are read directly from a list of paths
to LCF report files generated by :func:`~araucaria.io.io_write.write_lcf_report`.
Parameters
----------
fpaths
List of paths to valid LCF report files.
refgroup
Name of the reference group.
error
If True the error of the fit will also be returned.
The default is True.
Returns
-------
:
Amplitude coefficients and error for the reference in the LCF.
Raises
------
IOError
If a file does not exist in the specified path.
TypeError
If a file is not a valid LCF report.
ValueError
If ``refgroup`` was fitted during the LCF analysis (i.e. not a reference).
Examples
--------
>>> from araucaria.testdata import get_testpath
>>> from araucaria.io import read_lcf_coefs
>>> fpath = get_testpath('test_lcf_report.log')
>>> read_lcf_coefs([fpath], 'group1')
([0.40034377], [0.01195335])
>>> read_lcf_coefs([fpath], 'group2', error=False)
[0.59428689]
"""
# testing that the file exists
for fpath in fpaths:
if not isfile(fpath):
raise IOError('file %s does not exist.' % fpath)
vallist = [] # container for values
errlist = [] # container for errors
for fpath in fpaths:
getref = True # reference is always searched
getval = False # value is retrieved only if reference was used during the lcf
f = open(fpath, 'r')
fline = f.readline()
if 'lcf report' not in fline:
raise TypeError('%s is not a valid LCF report file.' % fpath)
while getref:
line = f.readline()
if refgroup in line:
# reference found in line
if 'scan' in line:
raise ValueError('%s was fitted in %s.' %(refgroup, fpath))
else:
# we extract the standard index
index = line.split()[0][-1]
stdval = "amp"+index
getref = False
getval = True
elif "[[Fit Statistics]]" in line:
# This line indicates that we already passed the [[Group]] section
# There is nothing else to search so return zeroes instead
vallist.append(0.00)
errlist.append(0.00)
getref = False
break
while getval:
line = f.readline()
if stdval in line:
val = float(line.split()[1])
err = float(line.split()[3])
vallist.append(val)
errlist.append(err)
getval = False
f.close()
if error:
return (vallist, errlist)
else:
return (vallist) |
Python | def guess_edge(group: Group, e0: float=None, update:bool =False) -> dict:
"""Estimates the nearest absorption edge for a XAFS scan.
Parameters
----------
group
Group containing the spectrum for pre-edge substraction and normalization.
e0
Absorption threshold energy. If None it will search for the
value stored in ``group.e0``. Otherwise it will be calculated
using :func:`~araucaria.xas.normalize.find_e0` with default
parameters.
update
Indicates if the group should be updated with the normalization attributes.
The default is False.
Returns
-------
:
Dictionary with the following arguments:
- ``atsym`` : atomic symbol for the absorption edge.
- ``edge`` : absorption edge in Siegbahn notation.
Raises
------
TypeError
If ``group`` is not a valid Group instance.
AttributeError
If attribute ``energy`` does not exist in ``group``.
IndexError
If ``e0`` is outside the range of ``group.energy``.
See also
--------
:func:`~araucaria.xrdb.xray.nearest_edge`
Returns the nearest x-ray edge for a given energy.
Example
-------
>>> from araucaria.testdata import get_testpath
>>> from araucaria import Group
>>> from araucaria.io import read_dnd
>>> from araucaria.xas import find_e0
>>> from araucaria.utils import check_objattrs
>>> fpath = get_testpath('dnd_testfile1.dat')
>>> group = read_dnd(fpath, scan='mu') # extracting mu and mu_ref scans
>>> attrs = ['atsym', 'edge']
>>> e0 = find_e0(group)
>>> edge = guess_edge(group, e0, update=True)
>>> check_objattrs(group, Group, attrs)
[True, True]
>>> print(edge)
{'atsym': 'Sn', 'edge': 'K'}
"""
# checking class and attributes
check_objattrs(group, Group, attrlist=['energy'], exceptions=True)
# storing energy as an independent array
energy = group.energy
# assigning e0
if e0 is not None:
if e0 < min(energy) or e0 > max(energy):
raise IndexError('e0 is outside the energy range.')
else:
e0 = energy[index_nearest(energy, e0)]
elif hasattr(group, 'e0'):
if group.e0 < min(energy) or group.e0 > max(energy):
raise IndexError('group.e0 is outside the energy range.')
else:
e0 = energy[index_nearest(energy, group.e0)]
else:
e0 = find_e0(group, update=False)
# estimating edge
edge = nearest_edge(e0)
content = {'atsym' : edge[0],
'edge' : edge[1],
}
if update:
group.add_content(content)
return content | def guess_edge(group: Group, e0: float=None, update:bool =False) -> dict:
"""Estimates the nearest absorption edge for a XAFS scan.
Parameters
----------
group
Group containing the spectrum for pre-edge substraction and normalization.
e0
Absorption threshold energy. If None it will search for the
value stored in ``group.e0``. Otherwise it will be calculated
using :func:`~araucaria.xas.normalize.find_e0` with default
parameters.
update
Indicates if the group should be updated with the normalization attributes.
The default is False.
Returns
-------
:
Dictionary with the following arguments:
- ``atsym`` : atomic symbol for the absorption edge.
- ``edge`` : absorption edge in Siegbahn notation.
Raises
------
TypeError
If ``group`` is not a valid Group instance.
AttributeError
If attribute ``energy`` does not exist in ``group``.
IndexError
If ``e0`` is outside the range of ``group.energy``.
See also
--------
:func:`~araucaria.xrdb.xray.nearest_edge`
Returns the nearest x-ray edge for a given energy.
Example
-------
>>> from araucaria.testdata import get_testpath
>>> from araucaria import Group
>>> from araucaria.io import read_dnd
>>> from araucaria.xas import find_e0
>>> from araucaria.utils import check_objattrs
>>> fpath = get_testpath('dnd_testfile1.dat')
>>> group = read_dnd(fpath, scan='mu') # extracting mu and mu_ref scans
>>> attrs = ['atsym', 'edge']
>>> e0 = find_e0(group)
>>> edge = guess_edge(group, e0, update=True)
>>> check_objattrs(group, Group, attrs)
[True, True]
>>> print(edge)
{'atsym': 'Sn', 'edge': 'K'}
"""
# checking class and attributes
check_objattrs(group, Group, attrlist=['energy'], exceptions=True)
# storing energy as an independent array
energy = group.energy
# assigning e0
if e0 is not None:
if e0 < min(energy) or e0 > max(energy):
raise IndexError('e0 is outside the energy range.')
else:
e0 = energy[index_nearest(energy, e0)]
elif hasattr(group, 'e0'):
if group.e0 < min(energy) or group.e0 > max(energy):
raise IndexError('group.e0 is outside the energy range.')
else:
e0 = energy[index_nearest(energy, group.e0)]
else:
e0 = find_e0(group, update=False)
# estimating edge
edge = nearest_edge(e0)
content = {'atsym' : edge[0],
'edge' : edge[1],
}
if update:
group.add_content(content)
return content |
Python | def pre_edge(group: Group, e0: float=None, nvict: int=0, nnorm: int=2,
pre_range: list=[-inf,-50], post_range: list=[100,inf],
update:bool =False) -> dict:
"""Pre-edge substaction and normalization of a XAFS scan.
Parameters
----------
group
Group containing the spectrum for pre-edge subtraction and normalization.
e0
Absorption threshold energy. If None it will search for the
value stored in ``group.e0``. Otherwise it will be calculated
using :func:`~araucaria.xas.normalize.find_e0` with default
parameters.
nvict
Energy exponent for pre-edge fit with a Victoreen polynomial.
The default is 0. See Notes for details.
nnorm
Degree of polynomial for post-edge fit. The default is 2.
pre_range
Energy range with respect to `e0` for the pre-edge fit.
The default is [-:data:`~numpy.inf`, -50].
post_range
Energy range with respect to `e0` for the post-edge fit.
The default is [100, :data:`~numpy.inf`].
update
Indicates if the group should be updated with the normalization attributes.
The default is False.
Returns
-------
:
Dictionary with the following arguments:
- ``e0`` : absorption threshold energy :math:`E_0`.
- ``edge_step`` : absorption edge step :math:`\Delta \mu(E_0)`.
- ``norm`` : array with normalized :math:`\mu(E)`.
- ``flat`` : array with flattened :math:`\mu(E)`.
- ``pre_edge`` : fitted pre-edge polynomial.
- ``post_edge`` : fitted post-edge polynomial.
- ``pre_coefs`` : coefficients for the pre-edge Victoreen polynomial.
- ``post_coefs`` : coefficients for the post-edge polynomial.
- ``pre_edge_pars``: dictionary with pre-edge parameters.
Raises
------
TypeError
If ``group`` is not a valid Group instance.
AttributeError
If attribute ``energy`` does not exist in ``group``.
IndexError
If ``e0`` is outside the range of ``group.energy``.
ValueError
If ``pre_range`` contains less than two energy points.
ValueError
If ``post_range`` contains less than two energy points.
Warning
-------
A warning will be raised if the degree of the post-edge polynomial is larger than 3.
See also
--------
:func:`~araucaria.plot.fig_pre_edge.fig_pre_edge`
Plot the results of pre-edge subtraction and normalization.
Notes
-----
Pre-edge subtraction and normalization is performed as follows:
1. The absorption threshold is determined (if ``e0`` or ``group.e0`` is not supplied).
2. A Victoreen polynomial with energy exponent ``nvict`` is fitted to the region below
the edge, as specified by ``pre_range`` (2 coefficients are fitted):
:math:`\mu(E) \cdot E^{nvict} = m \cdot E + b`
3. A polynomial of degree ``nnorm`` is fitted to the region above the edge, as specified
by ``post_range`` (``nnorm`` + 1 coefficients are fitted).
4. The edge step is determined by extrapolating both curves to `e0`.
5. A flattened spectrum is calculated by removing the polynomial above the edge from the
normalized spectrum, while maintaining the offset of the polynomial at ``e0``.
If ``update=True`` the contents of the returned dictionary will be
included as attributes of ``group``.
Example
-------
>>> from araucaria.testdata import get_testpath
>>> from araucaria import Group
>>> from araucaria.io import read_dnd
>>> from araucaria.xas import pre_edge
>>> from araucaria.utils import check_objattrs
>>> fpath = get_testpath('dnd_testfile1.dat')
>>> group = read_dnd(fpath, scan='mu') # extracting mu and mu_ref scans
>>> attrs = ['e0', 'edge_step', 'pre_edge', 'post_edge', 'norm', 'flat']
>>> pre = pre_edge(group, update=True)
>>> check_objattrs(group, Group, attrs)
[True, True, True, True, True, True]
"""
# checking class and attributes
check_objattrs(group, Group, attrlist=['energy'], exceptions=True)
    # storing energy and mu as independent arrays
energy = group.energy
mu = getattr(group, group.get_mode())
# assigning e0
if e0 is not None:
if e0 < min(energy) or e0 > max(energy):
raise IndexError('e0 is outside the energy range.')
else:
e0 = energy[index_nearest(energy, e0)]
elif hasattr(group, 'e0'):
if group.e0 < min(energy) or group.e0 > max(energy):
raise IndexError('group.e0 is outside the energy range.')
else:
e0 = energy[index_nearest(energy, group.e0)]
else:
e0 = find_e0(group, update=False)
# storing pre_edge_pars in dict
pre_edge_pars = {'pre_range': pre_range,
'post_range': post_range}
    # assigning values inside the energy array
prerange = check_xrange(pre_range, energy, refval=e0)
postrange = check_xrange(post_range, energy, refval=e0)
# retrieving pre-edge indices
# 1 is added to pre_index[1] to include it during slicing
pre_index = [0,-1]
pre_index[0] = index_nearest(energy, prerange[0] + e0, kind='lower')
pre_index[1] = index_nearest(energy, prerange[1] + e0)
# indices must be at least 2 values apart
if ptp(pre_index) < 2:
raise ValueError('energy range for pre-edge fit provides less than 2 points. consider increasing it.')
#pre_index[1] = min(len(energy), pre_index[0] + 2)
omu = mu * energy**nvict
pre_coefs = polyfit(energy[pre_index[0]:pre_index[1]],
omu[pre_index[0]:pre_index[1]], 1)
pre_edge = polyval(pre_coefs, energy) * energy**(-nvict)
# retrieving post-edge indices
# 1 is added to post_index[1] to include it during slicing
post_index = [0,-1]
post_index[0] = index_nearest(energy, postrange[0] + e0, kind='lower')
post_index[1] = index_nearest(energy, postrange[1] + e0)
# indices must be at least 2 values apart
if ptp(post_index) < 2:
raise ValueError('energy range for post-edge fit provides less than 2 points. consider increasing it')
#post_index[1] = min(len(energy), post_index[0] + 2)
if nnorm is None:
nnorm = 2
elif nnorm > 3:
warn('polynomial degree for post-edge curve is %s. please verify your results.' % nnorm)
# post-edge fit
post_mu = mu[post_index[0]:post_index[1]]
post_coefs = polyfit(energy[post_index[0]:post_index[1]], post_mu, nnorm)
post_edge = polyval(post_coefs, energy)
# edge_step
ie0 = index_nearest(energy, e0)
edge_step = post_edge[ie0] - pre_edge[ie0]
# normalized mu
norm = (mu - pre_edge) / edge_step
# flattened mu
flat = ( (mu - post_edge) / edge_step + 1.0)
flat[:ie0] = norm[:ie0]
# output dictionaries
pre_edge_pars.update({'nvict': nvict, 'nnorm': nnorm})
content = {'e0' : e0,
'edge_step' : edge_step,
'norm' : norm,
'flat' : flat,
'pre_edge' : pre_edge,
'post_edge' : post_edge,
'pre_coefs' : pre_coefs,
'post_coefs' : post_coefs,
'pre_edge_pars': pre_edge_pars,
}
if update:
group.add_content(content)
    return content |
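Steps 4 and 5 of the Notes above reduce to a handful of array operations once the two fitted curves are available. A minimal self-contained sketch with synthetic data and hand-picked coefficients (not araucaria's fitted polynomials):

from numpy import linspace, polyval

energy = linspace(7000, 7400, 401)               # synthetic energy grid (eV), 1 eV steps
mu = 0.1 + 1.0 * (energy > 7112)                 # toy step-like absorption signal
pre_coefs, post_coefs = [1e-5, 0.03], [0.0, 1.1] # assumed fit coefficients
pre_line = polyval(pre_coefs, energy)            # pre-edge line extrapolated to all energies
post_line = polyval(post_coefs, energy)          # post-edge polynomial (constant here)
ie0 = 112                                        # index of e0 = 7112 eV on this grid
edge_step = post_line[ie0] - pre_line[ie0]       # step 4: difference of both curves at e0
norm = (mu - pre_line) / edge_step               # normalized mu(E)
flat = (mu - post_line) / edge_step + 1.0        # step 5: remove post-edge curvature
flat[:ie0] = norm[:ie0]                          # keep the pre-edge region unflattened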
Python | def formula_parser(formula: str, mult: float=1) -> dict:
"""Returns a dictionary with parsed formula.
Parameters
    ----------
formula
Chemical formula to parse.
mult
Multiplier for chemical formula.
Returns
-------
:
Dictionary with parsed formula.
Raises
------
NameError
If parentheses in ``formula`` are not balanced.
Example
-------
>>> from araucaria.xrdb import formula_parser
>>> formulas = ['As2O3', 'Fe(OH)3', 'CuCO3Cu(OH)2']
>>> for f in formulas:
... print(formula_parser(f))
{'As': 2.0, 'O': 3.0}
{'Fe': 1.0, 'H': 3.0, 'O': 3.0}
{'C': 1.0, 'Cu': 2.0, 'H': 2.0, 'O': 5.0}
"""
out = _parser(formula, mult=mult)
out = _format_parser(out)
# unique elements
unique = set([val[0] for val in out])
# dict container
fdict = {}
for item in sorted(unique):
val = 0
for pars in out:
if pars[0] == item:
val += pars[1]
fdict[item] = val
    return fdict |
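Because the parser returns plain element counts, derived quantities such as a molar mass are a one-line reduction away. A short sketch, assuming a small hand-entered table of rounded atomic weights (illustrative values only):

from araucaria.xrdb import formula_parser

weights = {'C': 12.011, 'Cu': 63.546, 'H': 1.008, 'O': 15.999}   # assumed atomic weights
parsed = formula_parser('CuCO3Cu(OH)2')                          # {'C': 1.0, 'Cu': 2.0, 'H': 2.0, 'O': 5.0}
molar_mass = sum(n * weights[el] for el, n in parsed.items())
print(round(molar_mass, 2))                                      # ~221.11 g/mol (malachite)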
Python | def roll_med(data: ndarray, window: int, min_samples: int=2,
edgemethod: str='nan') -> ndarray:
"""Computes the rolling median of a 1-D array.
Parameters
----------
data:
Array to compute the rolling median.
window:
Size of the rolling window for analysis.
min_samples:
Minimum sample points to calculate the median in each window.
The default is 2.
edgemethod :
Dictates how medians are calculated at the edges of the array.
Options are 'nan', 'calc' and 'extend'. See the Notes for further details.
The default is 'nan'.
Returns
-------
:
Rolling median of the array.
Raises
------
ValueError
If ``window`` is not an odd value.
ValueError
        If ``window`` is smaller than 3.
TypeError
If ``window`` is not an integer.
ValueError
If ``edgemethod`` is not recognized.
Notes
-----
This function calculates the median of a moving window. Results are returned in the
index corresponding to the center of the window. The function ignores :data:`~numpy.nan`
values in the array.
- ``edgemethod='nan'`` uses :data:`~numpy.nan` values for missing values at the edges.
- ``edgemethod='calc'`` uses an abbreviated window at the edges
(e.g. the first sample will have (window/2)+1 points in the calculation).
- ``edgemethod='extend'`` uses the nearest calculated value for missing values at the edges.
Warning
-------
    If a window contains fewer than ``min_samples`` non-missing points, :data:`~numpy.nan` is given as the median.
Example
-------
.. plot::
:context: reset
>>> from numpy import pi, sin, linspace
>>> from araucaria.stats import roll_med
>>> import matplotlib.pyplot as plt
>>> # generating a signal and its rolling median
>>> f1 = 0.2 # frequency
>>> t = linspace(0,10)
>>> y = sin(2*pi*f1*t)
>>> line = plt.plot(t,y, label='signal')
>>> for method in ['calc', 'extend', 'nan']:
... fy = roll_med(y, window=25, edgemethod=method)
... line = plt.plot(t, fy, marker='o', label=method)
>>> lab = plt.xlabel('t')
    >>> lab = plt.ylabel('y')
>>> leg = plt.legend()
>>> plt.show(block=False)
"""
    if not isinstance(window, int):
        raise TypeError('window length must be an integer.')
    elif window % 2 == 0:
        raise ValueError('window length must be an odd value.')
    elif window < 3:
        raise ValueError('window length must be at least 3.')
validEdgeMethods = ['nan', 'extend', 'calc']
if edgemethod not in validEdgeMethods:
raise ValueError('please choose a valid edgemethod.')
# calculating points on either side of the point of interest in the window
movement = int((window - 1) / 2)
med_array = array([nan for point in data])
for i, point in enumerate(data[ : -movement]):
if i>=movement:
if count_nonzero(isnan(data[i - movement : i + 1 + movement]) == False) >= min_samples:
med_array[i] = nanmedian(data[i - movement : i + 1 + movement])
if edgemethod == 'nan':
return med_array
for i, point in enumerate(data[ : movement]):
if edgemethod == 'calc':
if count_nonzero(isnan(data[0 : i + 1 + movement]) == False) >= min_samples:
med_array[i] = nanmedian(data[0 : i + 1 + movement])
elif edgemethod == 'extend':
med_array[i] = med_array[movement]
for i, point in enumerate(data[-movement : ]):
if edgemethod == 'calc':
if count_nonzero(isnan(data[(-2 * movement) + i : ]) == False) >= min_samples:
med_array[-movement + i] = nanmedian(data[(-2 * movement) + i : ])
elif edgemethod == 'extend':
med_array[-movement + i] = med_array[-movement - 1]
    return med_array |
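A typical use of the rolling median is despiking: points that deviate strongly from the local median are flagged and replaced. A brief sketch along those lines, with an arbitrarily chosen absolute threshold:

from numpy import abs, where
from numpy.random import default_rng
from araucaria.stats import roll_med

rng = default_rng(0)
y = rng.normal(0.0, 0.05, 200)                    # noisy but smooth baseline
y[50] = 3.0                                       # artificial spike
med = roll_med(y, window=11, edgemethod='extend') # local median, no nan at the edges
spikes = abs(y - med) > 0.5                       # assumed threshold
y_clean = where(spikes, med, y)                   # replace flagged points by the local median
print(int(spikes.sum()))                          # expect a single flagged point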
Python | def cluster(collection: Collection, taglist: List[str]=['all'],
cluster_region: str='xanes', cluster_range: list=[-inf,inf],
method: str='single', metric: str='euclidean',
kweight: int=2) -> Dataset:
"""Performs hierarchical clustering on a collection.
Parameters
----------
collection
Collection with the groups for clustering.
taglist
List with keys to filter groups based on their ``tags``
attributes in the Collection.
The default is ['all'].
cluster_region
XAFS region to perform clustering. Accepted values are 'dxanes',
'xanes', or 'exafs'. The default is 'xanes'.
cluster_range
Domain range in absolute values. Energy units are expected
for 'dxanes' or 'xanes', while wavenumber (k) units are expected
for 'exafs'.
The default is [-:data:`~numpy.inf`, :data:`~numpy.inf`].
method
        Linkage method to compute the distance between clusters.
See the :func:`~scipy.cluster.hierarchy.linkage` function
of ``scipy`` for a list of valid method names.
The default is 'single'.
metric
The distance metric. See the :func:`~scipy.spatial.distance.pdist`
function of ``scipy`` for a list of valid distance metrics.
The default is 'euclidean'.
kweight
Exponent for weighting chi(k) by k^kweight.
Only valid for ``cluster_region='exafs'``.
The default is 2.
Returns
-------
:
Dataset with the following arguments:
- ``Z`` : hierarchical clustering encoded as a linkage matrix.
- ``groupnames`` : list with names of clustered groups.
- ``energy`` : array with energy values. Returned only if
          ``cluster_region='xanes'`` or ``cluster_region='dxanes'``.
- ``k`` : array with wavenumber values. Returned only if
``cluster_region='exafs'``.
- ``matrix`` : array with observed values for groups in ``cluster_range``.
- ``cluster_pars`` : dictionary with cluster parameters.
See also
--------
:func:`~araucaria.plot.fig_cluster.fig_cluster` : Plots the dendrogram of a hierarchical clustering.
Examples
--------
>>> from araucaria.testdata import get_testpath
>>> from araucaria import Dataset
>>> from araucaria.xas import pre_edge, autobk
>>> from araucaria.stats import cluster
>>> from araucaria.io import read_collection_hdf5
>>> from araucaria.utils import check_objattrs
>>> fpath = get_testpath('Fe_database.h5')
>>> collection = read_collection_hdf5(fpath)
>>> collection.apply(pre_edge)
>>> out = cluster(collection, cluster_region='xanes')
>>> attrs = ['groupnames', 'energy', 'matrix', 'Z', 'cluster_pars']
>>> check_objattrs(out, Dataset, attrs)
[True, True, True, True, True]
>>> # exafs clustering
>>> collection.apply(autobk)
>>> out = cluster(collection, cluster_region='exafs', cluster_range=[0,10])
>>> attrs = ['groupnames', 'k', 'matrix', 'Z', 'cluster_pars']
>>> check_objattrs(out, Dataset, attrs)
[True, True, True, True, True]
"""
xvals, matrix = get_mapped_data(collection, taglist=taglist, region=cluster_region,
range=cluster_range, kweight=kweight)
# linkage matrix
# matrix is transposed to follow the m by n convention with m observation vectors
Z = linkage(matrix.T, method=method, metric=metric)
# storing cluster parameters
cluster_pars = {'cluster_region': cluster_region,
'cluster_range' : cluster_range,
'method' : method,
'metric' : metric,}
# additional cluster parameters
if cluster_region == 'exafs':
xvar = 'k' # x-variable
cluster_pars['kweight'] = kweight
else:
# xanes/dxanes clustering
xvar = 'energy' # x-variable
# storing cluster results
content = {'groupnames' : collection.get_names(taglist=taglist),
xvar : xvals,
'matrix' : matrix,
'Z' : Z,
'cluster_pars' : cluster_pars,}
out = Dataset(**content)
    return out |
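The linkage matrix ``Z`` follows the scipy convention, so flat cluster labels can be obtained directly with scipy.cluster.hierarchy.fcluster. A short sketch continuing the XANES example above (``out = cluster(collection, cluster_region='xanes')``), assuming three clusters are wanted:

from scipy.cluster.hierarchy import fcluster

labels = fcluster(out.Z, t=3, criterion='maxclust')   # cut the dendrogram into 3 flat clusters
for name, label in zip(out.groupnames, labels):
    print(name, label)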
Python | def check_objattrs(obj: object, objtype: TypeVar, attrlist: list=None,
exceptions: bool=False) -> List[bool]:
"""Check type and attributes of an object.
Parameters
----------
obj
Object to check.
objtype
Type for the object.
attrlist
List with names of attributes to check.
exceptions
Condition to raise exceptions if attributes
are not in the object. The default is False.
Returns
-------
:
List with booleans for each attribute of the object.
Raises
------
TypeError
If ``obj`` is not an instance of ``objtype``.
Examples
--------
>>> from araucaria import Group
>>> from araucaria.utils import check_objattrs
>>> group = Group(**{'energy': [1,2,3,4], 'mu': [2,2,3,1]})
>>> # checking class type
>>> check_objattrs(group, Group)
True
>>> # checking class type and attributes
>>> alist = ['energy', 'mu', 'mu_ref']
>>> check_objattrs(group, Group, attrlist = alist)
[True, True, False]
"""
if not isinstance(obj, objtype):
raise TypeError('object is not a valid %s instance.' % objtype.__name__)
elif attrlist is None:
return True
boolist = []
for attr in attrlist:
if hasattr(obj, attr) is False:
if exceptions:
raise AttributeError("%s instance has no '%s' attribute." % (objtype.__name__, attr))
else:
boolist.append(False)
else:
boolist.append(True)
    return boolist |
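With ``exceptions=True`` the same call doubles as a guard clause, which is how the processing functions above use it. A minimal sketch with a hypothetical helper:

from araucaria import Group
from araucaria.utils import check_objattrs

def edge_step_ratio(group: Group) -> float:
    # hypothetical helper: fail early if pre-edge normalization was never run on the group
    check_objattrs(group, Group, attrlist=['e0', 'edge_step'], exceptions=True)
    return group.edge_step / group.e0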
Python | def del_group(self, name) -> None:
"""Removes a group dataset from the collection.
Parameters
----------
name
Name of group to remove.
Returns
-------
:
Raises
------
    AttributeError
        If ``name`` is not a group in the collection.
Example
-------
>>> from araucaria import Collection, Group
>>> from araucaria.utils import check_objattrs
>>> collection = Collection()
>>> g1 = Group(**{'name': 'group1'})
>>> g2 = Group(**{'name': 'group2'})
>>> for group in (g1, g2):
... collection.add_group(group)
>>> check_objattrs(collection, Collection, attrlist=['group1','group2'])
[True, True]
>>> collection.del_group('group2')
>>> check_objattrs(collection, Collection, attrlist=['group1','group2'])
[True, False]
>>> # verifying that the deleted group has no tag
>>> for key, value in collection.tags.items():
... print(key, value)
scan ['group1']
"""
if not hasattr(self, name):
raise AttributeError('collection has no %s group.' % name)
# retrieving original tag key
for key, val in self.tags.items():
if name in val:
initag = key
break
# removing groupname from original tag
self.tags[initag].remove(name)
# removing entire key if group list is empty
if not self.tags[initag]:
del self.tags[initag]
# removing group
        delattr(self, name) |
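Deleting groups keeps the ``tags`` bookkeeping consistent: once the last member of a tag is removed, the tag key itself disappears. A short sketch, assuming two groups added under the same tag:

from araucaria import Collection, Group

collection = Collection()
for name in ('group1', 'group2'):
    collection.add_group(Group(**{'name': name}), tag='ref')
collection.del_group('group1')
print(collection.tags)     # {'ref': ['group2']}
collection.del_group('group2')
print(collection.tags)     # {} -- the 'ref' key is gone as well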
Python | def summary(self, taglist: List[str]=['all'], regex: str=None,
optional: Optional[list]=None) -> Report:
"""Returns a summary report of groups in a collection.
Parameters
----------
taglist
List with keys to filter groups in the collection based
on the ``tags`` attribute. The default is ['all'].
regex
Search string to filter results by group name. See Notes for details.
The default is None.
optional
List with optional parameters. See Notes for details.
The default is None.
Returns
-------
:
        Report for groups in the collection.
Raises
------
ValueError
If any item in ``taglist`` is not a key of the ``tags`` attribute.
Notes
-----
Summary data includes the following:
1. Group index.
2. Group name.
3. Group tag.
4. Measurement mode.
5. Numbers of scans.
6. Merged scans, if ``optional=['merged_scans']``.
7. Optional parameters if they exist as attributes in the group.
A ``regex`` value can be used to filter group names based
    on a regular expression (regex). For valid regex syntax, please
check the documentation of the module :mod:`re`.
The number of scans and names of merged files are retrieved
    from the ``merged_scans`` attribute of each group.
Optional parameters will be retrieved from the groups as
attributes. Currently only :class:`str`, :class:`float` or
    :class:`int` will be retrieved. Otherwise an empty string
will be printed in the report.
See also
--------
:class:`~araucaria.main.report.Report`
Examples
--------
>>> from araucaria.testdata import get_testpath
>>> from araucaria.io import read_collection_hdf5
>>> fpath = get_testpath('Fe_database.h5')
>>> collection = read_collection_hdf5(fpath)
>>> # printing default summary
>>> report = collection.summary()
>>> report.show()
=======================================
id dataset tag mode n
=======================================
1 FeIISO4_20K scan mu 5
2 Fe_Foil scan mu_ref 5
3 Ferrihydrite_20K scan mu 5
4 Goethite_20K scan mu 5
=======================================
>>> # printing summary of dnd file with merged scans
>>> report = collection.summary(regex='Goe', optional=['merged_scans'])
>>> report.show()
=============================================================
id dataset tag mode n merged_scans
=============================================================
1 Goethite_20K scan mu 5 20K_GOE_Fe_K_240.00000.xdi
20K_GOE_Fe_K_240.00001.xdi
20K_GOE_Fe_K_240.00002.xdi
20K_GOE_Fe_K_240.00003.xdi
20K_GOE_Fe_K_240.00004.xdi
=============================================================
>>> # printing custom summary
>>> from araucaria.testdata import get_testpath
>>> from araucaria import Collection
>>> from araucaria.io import read_xmu
>>> fpath = get_testpath('xmu_testfile.xmu')
>>> # extracting mu and mu_ref scans
>>> group_mu = read_xmu(fpath, scan='mu')
>>> # adding additional attributes
>>> group_mu.symbol = 'Zn'
>>> group_mu.temp = 25.0
>>> # saving in a collection
>>> collection = Collection()
>>> collection.add_group(group_mu)
>>> report = collection.summary(optional=['symbol','temp'])
>>> report.show()
===================================================
id dataset tag mode n symbol temp
===================================================
1 xmu_testfile.xmu scan mu 1 Zn 25
===================================================
"""
# list with parameter names
field_names = ['id', 'dataset', 'tag', 'mode', 'n']
# verifying optional values
if optional is not None:
for opt_val in optional:
field_names.append(opt_val)
        # instantiating report class
report = Report()
report.set_columns(field_names)
# number of records
names = self.get_names(taglist=taglist)
if regex is None:
pass
else:
index = []
for i, name in enumerate(names):
if search(regex, name) is None:
pass
else:
index.append(i)
names = [names[i] for i in index]
ncols = len(names)
for i, name in enumerate(names):
data = self.get_group(name)
scanval = data.get_mode()
tag = self.get_tag(name)
extra_content = False # aux variable for 'merged_scans'
try:
# number of merged_scans
nscans = len(data.merged_scans)
except:
nscans = 1
field_vals = [i+1, name, tag, scanval, nscans]
if optional is not None:
for j, opt_val in enumerate(optional):
if opt_val == 'merged_scans':
if i == 0:
# storing the col merge_index
merge_index = len(field_vals)
try:
list_scans = data.merged_scans
field_vals.append(data.merged_scans[0])
extra_content = True
except:
field_vals.append('None')
else:
# custom optional field
try:
val = getattr(data, opt_val)
if isinstance(val, (int, float, str)):
# if val is int or float print it
field_vals.append(val)
else:
field_vals.append('')
except:
field_vals.append('')
report.add_row(field_vals)
if extra_content:
for item in list_scans[1:]:
field_vals = []
for j,index in enumerate(field_names):
if j != merge_index:
field_vals.append('')
else:
field_vals.append(item)
report.add_row(field_vals)
if i < (ncols - 1):
report.add_midrule()
        return report |
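The ``Report`` assembled here exposes the same small API the method itself calls (``set_columns``, ``add_row``, ``add_midrule``, ``show``), so ad-hoc tables can be built the same way. A brief sketch; the import path is assumed from the class reference above:

from araucaria.main.report import Report   # assumed module path

report = Report()
report.set_columns(['id', 'sample', 'T (K)'])
report.add_row([1, 'Goethite', 20])
report.add_midrule()
report.add_row([2, 'Ferrihydrite', 20])
report.show()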
Python | def transform(self, obs: ndarray) -> ndarray:
"""Projects observations into principal components.
Parameters
----------
obs
Array with observed values.
Returns
-------
:
Array with scores on principal components.
"""
n = self.ncomps
        return dot( self.U[:, :n].T, obs ) |
Python | def inverse_transform(self, p: ndarray) -> ndarray:
"""Converts principal components into observations.
Parameters
----------
p
Array with scores on principal components.
Returns
-------
:
Array with observed values.
"""
n = self.ncomps
        return dot( self.U[:,:n], p) |
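``transform`` and ``inverse_transform`` are plain projections onto the first ``ncomps`` left-singular vectors, so a round trip reproduces an observation only up to the discarded components. A standalone numpy sketch of the same two operations, assuming ``U`` comes from an SVD:

from numpy import allclose, dot
from numpy.linalg import svd
from numpy.random import default_rng

rng = default_rng(1)
X = rng.normal(size=(20, 6))              # rows are variables, columns are observations
U, s, Vt = svd(X, full_matrices=False)    # U has orthonormal columns
n = 3                                     # keep three components
obs = X[:, 0]
scores = dot(U[:, :n].T, obs)             # same operation as transform
recon = dot(U[:, :n], scores)             # same operation as inverse_transform
print(allclose(recon, obs))               # False: part of obs lies outside the kept basis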
Python | def target_transform(model: PCAModel, collection: Collection,
taglist: List[str]=['all']) -> Dataset:
"""Performs target transformation on a collection.
Parameters
----------
model
PCA model to perform the projection and inverse transformation.
collection
        Collection with the groups for target transformation.
taglist
List with keys to filter groups based on their ``tags``
attributes in the Collection.
The default is ['all'].
Returns
-------
:
Dataset with the following attributes.
- ``groupnames``: list with names of transformed groups.
- ``energy`` : array with energy values. Returned only if
          ``pca_region='xanes'`` or ``pca_region='dxanes'``.
- ``k`` : array with wavenumber values. Returned only if
``pca_region='exafs'``.
- ``matrix`` : original array with mapped values.
- ``tmatrix`` : array with target transformed groups.
- ``scores`` : array with scores in the principal component basis.
        - ``chi2``      : :math:`\\chi^2` values of the target transformed groups.
Raises
------
TypeError
If ``model`` is not a valid PCAModel instance
KeyError
If attributes from :func:`~araucaria.stats.pca.pca`
do not exist in ``model``.
See also
--------
:func:`~araucaria.stats.pca.pca` : Performs principal component analysis on a collection.
:func:`~araucaria.plot.fig_pca.fig_target_transform` : Plots the results of target transformation.
Example
-------
>>> from araucaria.testdata import get_testpath
>>> from araucaria import Dataset
>>> from araucaria.io import read_collection_hdf5
>>> from araucaria.xas import pre_edge
>>> from araucaria.stats import pca, target_transform
>>> from araucaria.utils import check_objattrs
>>> fpath = get_testpath('Fe_database.h5')
>>> collection = read_collection_hdf5(fpath)
>>> collection.apply(pre_edge)
>>> model = pca(collection, pca_region='xanes', cumvar=0.9)
>>> data = target_transform(model, collection)
>>> attrs = ['groupnames', 'tmatrix', 'chi2', 'scores', 'energy']
>>> check_objattrs(data, Dataset, attrs)
[True, True, True, True, True]
"""
check_objattrs(model, PCAModel, attrlist=['groupnames', 'matrix',
'variance', 'pca_pars'], exceptions=True)
# retrieving pca parameters
pca_region = model.pca_pars['pca_region']
pca_range = model.pca_pars['pca_range']
# setting panels based on pca region
region = (model.pca_pars['pca_region'])
if region == 'exafs':
xvar = 'k'
kweight = model.pca_pars['kweight']
else:
xvar = 'energy'
kweight = 2
# mapped data for collection
domain = getattr(model, xvar)
xvals, matrix = get_mapped_data(collection, taglist=taglist, region=pca_region,
domain=domain, kweight=kweight)
# centering data
matrix = matrix - matrix.mean(axis=0)
# target transformation
scores = model.transform(matrix)
tmatrix = model.inverse_transform(scores)
chi2 = sum(divide( (matrix-tmatrix)**2, matrix), axis=0)
# storing target transformation results
content = {'groupnames' : collection.get_names(taglist=taglist),
xvar : domain,
'matrix' : matrix,
'tmatrix' : tmatrix,
'scores' : scores,
'chi2' : chi2}
# dataset class
out = Dataset(**content)
    return out |
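Since ``chi2`` holds one goodness-of-fit value per transformed group, ranking candidate references is a one-liner. A short sketch continuing the docstring example above, where ``data`` is the returned Dataset:

ranking = sorted(zip(data.groupnames, data.chi2), key=lambda pair: pair[1])
for name, chi2 in ranking:
    print('%-20s %.4f' % (name, chi2))    # lowest chi2: best reproduced by the PCA basis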
Python | def lcf(collection: Collection, fit_region: str='xanes',
fit_range: list=[-inf,inf], scantag: str='scan',
reftag: str='ref', kweight: int=2, sum_one: bool=True,
method: str='leastsq') -> Dataset:
"""Performs linear combination fitting on a XAFS spectrum.
Parameters
----------
collection
Collection containing the group for LCF analysis and the groups
with the reference scans.
fit_region
XAFS region to perform the LCF. Accepted values are 'dxanes',
'xanes', or 'exafs'. The default is 'xanes'.
fit_range
Domain range in absolute values. Energy units are expected
for 'dxanes' or 'xanes', while wavenumber (k) units are expected
for 'exafs'.
The default is [-:data:`~numpy.inf`, :data:`~numpy.inf`].
scantag
Key to filter the scan group in the collection based on the ``tags``
attribute. The default is 'scan'.
reftag
Key to filter the reference groups in the collection based on the ``tags``
        attribute. The default is 'ref'.
kweight
Exponent for weighting chi(k) by k^kweight. Only valid for ``fit_region='exafs'``.
The default is 2.
sum_one
Conditional to force sum of fractions to be one.
The default is True.
method
Fitting method. Currently only local optimization methods are supported.
See the :func:`~lmfit.minimizer.minimize` function of ``lmfit`` for a list
of valid methods.
The default is ``leastsq``.
Returns
-------
:
Fit group with the following arguments:
- ``energy`` : array with energy values.
Returned only if ``fit_region='xanes'`` or ``fit_region='dxanes'``.
- ``k`` : array with wavenumber values.
Returned only if ``fit_region='exafs'``.
- ``scangroup``: name of the group containing the fitted spectrum.
- ``refgroups``: list with names of groups containing reference spectra.
- ``scan`` : array with values of the fitted spectrum.
- ``ref`` : array with interpolated values for each reference spectrum.
- ``fit`` : array with fit result.
- ``min_pars`` : object with the optimized parameters and goodness-of-fit statistics.
- ``lcf_pars`` : dictionary with lcf parameters.
Raises
------
TypeError
If ``collection`` is not a valid Collection instance.
AttributeError
If ``collection`` has no ``tags`` attribute.
AttributeError
If groups have no ``energy`` or ``norm`` attribute.
Only verified if ``fit_region='dxanes'`` or ``fit_region='xanes'``.
AttributeError
If groups have no ``k`` or ``chi`` attribute.
        Only verified if ``fit_region='exafs'``.
    KeyError
        If ``scantag`` or ``reftag`` are not keys of the ``tags`` attribute.
ValueError
If ``fit_region`` is not recognized.
ValueError
        If ``fit_range`` is outside the domain of a reference group.
Important
---------
If more than one group in ``collection`` is tagged with ``scantag``,
a warning will be raised and only the first group will be fitted.
Notes
-----
The ``min_pars`` object is returned by the :func:`minimize` function of
``lmfit``, and contains the following attributes (non-exhaustive list):
- ``params`` : dictionary with the optimized parameters.
- ``var_names`` : ordered list of parameter names used in optimization.
- ``covar`` : covariance matrix from minimization.
- ``init_vals`` : list of initial values for variable parameters using
``var_names``.
- ``success`` : True if the fit succeeded, otherwise False.
- ``nvarys`` : number of variables.
- ``ndata`` : number of data points.
- ``chisqr`` : chi-square.
- ``redchi`` : reduced chi-square.
- ``residual`` : array with fit residuals.
Example
-------
>>> from numpy.random import seed, normal
>>> from numpy import arange, sin, pi
>>> from araucaria import Group, Dataset, Collection
>>> from araucaria.fit import lcf
>>> from araucaria.utils import check_objattrs
>>> seed(1234) # seed of random values
>>> k = arange(0, 12, 0.05)
>>> eps = normal(0, 0.1, len(k))
>>> f1 = 1.2 # freq 1
>>> f2 = 2.6 # freq 2
>>> amp1 = 0.4 # amp 1
>>> amp2 = 0.6 # amp 2
>>> group1 = Group(**{'name': 'group1', 'k': k, 'chi': sin(2*pi*f1*k)})
>>> group2 = Group(**{'name': 'group2', 'k': k, 'chi': sin(2*pi*f2*k)})
>>> group3 = Group(**{'name': 'group3', 'k': k,
... 'chi' : amp1 * group1.chi + amp2 * group2.chi + eps})
>>> collection = Collection()
>>> tags = ['ref', 'ref', 'scan']
>>> for i, group in enumerate((group1,group2, group3)):
... collection.add_group(group, tag=tags[i])
>>> # performing lcf
>>> out = lcf(collection, fit_region='exafs', fit_range=[3,10],
... kweight=0, sum_one=False)
>>> check_objattrs(out, Dataset,
... attrlist=['k', 'scangroup', 'refgroups',
... 'scan', 'ref1', 'ref2', 'fit', 'min_pars', 'lcf_pars'])
[True, True, True, True, True, True, True, True, True]
>>> for key, val in out.min_pars.params.items():
... print('%1.4f +/- %1.4f' % (val.value, val.stderr))
0.4003 +/- 0.0120
0.5943 +/- 0.0120
"""
# checking class and attributes
check_objattrs(collection, Collection, attrlist=['tags'], exceptions=True)
# verifying fit type
fit_valid = ['dxanes', 'xanes','exafs']
if fit_region not in fit_valid:
raise ValueError('fit_region %s not recognized.'%fit_region)
# required groups
# at least a spectrum and a single reference must be provided
for tag in (scantag, reftag):
if tag not in collection.tags:
raise KeyError("'%s' is not a valid key for the collection." % tag)
# scan and ref tags
scangroup = collection.tags[scantag]
if len(scangroup) > 1:
warn("More than one group is tagged as scan. Only the first group will be considered.")
scangroup = scangroup[0]
refgroups = collection.tags[reftag]
refgroups.sort()
# the first element is the scan group
groups = [scangroup] + refgroups
# storing report parameters
lcf_pars = {'fit_region':fit_region, 'fit_range':fit_range, 'sum_one':sum_one}
# report parameters for exafs lcf
if fit_region == 'exafs':
lcf_pars['kweight'] = kweight
# storing name of x-variable (exafs)
xvar = 'k'
# report parameters for xanes/dxanes lcf
else:
# storing name of x-variable (xanes/dxanes)
xvar = 'energy'
# content dictionary
content = {'scangroup': scangroup,
'refgroups': refgroups}
# reading and processing spectra
for i, name in enumerate(groups):
dname = 'scan' if i==0 else 'ref'+str(i)
group = collection.get_group(name).copy()
if fit_region == 'exafs':
check_objattrs(group, Group, attrlist=['k', 'chi'], exceptions=True)
else:
# fit_region == 'xanes' or 'dxanes'
check_objattrs(group, Group, attrlist=['energy', 'norm'], exceptions=True)
if i == 0:
# first value is the spectrum, so we extract the
# interpolation values from xvar
xvals = getattr(group, xvar)
index = index_xrange(fit_range, xvals)
xvals = xvals[index]
# storing the y-variable
if fit_region == 'exafs':
yvals = xvals**kweight*group.chi[index]
elif fit_region == 'xanes':
yvals = group.norm[index]
else:
# derivative lcf
yvals = gradient(group.norm[index]) / gradient(group.energy[index])
else:
# spline interpolation of references
if fit_region == 'exafs':
s = interp1d(group.k, group.k**kweight*group.chi, kind='cubic')
elif fit_region =='xanes':
s = interp1d(group.energy, group.norm, kind='cubic')
else:
s = interp1d(group.energy, gradient(group.norm)/gradient(group.energy), kind='cubic')
# interpolating in the fit range
try:
yvals = s(xvals)
except:
raise ValueError('fit_range is outside the domain of group %s' % name)
# saving yvals in the dictionary
content[dname] = yvals
# setting xvar as an attribute of datgroup
content[xvar] = xvals
# setting initial values and parameters for fit model
initval = around(1/(len(groups)-1), decimals=1)
params = Parameters()
expr = str(1)
for i in range(len(groups)-1):
parname = 'amp'+str(i+1)
if ( (i == len(groups) - 2) and (sum_one == True) ):
params.add(parname, expr=expr)
else:
params.add(parname, value=initval, min=0, max=1, vary=True)
expr += ' - amp'+str(i+1)
# perform fit
min = minimize(residuals, params, method=method, args=(content,))
# storing fit data, parameters, and results
content['fit'] = sum_references(min.params, content)
content['lcf_pars'] = lcf_pars
content['min_pars'] = min
out = Dataset(**content)
return out | def lcf(collection: Collection, fit_region: str='xanes',
fit_range: list=[-inf,inf], scantag: str='scan',
reftag: str='ref', kweight: int=2, sum_one: bool=True,
method: str='leastsq') -> Dataset:
"""Performs linear combination fitting on a XAFS spectrum.
Parameters
----------
collection
Collection containing the group for LCF analysis and the groups
with the reference scans.
fit_region
XAFS region to perform the LCF. Accepted values are 'dxanes',
'xanes', or 'exafs'. The default is 'xanes'.
fit_range
Domain range in absolute values. Energy units are expected
for 'dxanes' or 'xanes', while wavenumber (k) units are expected
for 'exafs'.
The default is [-:data:`~numpy.inf`, :data:`~numpy.inf`].
scantag
Key to filter the scan group in the collection based on the ``tags``
attribute. The default is 'scan'.
reftag
Key to filter the reference groups in the collection based on the ``tags``
        attribute. The default is 'ref'.
kweight
Exponent for weighting chi(k) by k^kweight. Only valid for ``fit_region='exafs'``.
The default is 2.
sum_one
Conditional to force sum of fractions to be one.
The default is True.
method
Fitting method. Currently only local optimization methods are supported.
See the :func:`~lmfit.minimizer.minimize` function of ``lmfit`` for a list
of valid methods.
The default is ``leastsq``.
Returns
-------
:
Fit group with the following arguments:
- ``energy`` : array with energy values.
Returned only if ``fit_region='xanes'`` or ``fit_region='dxanes'``.
- ``k`` : array with wavenumber values.
Returned only if ``fit_region='exafs'``.
- ``scangroup``: name of the group containing the fitted spectrum.
- ``refgroups``: list with names of groups containing reference spectra.
- ``scan`` : array with values of the fitted spectrum.
- ``ref`` : array with interpolated values for each reference spectrum.
- ``fit`` : array with fit result.
- ``min_pars`` : object with the optimized parameters and goodness-of-fit statistics.
- ``lcf_pars`` : dictionary with lcf parameters.
Raises
------
TypeError
If ``collection`` is not a valid Collection instance.
AttributeError
If ``collection`` has no ``tags`` attribute.
AttributeError
If groups have no ``energy`` or ``norm`` attribute.
Only verified if ``fit_region='dxanes'`` or ``fit_region='xanes'``.
AttributeError
If groups have no ``k`` or ``chi`` attribute.
        Only verified if ``fit_region='exafs'``.
KeyError
        If ``scantag`` or ``reftag`` are not keys of the ``tags`` attribute.
ValueError
If ``fit_region`` is not recognized.
ValueError
        If ``fit_range`` is outside the domain of a reference group.
Important
---------
If more than one group in ``collection`` is tagged with ``scantag``,
a warning will be raised and only the first group will be fitted.
Notes
-----
The ``min_pars`` object is returned by the :func:`minimize` function of
``lmfit``, and contains the following attributes (non-exhaustive list):
- ``params`` : dictionary with the optimized parameters.
- ``var_names`` : ordered list of parameter names used in optimization.
- ``covar`` : covariance matrix from minimization.
- ``init_vals`` : list of initial values for variable parameters using
``var_names``.
- ``success`` : True if the fit succeeded, otherwise False.
- ``nvarys`` : number of variables.
- ``ndata`` : number of data points.
- ``chisqr`` : chi-square.
- ``redchi`` : reduced chi-square.
- ``residual`` : array with fit residuals.
Example
-------
>>> from numpy.random import seed, normal
>>> from numpy import arange, sin, pi
>>> from araucaria import Group, Dataset, Collection
>>> from araucaria.fit import lcf
>>> from araucaria.utils import check_objattrs
>>> seed(1234) # seed of random values
>>> k = arange(0, 12, 0.05)
>>> eps = normal(0, 0.1, len(k))
>>> f1 = 1.2 # freq 1
>>> f2 = 2.6 # freq 2
>>> amp1 = 0.4 # amp 1
>>> amp2 = 0.6 # amp 2
>>> group1 = Group(**{'name': 'group1', 'k': k, 'chi': sin(2*pi*f1*k)})
>>> group2 = Group(**{'name': 'group2', 'k': k, 'chi': sin(2*pi*f2*k)})
>>> group3 = Group(**{'name': 'group3', 'k': k,
... 'chi' : amp1 * group1.chi + amp2 * group2.chi + eps})
>>> collection = Collection()
>>> tags = ['ref', 'ref', 'scan']
>>> for i, group in enumerate((group1,group2, group3)):
... collection.add_group(group, tag=tags[i])
>>> # performing lcf
>>> out = lcf(collection, fit_region='exafs', fit_range=[3,10],
... kweight=0, sum_one=False)
>>> check_objattrs(out, Dataset,
... attrlist=['k', 'scangroup', 'refgroups',
... 'scan', 'ref1', 'ref2', 'fit', 'min_pars', 'lcf_pars'])
[True, True, True, True, True, True, True, True, True]
>>> for key, val in out.min_pars.params.items():
... print('%1.4f +/- %1.4f' % (val.value, val.stderr))
0.4003 +/- 0.0120
0.5943 +/- 0.0120
"""
# checking class and attributes
check_objattrs(collection, Collection, attrlist=['tags'], exceptions=True)
# verifying fit type
fit_valid = ['dxanes', 'xanes','exafs']
if fit_region not in fit_valid:
raise ValueError('fit_region %s not recognized.'%fit_region)
# required groups
# at least a spectrum and a single reference must be provided
for tag in (scantag, reftag):
if tag not in collection.tags:
raise KeyError("'%s' is not a valid key for the collection." % tag)
# scan and ref tags
scangroup = collection.tags[scantag]
if len(scangroup) > 1:
warn("More than one group is tagged as scan. Only the first group will be considered.")
scangroup = scangroup[0]
    refgroups = sorted(collection.tags[reftag])
# the first element is the scan group
groups = [scangroup] + refgroups
# storing report parameters
lcf_pars = {'fit_region':fit_region, 'fit_range':fit_range, 'sum_one':sum_one}
# report parameters for exafs lcf
if fit_region == 'exafs':
lcf_pars['kweight'] = kweight
# storing name of x-variable (exafs)
xvar = 'k'
# report parameters for xanes/dxanes lcf
else:
# storing name of x-variable (xanes/dxanes)
xvar = 'energy'
# content dictionary
content = {'scangroup': scangroup,
'refgroups': refgroups}
# reading and processing spectra
for i, name in enumerate(groups):
dname = 'scan' if i==0 else 'ref'+str(i)
group = collection.get_group(name).copy()
if fit_region == 'exafs':
check_objattrs(group, Group, attrlist=['k', 'chi'], exceptions=True)
else:
# fit_region == 'xanes' or 'dxanes'
check_objattrs(group, Group, attrlist=['energy', 'norm'], exceptions=True)
if i == 0:
# first value is the spectrum, so we extract the
# interpolation values from xvar
xvals = getattr(group, xvar)
index = index_xrange(fit_range, xvals)
xvals = xvals[index]
# storing the y-variable
if fit_region == 'exafs':
yvals = xvals**kweight*group.chi[index]
elif fit_region == 'xanes':
yvals = group.norm[index]
else:
# derivative lcf
yvals = gradient(group.norm[index]) / gradient(group.energy[index])
else:
# spline interpolation of references
if fit_region == 'exafs':
s = interp1d(group.k, group.k**kweight*group.chi, kind='cubic')
            elif fit_region == 'xanes':
s = interp1d(group.energy, group.norm, kind='cubic')
else:
s = interp1d(group.energy, gradient(group.norm)/gradient(group.energy), kind='cubic')
# interpolating in the fit range
try:
yvals = s(xvals)
            except ValueError as exc:
                raise ValueError('fit_range is outside the domain of group %s' % name) from exc
# saving yvals in the dictionary
content[dname] = yvals
    # storing the x-variable array (becomes an attribute of the output Dataset)
content[xvar] = xvals
# setting initial values and parameters for fit model
initval = around(1/(len(groups)-1), decimals=1)
params = Parameters()
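    # expr accumulates the string '1 - amp1 - ... - amp(n-1)'; when sum_one is
    # True the last amplitude is defined by this expression so fractions sum to one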
expr = str(1)
for i in range(len(groups)-1):
parname = 'amp'+str(i+1)
        if (i == len(groups) - 2) and sum_one:
params.add(parname, expr=expr)
else:
params.add(parname, value=initval, min=0, max=1, vary=True)
expr += ' - amp'+str(i+1)
# perform fit
    fit_result = minimize(residuals, params, method=method, args=(content,))
    # storing fit data, parameters, and results
    content['fit'] = sum_references(fit_result.params, content)
    content['lcf_pars'] = lcf_pars
    content['min_pars'] = fit_result
out = Dataset(**content)
return out |
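Note: the residuals function minimized above is defined elsewhere in araucaria and is not included in this snippet. A minimal sketch of what such a helper plausibly looks like, given how lcf() assembles the content dictionary and assuming sum_references (next entry) is importable from the same module, is shown below; the actual library implementation may differ.

from numpy import ndarray
from lmfit import Parameters
from araucaria.fit import sum_references

def residuals(pars: Parameters, data: dict) -> ndarray:
    # lmfit minimizes the sum of squares of this array (e.g. with 'leastsq').
    # 'scan' and the 'ref<i>' arrays were interpolated by lcf() onto a common
    # grid, so the subtraction is element-wise over the fit range.
    return data['scan'] - sum_references(pars, data)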
Python | def sum_references(pars: Parameters, data: dict) -> ndarray:
"""Returns the sum of references weighted by amplitude coefficients.
Parameters
----------
pars
        ``Parameters`` object from ``lmfit`` containing the amplitude
coefficients for each reference spectrum. At least attribute
'amp1' should exist in the object.
data
        Dictionary with the reference arrays. At least key 'ref1'
should exist in the dictionary.
Returns
-------
:
Sum of references weighted by amplitude coefficients.
Important
    ---------
The number of 'amp' attributes in ``pars`` should match the
number of 'ref' keys in ``data``.
Example
-------
>>> from numpy import allclose
>>> from lmfit import Parameters
>>> from araucaria.fit import sum_references
>>> pars = Parameters()
>>> pars.add('amp1', value=0.4)
>>> pars.add('amp2', value=0.7)
>>> data = {'ref1': 1.0, 'ref2': 2.0}
>>> allclose(sum_references(pars, data), 1.8)
True
"""
return (sum([pars['amp'+str(i)]* data['ref'+str(i)]
            for i in range(1,len(pars)+1)], axis=0)) | def sum_references(pars: Parameters, data: dict) -> ndarray:
"""Returns the sum of references weighted by amplitude coefficients.
Parameters
----------
pars
        ``Parameters`` object from ``lmfit`` containing the amplitude
coefficients for each reference spectrum. At least attribute
'amp1' should exist in the object.
data
        Dictionary with the reference arrays. At least key 'ref1'
should exist in the dictionary.
Returns
-------
:
Sum of references weighted by amplitude coefficients.
Important
    ---------
The number of 'amp' attributes in ``pars`` should match the
number of 'ref' keys in ``data``.
Example
-------
>>> from numpy import allclose
>>> from lmfit import Parameters
>>> from araucaria.fit import sum_references
>>> pars = Parameters()
>>> pars.add('amp1', value=0.4)
>>> pars.add('amp2', value=0.7)
>>> data = {'ref1': 1.0, 'ref2': 2.0}
>>> allclose(sum_references(pars, data), 1.8)
True
"""
return (sum([pars['amp'+str(i)]* data['ref'+str(i)]
for i in range(1,len(pars)+1)], axis=0)) |
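Usage note (illustrative only, reusing the out Dataset from the lcf() docstring example above): because lcf() builds its 'fit' array through sum_references, the optimized amplitudes can be fed back through sum_references to reproduce it.

from numpy import allclose
from araucaria.fit import sum_references

# 'out' is the Dataset returned by the lcf() example shown earlier
recon = sum_references(out.min_pars.params, {'ref1': out.ref1, 'ref2': out.ref2})
print(allclose(recon, out.fit))  # True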