repo (string, 2-99 chars) | file (string, 13-225 chars) | code (string, 0-18.3M chars) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (string, 1 class)
---|---|---|---|---|---|---|
f2fs-stable-linux-3.18.y
|
f2fs-stable-linux-3.18.y/tools/perf/scripts/python/sched-migration.py
|
#!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <[email protected]>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
ret += self.origin_tostring()
return ret
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
def find_time_slice(self, ts):
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm, common_callchain)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm, common_callchain,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm, common_callchain)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm, common_callchain)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm, common_callchain)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid):
pass
def trace_unhandled(event_name, context, event_fields_dict):
pass
| 11,965 | 24.956616 | 88 |
py
|
f2fs-stable-linux-3.18.y
|
f2fs-stable-linux-3.18.y/tools/perf/scripts/python/net_dropmonitor.py
|
# Monitor the system for dropped packets and produce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
drop_log = {}
kallsyms = []
def get_kallsyms_table():
global kallsyms
try:
f = open("/proc/kallsyms", "r")
except:
return
for line in f:
loc = int(line.split()[0], 16)
name = line.split()[2]
kallsyms.append((loc, name))
kallsyms.sort()
def get_sym(sloc):
loc = int(sloc)
# Invariant: kallsyms[i][0] <= loc for all 0 <= i <= start
# kallsyms[i][0] > loc for all end <= i < len(kallsyms)
start, end = -1, len(kallsyms)
while end != start + 1:
pivot = (start + end) // 2
if loc < kallsyms[pivot][0]:
end = pivot
else:
start = pivot
# Now (start == -1 or kallsyms[start][0] <= loc)
# and (start == len(kallsyms) - 1 or loc < kallsyms[start + 1][0])
if start >= 0:
symloc, name = kallsyms[start]
return (name, loc - symloc)
else:
return (None, 0)
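# A minimal sketch (not part of the original script) exercising the binary
# search in get_sym() against a tiny hand-made symbol table; the addresses
# and names below are invented for illustration only:
def _get_sym_example():
    saved = kallsyms[:]
    kallsyms[:] = [(0x1000, "sym_a"), (0x2000, "sym_b"), (0x3000, "sym_c")]
    try:
        # 0x2010 falls inside sym_b, 0x10 bytes past its start
        assert get_sym(0x2010) == ("sym_b", 0x10)
        # addresses below the first symbol resolve to nothing
        assert get_sym(0x0800) == (None, 0)
    finally:
        kallsyms[:] = saved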
def print_drop_table():
print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
for i in drop_log.keys():
(sym, off) = get_sym(i)
if sym == None:
sym = i
print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
print "Starting trace (Ctrl-C to dump results)"
def trace_end():
print "Gathering kallsyms data"
get_kallsyms_table()
print_drop_table()
# called from perf, when it finds a corresponding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, callchain,
skbaddr, location, protocol):
slocation = str(location)
try:
drop_log[slocation] = drop_log[slocation] + 1
except:
drop_log[slocation] = 1
| 1,749 | 22.026316 | 90 |
py
|
f2fs-stable-linux-3.18.y
|
f2fs-stable-linux-3.18.y/tools/perf/scripts/python/failed-syscalls-by-pid.py
|
# failed system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
def syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
raw_syscalls__sys_exit(**locals())
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20s %10d\n" % (strerror(ret), val),
| 2,233 | 27.278481 | 112 |
py
|
f2fs-stable-linux-3.18.y
|
f2fs-stable-linux-3.18.y/tools/perf/scripts/python/netdev-times.py
|
# Display the processing flow of packets and the time spent at each stage.
# It helps investigate networking and network device behaviour.
#
# options
# tx: show only the tx chart
# rx: show only the rx chart
# dev=: show only events related to the specified device
# debug: work in debug mode; also show buffer status.
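#
# Example invocation (illustrative only; the device name is hypothetical):
#   perf script -s netdev-times.py dev=eth0 debug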
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # all tracepoint events related to this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value includes the time of NET_RX softirq-entry
# and a list which stacks receive events
receive_hunk_list = []; # a list which includes a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which have been freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
return (dst - src) / 1000000.0
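# Worked example for diff_msec() (timestamps invented): an event 3,500,000ns
# after the source timestamp is reported as 3.5 msec:
#   diff_msec(1000000000, 1003500000) == 3.5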
# Display a process of transmitting a packet
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display the processing of received packets and the interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
# check if this hunk should be shown
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
global show_tx
global show_rx
global dev
global debug
for i in range(len(sys.argv)):
if i == 0:
continue
arg = sys.argv[i]
if arg == 'tx':
show_tx = 1
elif arg =='rx':
show_rx = 1
elif arg.find('dev=',0, 4) >= 0:
dev = arg[4:]
elif arg == 'debug':
debug = 1
if show_tx == 0 and show_rx == 0:
show_tx = 1
show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
callchain, irq, irq_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
irq, irq_name)
all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, callchain, irq, ret):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, callchain, napi, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name)
all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm, callchain,
skbaddr, skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm, callchain,
skbaddr, skblen, rc, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, rc ,dev_name)
all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, callchain,
skbaddr, protocol, location):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, protocol, location)
all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr)
all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm, callchain,
skbaddr, skblen):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen)
all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
if cpu not in irq_dic.keys():
irq_dic[cpu] = []
irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
(name, context, cpu, time, pid, comm, irq, ret) = event_info
if cpu not in irq_dic.keys():
return
irq_record = irq_dic[cpu].pop()
if irq != irq_record['irq']:
return
irq_record.update({'irq_ext_t':time})
# if an irq doesn't include NET_RX softirq, drop.
if 'event_list' in irq_record.keys():
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'sirq_raise'})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
irq_list = []
event_list = 0
if cpu in irq_dic.keys():
irq_list = irq_dic[cpu]
del irq_dic[cpu]
if cpu in net_rx_dic.keys():
sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
event_list = net_rx_dic[cpu]['event_list']
del net_rx_dic[cpu]
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
'irq_list':irq_list, 'event_list':event_list}
# merge information related to a NET_RX softirq
receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
if cpu in net_rx_dic.keys():
event_list = net_rx_dic[cpu]['event_list']
rec_data = {'event_name':'napi_poll',
'dev':dev_name, 'event_t':time}
event_list.append(rec_data)
def handle_netif_rx(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'netif_rx',
'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
global of_count_rx_skb_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
if len(rx_skb_list) > buffer_budget:
rx_skb_list.pop()
of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
global of_count_tx_queue_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
tx_queue_list.insert(0, skb)
if len(tx_queue_list) > buffer_budget:
tx_queue_list.pop()
of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
global of_count_tx_xmit_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, rc, dev_name) = event_info
if rc == 0: # NETDEV_TX_OK
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
skb['xmit_t'] = time
tx_xmit_list.insert(0, skb)
del tx_queue_list[i]
if len(tx_xmit_list) > buffer_budget:
tx_xmit_list.pop()
of_count_tx_xmit_list += 1
return
def handle_kfree_skb(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, protocol, location) = event_info
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
del tx_queue_list[i]
return
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if rec_data['skbaddr'] == skbaddr:
rec_data.update({'handle':"kfree_skb",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
def handle_consume_skb(event_info):
(name, context, cpu, time, pid, comm, skbaddr) = event_info
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
def handle_skb_copy_datagram_iovec(event_info):
(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if skbaddr == rec_data['skbaddr']:
rec_data.update({'handle':"skb_copy_datagram_iovec",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
| 15,191 | 31.670968 | 89 |
py
|
f2fs-stable-linux-3.18.y
|
f2fs-stable-linux-3.18.y/tools/perf/scripts/python/futex-contention.py
|
# futex contention
# (c) 2010, Arnaldo Carvalho de Melo <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
process_names = {}
thread_thislock = {}
thread_blocktime = {}
lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
process_names = {} # long-lived pid-to-execname mapping
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,
nr, uaddr, op, val, utime, uaddr2, val3):
cmd = op & FUTEX_CMD_MASK
if cmd != FUTEX_WAIT:
return # we don't care about originators of WAKE events
process_names[tid] = comm
thread_thislock[tid] = uaddr
thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,
nr, ret):
if thread_blocktime.has_key(tid):
elapsed = nsecs(s, ns) - thread_blocktime[tid]
add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
del thread_blocktime[tid]
del thread_thislock[tid]
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
for (tid, lock) in lock_waits:
min, max, avg, count = lock_waits[tid, lock]
print "%s[%d] lock %x contended %d times, %d avg ns" % \
(process_names[tid], tid, lock, count, avg)
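# A minimal sketch (not part of the original script) of why the
# "op & FUTEX_CMD_MASK" test above also catches FUTEX_WAIT_PRIVATE:
# the private and clock-realtime flag bits are simply masked away.
# The constants mirror the ones imported from Util.
def _futex_mask_example():
    wait, private_flag, clock_realtime = 0, 128, 256
    cmd_mask = ~(private_flag | clock_realtime)
    op = wait | private_flag    # a FUTEX_WAIT_PRIVATE op as seen by sys_enter
    assert op & cmd_mask == wait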
| 1,508 | 28.588235 | 96 |
py
|
f2fs-stable-linux-3.18.y
|
f2fs-stable-linux-3.18.y/tools/perf/scripts/python/check-perf-trace.py
|
# perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
print "trace_begin"
pass
def trace_end():
print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, vec):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "vec=%s\n" % \
(symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, call_site, ptr, bytes_req, bytes_alloc,
gfp_flags):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "call_site=%u, ptr=%u, bytes_req=%u, " \
"bytes_alloc=%u, gfp_flags=%s\n" % \
(call_site, ptr, bytes_req, bytes_alloc,
flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
try:
unhandled[event_name] += 1
except TypeError:
unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
print "%-20s %5u %05u.%09u %8u %-20s " % \
(event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
% (common_pc(context), trace_flag_str(common_flags(context)), \
common_lock_depth(context))
def print_unhandled():
keys = unhandled.keys()
if not keys:
return
print "\nunhandled events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for event_name in keys:
print "%-40s %10d\n" % (event_name, unhandled[event_name])
| 2,539 | 29.60241 | 78 |
py
|
f2fs-stable-linux-3.18.y
|
f2fs-stable-linux-3.18.y/tools/perf/scripts/python/event_analyzing_sample.py
|
# event_analyzing_sample.py: general event handler in python
#
# The current perf report is already very powerful with its integrated
# annotation, and this script is not trying to be as powerful as perf report;
# instead it gives end users/developers a flexible way to analyze events
# other than trace points.
#
# The 2 database related functions in this script just show how to gather
# the basic information, and users can modify and write their own functions
# according to their specific requirement.
#
# The first function "show_general_events" just does a basic grouping for all
# generic events with the help of sqlite, and the 2nd one "show_pebs_ll" is
# for an x86 HW PMU event: PEBS with load latency data.
#
import os
import sys
import math
import struct
import sqlite3
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from EventClass import *
#
# If the perf.data file has a large number of samples, the insert operation
# will be very time consuming (about 10+ minutes for 10000 samples) if the
# .db database is on disk. Move the .db file to a RAM-based FS to speed up
# the handling, which cuts the time down to several seconds.
#
con = sqlite3.connect("/dev/shm/perf.db")
con.isolation_level = None
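#
# A minimal sketch (not part of the original script): sqlite3 can also keep
# the database purely in memory, which avoids the on-disk insert cost
# described above; the helper name and its default path are illustrative.
#
def open_perf_db(path=":memory:"):
    db = sqlite3.connect(path)
    db.isolation_level = None   # autocommit, same as the connection above
    return db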
def trace_begin():
print "In trace_begin:\n"
#
# Will create several tables at the start, pebs_ll is for PEBS data with
# load latency info, while gen_events is for general event.
#
con.execute("""
create table if not exists gen_events (
name text,
symbol text,
comm text,
dso text
);""")
con.execute("""
create table if not exists pebs_ll (
name text,
symbol text,
comm text,
dso text,
flags integer,
ip integer,
status integer,
dse integer,
dla integer,
lat integer
);""")
#
# Create and insert event object to a database so that user could
# do more analysis with simple database commands.
#
def process_event(param_dict):
event_attr = param_dict["attr"]
sample = param_dict["sample"]
raw_buf = param_dict["raw_buf"]
comm = param_dict["comm"]
name = param_dict["ev_name"]
# Symbol and dso info are not always resolved
if (param_dict.has_key("dso")):
dso = param_dict["dso"]
else:
dso = "Unknown_dso"
if (param_dict.has_key("symbol")):
symbol = param_dict["symbol"]
else:
symbol = "Unknown_symbol"
# Create the event object and insert it to the right table in database
event = create_event(name, comm, dso, symbol, raw_buf)
insert_db(event)
def insert_db(event):
if event.ev_type == EVTYPE_GENERIC:
con.execute("insert into gen_events values(?, ?, ?, ?)",
(event.name, event.symbol, event.comm, event.dso))
elif event.ev_type == EVTYPE_PEBS_LL:
event.ip &= 0x7fffffffffffffff
event.dla &= 0x7fffffffffffffff
con.execute("insert into pebs_ll values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
(event.name, event.symbol, event.comm, event.dso, event.flags,
event.ip, event.status, event.dse, event.dla, event.lat))
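# A minimal sketch (not part of the original script) of the kind of ad-hoc
# query a user can run once the tables are filled; the symbol name passed
# in by the caller is purely illustrative:
def count_samples_for_symbol(sym):
    q = con.execute("select count(*) from gen_events where symbol = ?", (sym,))
    return q.fetchone()[0]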
def trace_end():
print "In trace_end:\n"
# We show the basic info for the 2 type of event classes
show_general_events()
show_pebs_ll()
con.close()
#
# As the event count may be very large, we can't show the histogram
# on a linear scale, so a log2 scale is used instead.
#
def num2sym(num):
# Each number will have at least one '#'
snum = '#' * (int)(math.log(num, 2) + 1)
return snum
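# Worked examples for the log2 scaling above (counts invented):
#   num2sym(1)    == "#"           (log2(1) + 1 = 1)
#   num2sym(8)    == "####"        (log2(8) + 1 = 4)
#   num2sym(1000) == "##########"  (log2(1000) + 1 ~ 10.97, truncated to 10)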
def show_general_events():
# Check the total record number in the table
count = con.execute("select count(*) from gen_events")
for t in count:
print "There is %d records in gen_events table" % t[0]
if t[0] == 0:
return
print "Statistics about the general events grouped by thread/symbol/dso: \n"
# Group by thread
commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)")
print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
for row in commq:
print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by symbol
print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)")
for row in symbolq:
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by dso
print "\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74)
dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)")
for row in dsoq:
print "%40s %8d %s" % (row[0], row[1], num2sym(row[1]))
#
# This function just shows the basic info, and we could do more with the
# data in the tables, like checking the function parameters when some
# big latency events happen.
#
def show_pebs_ll():
count = con.execute("select count(*) from pebs_ll")
for t in count:
print "There is %d records in pebs_ll table" % t[0]
if t[0] == 0:
return
print "Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n"
# Group by thread
commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)")
print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
for row in commq:
print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by symbol
print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)")
for row in symbolq:
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by dse
dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)")
print "\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58)
for row in dseq:
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by latency
latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat")
print "\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58)
for row in latq:
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
def trace_unhandled(event_name, context, event_fields_dict):
print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
| 7,393 | 37.915789 | 117 |
py
|
f2fs-stable-linux-3.18.y
|
f2fs-stable-linux-3.18.y/tools/perf/scripts/python/syscall-counts-by-pid.py
|
# system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, id, args):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
try:
syscalls[common_comm][common_pid][id] += 1
except TypeError:
syscalls[common_comm][common_pid][id] = 1
def syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
raw_syscalls__sys_enter(**locals())
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events by comm/pid:\n\n",
print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id, val in sorted(syscalls[comm][pid].iteritems(), \
key = lambda(k, v): (v, k), reverse = True):
print " %-38s %10d\n" % (syscall_name(id), val),
| 2,105 | 27.08 | 77 |
py
|
f2fs-stable-linux-3.18.y
|
f2fs-stable-linux-3.18.y/tools/perf/scripts/python/sctop.py
|
# system call top
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
raw_syscalls__sys_enter(**locals())
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
| 2,102 | 24.962963 | 75 |
py
|
f2fs-stable-linux-3.18.y
|
f2fs-stable-linux-3.18.y/tools/perf/scripts/python/syscall-counts.py
|
# system call counts
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts.py [comm]\n";
for_comm = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
raw_syscalls__sys_enter(**locals())
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
print "%-40s %10d\n" % (syscall_name(id), val),
| 1,700 | 25.169231 | 77 |
py
|
f2fs-stable-linux-3.18.y
|
f2fs-stable-linux-3.18.y/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
|
# Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <[email protected]>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)),
return str
def add_stats(dict, key, value):
if not dict.has_key(key):
dict[key] = (value, value, value, 1)
else:
min, max, avg, count = dict[key]
if value < min:
min = value
if value > max:
max = value
avg = (avg + value) / 2
dict[key] = (min, max, avg, count + 1)
def clear_term():
print("\x1b[H\x1b[2J")
audit_package_warned = False
try:
import audit
machine_to_id = {
'x86_64': audit.MACH_86_64,
'alpha' : audit.MACH_ALPHA,
'ia64' : audit.MACH_IA64,
'ppc' : audit.MACH_PPC,
'ppc64' : audit.MACH_PPC64,
's390' : audit.MACH_S390,
's390x' : audit.MACH_S390X,
'i386' : audit.MACH_X86,
'i586' : audit.MACH_X86,
'i686' : audit.MACH_X86,
}
try:
machine_to_id['armeb'] = audit.MACH_ARMEB
except:
pass
machine_id = machine_to_id[os.uname()[4]]
except:
if not audit_package_warned:
audit_package_warned = True
print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
try:
return audit.audit_syscall_to_name(id, machine_id)
except:
return str(id)
def strerror(nr):
try:
return errno.errorcode[abs(nr)]
except:
return "Unknown %d errno" % nr
| 1,935 | 21.252874 | 72 |
py
|
f2fs-stable-linux-3.18.y
|
f2fs-stable-linux-3.18.y/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py
|
# SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <[email protected]>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
Y_OFFSET = 100
RECT_HEIGHT = 100
RECT_SPACE = 50
EVENT_MARKING_WIDTH = 5
def __init__(self, sched_tracer, title, parent = None, id = -1):
wx.Frame.__init__(self, parent, id, title)
(self.screen_width, self.screen_height) = wx.GetDisplaySize()
self.screen_width -= 10
self.screen_height -= 10
self.zoom = 0.5
self.scroll_scale = 20
self.sched_tracer = sched_tracer
self.sched_tracer.set_root_win(self)
(self.ts_start, self.ts_end) = sched_tracer.interval()
self.update_width_virtual()
self.nr_rects = sched_tracer.nr_rectangles() + 1
self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
# whole window panel
self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
# scrollable container
self.scroll = wx.ScrolledWindow(self.panel)
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
self.scroll.EnableScrolling(True, True)
self.scroll.SetFocus()
# scrollable drawing area
self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Fit()
self.Fit()
self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
self.txt = None
self.Show(True)
def us_to_px(self, val):
return val / (10 ** 3) * self.zoom
def px_to_us(self, val):
return (val / self.zoom) * (10 ** 3)
def scroll_start(self):
(x, y) = self.scroll.GetViewStart()
return (x * self.scroll_scale, y * self.scroll_scale)
def scroll_start_us(self):
(x, y) = self.scroll_start()
return self.px_to_us(x)
def paint_rectangle_zone(self, nr, color, top_color, start, end):
offset_px = self.us_to_px(start - self.ts_start)
width_px = self.us_to_px(end - self.ts_start)
offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
width_py = RootFrame.RECT_HEIGHT
dc = self.dc
if top_color is not None:
(r, g, b) = top_color
top_color = wx.Colour(r, g, b)
brush = wx.Brush(top_color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
width_py -= RootFrame.EVENT_MARKING_WIDTH
offset_py += RootFrame.EVENT_MARKING_WIDTH
(r ,g, b) = color
color = wx.Colour(r, g, b)
brush = wx.Brush(color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
def update_rectangles(self, dc, start, end):
start += self.ts_start
end += self.ts_start
self.sched_tracer.fill_zone(start, end)
def on_paint(self, event):
dc = wx.PaintDC(self.scroll_panel)
self.dc = dc
width = min(self.width_virtual, self.screen_width)
(x, y) = self.scroll_start()
start = self.px_to_us(x)
end = self.px_to_us(x + width)
self.update_rectangles(dc, start, end)
def rect_from_ypixel(self, y):
y -= RootFrame.Y_OFFSET
rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
return -1
return rect
def update_summary(self, txt):
if self.txt:
self.txt.Destroy()
self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
def on_mouse_down(self, event):
(x, y) = event.GetPositionTuple()
rect = self.rect_from_ypixel(y)
if rect == -1:
return
t = self.px_to_us(x) + self.ts_start
self.sched_tracer.mouse_down(rect, t)
def update_width_virtual(self):
self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
def __zoom(self, x):
self.update_width_virtual()
(xpos, ypos) = self.scroll.GetViewStart()
xpos = self.us_to_px(x) / self.scroll_scale
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
self.Refresh()
def zoom_in(self):
x = self.scroll_start_us()
self.zoom *= 2
self.__zoom(x)
def zoom_out(self):
x = self.scroll_start_us()
self.zoom /= 2
self.__zoom(x)
def on_key_press(self, event):
key = event.GetRawKeyCode()
if key == ord("+"):
self.zoom_in()
return
if key == ord("-"):
self.zoom_out()
return
key = event.GetKeyCode()
(x, y) = self.scroll.GetViewStart()
if key == wx.WXK_RIGHT:
self.scroll.Scroll(x + 1, y)
elif key == wx.WXK_LEFT:
self.scroll.Scroll(x - 1, y)
elif key == wx.WXK_DOWN:
self.scroll.Scroll(x, y + 1)
elif key == wx.WXK_UP:
self.scroll.Scroll(x, y - 1)
| 5,411 | 28.254054 | 158 |
py
|
f2fs-stable-linux-3.18.y
|
f2fs-stable-linux-3.18.y/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py
|
# Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <[email protected]>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
return defaultdict(autodict)
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
flag_fields[event_name][field_name]['delim'] = delim
def define_flag_value(event_name, field_name, value, field_str):
flag_fields[event_name][field_name]['values'][value] = field_str
def define_symbolic_field(event_name, field_name):
# nothing to do, really
pass
def define_symbolic_value(event_name, field_name, value, field_str):
symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
string = ""
if flag_fields[event_name][field_name]:
print_delim = 0
keys = flag_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string += flag_fields[event_name][field_name]['values'][idx]
break
if idx and (value & idx) == idx:
if print_delim and flag_fields[event_name][field_name]['delim']:
string += " " + flag_fields[event_name][field_name]['delim'] + " "
string += flag_fields[event_name][field_name]['values'][idx]
print_delim = 1
value &= ~idx
return string
def symbol_str(event_name, field_name, value):
string = ""
if symbolic_fields[event_name][field_name]:
keys = symbolic_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string = symbolic_fields[event_name][field_name]['values'][idx]
break
if (value == idx):
string = symbolic_fields[event_name][field_name]['values'][idx]
break
return string
trace_flags = { 0x00: "NONE", \
0x01: "IRQS_OFF", \
0x02: "IRQS_NOSUPPORT", \
0x04: "NEED_RESCHED", \
0x08: "HARDIRQ", \
0x10: "SOFTIRQ" }
def trace_flag_str(value):
string = ""
print_delim = 0
keys = trace_flags.keys()
for idx in keys:
if not value and not idx:
string += "NONE"
break
if idx and (value & idx) == idx:
if print_delim:
string += " | ";
string += trace_flags[idx]
print_delim = 1
value &= ~idx
return string
def taskState(state):
states = {
0 : "R",
1 : "S",
2 : "D",
64: "DEAD"
}
if state not in states:
return "Unknown"
return states[state]
class EventHeaders:
def __init__(self, common_cpu, common_secs, common_nsecs,
common_pid, common_comm, common_callchain):
self.cpu = common_cpu
self.secs = common_secs
self.nsecs = common_nsecs
self.pid = common_pid
self.comm = common_comm
self.callchain = common_callchain
def ts(self):
return (self.secs * (10 ** 9)) + self.nsecs
def ts_format(self):
return "%d.%d" % (self.secs, int(self.nsecs / 1000))
| 3,300 | 25.837398 | 86 |
py
|
f2fs-stable-linux-3.18.y
|
f2fs-stable-linux-3.18.y/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py
|
# EventClass.py
#
# This is a library defining some event type classes, which can
# be used by other scripts to analyze perf samples.
#
# Currently there are just a few classes defined as examples:
# PerfEvent is the base class for all perf event samples, PebsEvent
# is a HW-based Intel x86 PEBS event, and users can add more SW/HW
# event classes based on their requirements.
import struct
# Event types, user could add more here
EVTYPE_GENERIC = 0
EVTYPE_PEBS = 1 # Basic PEBS event
EVTYPE_PEBS_LL = 2 # PEBS event with load latency info
EVTYPE_IBS = 3
#
# Currently we don't have a good way to tell the event type other than by
# the size of the raw buffer: a raw PEBS event with load latency data is
# 176 bytes, while a pure PEBS event is 144 bytes.
#
def create_event(name, comm, dso, symbol, raw_buf):
if (len(raw_buf) == 144):
event = PebsEvent(name, comm, dso, symbol, raw_buf)
elif (len(raw_buf) == 176):
event = PebsNHM(name, comm, dso, symbol, raw_buf)
else:
event = PerfEvent(name, comm, dso, symbol, raw_buf)
return event
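# Illustrative sketch (not part of the original file): dispatch is done purely
# on the raw buffer size, so a synthetic 144-byte buffer selects PebsEvent.
# The event name and comm below are arbitrary.
def _example_create_event():
	fake_pebs_buf = struct.pack('Q' * 18, *range(18))	# 18 * 8 = 144 bytes
	ev = create_event("cycles", "bash", "/bin/bash", "main", fake_pebs_buf)
	return isinstance(ev, PebsEvent)	# True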
class PerfEvent(object):
event_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_GENERIC):
self.name = name
self.comm = comm
self.dso = dso
self.symbol = symbol
self.raw_buf = raw_buf
self.ev_type = ev_type
PerfEvent.event_num += 1
def show(self):
print "PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % (self.name, self.symbol, self.comm, self.dso)
#
# Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer
# contains the context info when that event happened: the EFLAGS and
# linear IP info, as well as all the registers.
#
class PebsEvent(PerfEvent):
pebs_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS):
tmp_buf=raw_buf[0:80]
flags, ip, ax, bx, cx, dx, si, di, bp, sp = struct.unpack('QQQQQQQQQQ', tmp_buf)
self.flags = flags
self.ip = ip
self.ax = ax
self.bx = bx
self.cx = cx
self.dx = dx
self.si = si
self.di = di
self.bp = bp
self.sp = sp
PerfEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
PebsEvent.pebs_num += 1
del tmp_buf
#
# Intel Nehalem and Westmere support PEBS plus Load Latency info which lie
# in the four 64 bit words write after the PEBS data:
# Status: records the IA32_PERF_GLOBAL_STATUS register value
# DLA: Data Linear Address (EIP)
# DSE: Data Source Encoding, where the latency happens, hit or miss
# in L1/L2/L3 or IO operations
# LAT: the actual latency in cycles
#
class PebsNHM(PebsEvent):
pebs_nhm_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS_LL):
tmp_buf=raw_buf[144:176]
status, dla, dse, lat = struct.unpack('QQQQ', tmp_buf)
self.status = status
self.dla = dla
self.dse = dse
self.lat = lat
PebsEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
PebsNHM.pebs_nhm_num += 1
del tmp_buf
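# Illustrative sketch (not part of the original file): a 176-byte buffer is
# routed to PebsNHM, whose last four 64-bit words become status/dla/dse/lat.
# The event name and comm below are arbitrary.
def _example_create_pebs_ll_event():
	fake_ll_buf = struct.pack('Q' * 22, *range(22))	# 22 * 8 = 176 bytes
	ev = create_event("mem-loads", "bash", "/bin/bash", "main", fake_ll_buf)
	return (ev.status, ev.dla, ev.dse, ev.lat)	# -> (18, 19, 20, 21)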
| 3,596 | 36.863158 | 125 |
py
|
f2fs-stable-linux-3.18.y
|
f2fs-stable-linux-3.18.y/tools/perf/tests/attr.py
|
#! /usr/bin/python
import os
import sys
import glob
import optparse
import tempfile
import logging
import shutil
import ConfigParser
class Fail(Exception):
def __init__(self, test, msg):
self.msg = msg
self.test = test
def getMsg(self):
return '\'%s\' - %s' % (self.test.path, self.msg)
class Unsup(Exception):
def __init__(self, test):
self.test = test
def getMsg(self):
return '\'%s\'' % self.test.path
class Event(dict):
terms = [
'cpu',
'flags',
'type',
'size',
'config',
'sample_period',
'sample_type',
'read_format',
'disabled',
'inherit',
'pinned',
'exclusive',
'exclude_user',
'exclude_kernel',
'exclude_hv',
'exclude_idle',
'mmap',
'comm',
'freq',
'inherit_stat',
'enable_on_exec',
'task',
'watermark',
'precise_ip',
'mmap_data',
'sample_id_all',
'exclude_host',
'exclude_guest',
'exclude_callchain_kernel',
'exclude_callchain_user',
'wakeup_events',
'bp_type',
'config1',
'config2',
'branch_sample_type',
'sample_regs_user',
'sample_stack_user',
]
def add(self, data):
for key, val in data:
log.debug(" %s = %s" % (key, val))
self[key] = val
def __init__(self, name, data, base):
log.debug(" Event %s" % name);
self.name = name;
self.group = ''
self.add(base)
self.add(data)
def compare_data(self, a, b):
# Allow multiple values in assignment separated by '|'
a_list = a.split('|')
b_list = b.split('|')
for a_item in a_list:
for b_item in b_list:
if (a_item == b_item):
return True
elif (a_item == '*') or (b_item == '*'):
return True
return False
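    # For example, compare_data('4000', '*') and compare_data('1|2', '2')
    # both return True, while compare_data('1', '2') returns False.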
def equal(self, other):
for t in Event.terms:
log.debug(" [%s] %s %s" % (t, self[t], other[t]));
if not self.has_key(t) or not other.has_key(t):
return False
if not self.compare_data(self[t], other[t]):
return False
return True
def diff(self, other):
for t in Event.terms:
if not self.has_key(t) or not other.has_key(t):
continue
if not self.compare_data(self[t], other[t]):
log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
# Test file description needs to have the following sections:
# [config]
# - just single instance in file
# - needs to specify:
# 'command' - perf command name
# 'args' - special command arguments
# 'ret' - expected command return value (0 by default)
#
# [eventX:base]
# - one or multiple instances in file
# - expected values assignments
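#
# A hypothetical example of such a test file (a sketch, not a file shipped
# with perf):
#
#   [config]
#   command = record
#   args    = kill >/dev/null 2>&1
#   ret     = 1
#
#   [event:base-record]
#   sample_period = 4000
#   sample_type   = 263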
class Test(object):
def __init__(self, path, options):
parser = ConfigParser.SafeConfigParser()
parser.read(path)
log.warning("running '%s'" % path)
self.path = path
self.test_dir = options.test_dir
self.perf = options.perf
self.command = parser.get('config', 'command')
self.args = parser.get('config', 'args')
try:
self.ret = parser.get('config', 'ret')
except:
self.ret = 0
self.expect = {}
self.result = {}
log.debug(" loading expected events");
self.load_events(path, self.expect)
def is_event(self, name):
if name.find("event") == -1:
return False
else:
return True
def load_events(self, path, events):
parser_event = ConfigParser.SafeConfigParser()
parser_event.read(path)
        # The event record section header contains the word 'event',
        # optionally followed by ':', which allows loading a 'parent
        # event' first as a base
for section in filter(self.is_event, parser_event.sections()):
parser_items = parser_event.items(section);
base_items = {}
# Read parent event if there's any
if (':' in section):
base = section[section.index(':') + 1:]
parser_base = ConfigParser.SafeConfigParser()
parser_base.read(self.test_dir + '/' + base)
base_items = parser_base.items('event')
e = Event(section, parser_items, base_items)
events[section] = e
def run_cmd(self, tempdir):
cmd = "PERF_TEST_ATTR=%s %s %s -o %s/perf.data %s" % (tempdir,
self.perf, self.command, tempdir, self.args)
ret = os.WEXITSTATUS(os.system(cmd))
log.info(" '%s' ret %d " % (cmd, ret))
if ret != int(self.ret):
raise Unsup(self)
def compare(self, expect, result):
match = {}
log.debug(" compare");
# For each expected event find all matching
# events in result. Fail if there's not any.
for exp_name, exp_event in expect.items():
exp_list = []
log.debug(" matching [%s]" % exp_name)
for res_name, res_event in result.items():
log.debug(" to [%s]" % res_name)
if (exp_event.equal(res_event)):
exp_list.append(res_name)
log.debug(" ->OK")
else:
log.debug(" ->FAIL");
log.debug(" match: [%s] matches %s" % (exp_name, str(exp_list)))
            # we did not find any matching event - fail
if (not exp_list):
exp_event.diff(res_event)
raise Fail(self, 'match failure');
match[exp_name] = exp_list
# For each defined group in the expected events
# check we match the same group in the result.
for exp_name, exp_event in expect.items():
group = exp_event.group
if (group == ''):
continue
for res_name in match[exp_name]:
res_group = result[res_name].group
if res_group not in match[group]:
raise Fail(self, 'group failure')
log.debug(" group: [%s] matches group leader %s" %
(exp_name, str(match[group])))
log.debug(" matched")
def resolve_groups(self, events):
for name, event in events.items():
group_fd = event['group_fd'];
if group_fd == '-1':
continue;
for iname, ievent in events.items():
if (ievent['fd'] == group_fd):
event.group = iname
log.debug('[%s] has group leader [%s]' % (name, iname))
break;
def run(self):
tempdir = tempfile.mkdtemp();
try:
# run the test script
self.run_cmd(tempdir);
# load events expectation for the test
log.debug(" loading result events");
for f in glob.glob(tempdir + '/event*'):
self.load_events(f, self.result);
# resolve group_fd to event names
self.resolve_groups(self.expect);
self.resolve_groups(self.result);
# do the expectation - results matching - both ways
self.compare(self.expect, self.result)
self.compare(self.result, self.expect)
finally:
# cleanup
shutil.rmtree(tempdir)
def run_tests(options):
for f in glob.glob(options.test_dir + '/' + options.test):
try:
Test(f, options).run()
except Unsup, obj:
log.warning("unsupp %s" % obj.getMsg())
def setup_log(verbose):
global log
level = logging.CRITICAL
if verbose == 1:
level = logging.WARNING
if verbose == 2:
level = logging.INFO
if verbose >= 3:
level = logging.DEBUG
log = logging.getLogger('test')
log.setLevel(level)
ch = logging.StreamHandler()
ch.setLevel(level)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)
USAGE = '''%s [OPTIONS]
-d dir # tests dir
-p path # perf binary
-t test # single test
-v # verbose level
''' % sys.argv[0]
def main():
parser = optparse.OptionParser(usage=USAGE)
parser.add_option("-t", "--test",
action="store", type="string", dest="test")
parser.add_option("-d", "--test-dir",
action="store", type="string", dest="test_dir")
parser.add_option("-p", "--perf",
action="store", type="string", dest="perf")
parser.add_option("-v", "--verbose",
action="count", dest="verbose")
options, args = parser.parse_args()
if args:
parser.error('FAILED wrong arguments %s' % ' '.join(args))
return -1
setup_log(options.verbose)
if not options.test_dir:
print 'FAILED no -d option specified'
sys.exit(-1)
if not options.test:
options.test = 'test*'
try:
run_tests(options)
except Fail, obj:
print "FAILED %s" % obj.getMsg();
sys.exit(-1)
sys.exit(0)
if __name__ == '__main__':
main()
| 9,441 | 27.354354 | 79 |
py
|
f2fs-stable-linux-3.18.y
|
f2fs-stable-linux-3.18.y/Documentation/networking/cxacru-cf.py
|
#!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
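#
# For illustration (not in the original): a blob packed as the three
# little-endian 32-bit values 1, 2 and 512 would be printed by this script as
#   0=1 1=2 2=512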
import sys
import struct
i = 0
while True:
buf = sys.stdin.read(4)
if len(buf) == 0:
break
elif len(buf) != 4:
sys.stdout.write("\n")
sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
sys.exit(1)
if i > 0:
sys.stdout.write(" ")
sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
i += 1
sys.stdout.write("\n")
| 1,626 | 32.204082 | 78 |
py
|
f2fs-stable-linux-3.18.y
|
f2fs-stable-linux-3.18.y/Documentation/target/tcm_mod_builder.py
|
#!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: [email protected]
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
buf += " u64 nport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* SCSI protocol the lport is providing */\n"
buf += " u8 lport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
buf += " u64 iport_wwpn;\n"
buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* ASCII formatted InitiatorName */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n"
buf += " u32 nexus_depth;\n\n"
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!se_nacl_new)\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
buf += " /*\n"
buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
buf += " * when converting a NodeACL from demo mode -> explict\n"
buf += " */\n"
buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
buf += " name, nexus_depth);\n"
buf += " if (IS_ERR(se_nacl)) {\n"
buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
buf += " return se_nacl;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
buf += " */\n"
buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return se_nacl;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
buf += " &tpg->se_tpg, (void *)tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += " .fabric_post_link = NULL,\n"
buf += " .fabric_pre_unlink = NULL,\n"
buf += " .fabric_make_np = NULL,\n"
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
buf += "};\n\n"
buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
buf += "{\n"
buf += " struct target_fabric_configfs *fabric;\n"
buf += " int ret;\n\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += " /*\n"
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
buf += " if (IS_ERR(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return PTR_ERR(fabric);\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
buf += " */\n"
buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
buf += " /*\n"
buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
buf += " */\n"
buf += " fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += " fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = NULL;\n"
buf += " fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
buf += " fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;\n"
buf += " fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
buf += " /*\n"
buf += " * Register the fabric for use within TCM\n"
buf += " */\n"
buf += " ret = target_fabric_configfs_register(fabric);\n"
buf += " if (ret < 0) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
buf += " return ret;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup our local pointer to *fabric\n"
buf += " */\n"
buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "{\n"
buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
buf += " return;\n\n"
buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " int ret;\n\n"
buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
buf += " if (ret < 0)\n"
buf += " return ret;\n\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " " + fabric_mod_name + "_deregister_configfs();\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_scan_fabric_ops(tcm_dir):
fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
process_fo = 0;
p = open(fabric_ops_api, 'r')
line = p.readline()
while line:
if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
line = p.readline()
continue
if process_fo == 0:
process_fo = 1;
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
continue
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
p.close()
return
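# For illustration, a matched function-pointer line collected from
# target_core_fabric.h typically looks like this (formatting is a sketch):
#	int (*write_pending)(struct se_cmd *);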
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name[4:] + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!nacl) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('\*release_cmd\)\(', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('stop_session\)\(', fo):
buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
if re.search('fall_back_to_erl0\)\(', fo):
buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
if re.search('sess_logged_in\)\(', fo):
buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('is_state_remove\)\(', fo):
buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
kbuild = tcm_dir + "/drivers/target/Makefile"
f = open(kbuild, 'a')
f.write(buf)
f.close()
return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
kconfig = tcm_dir + "/drivers/target/Kconfig"
f = open(kconfig, 'a')
f.write(buf)
f.close()
return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd();
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option is missing\n"
parser.print_help()
exit(-1)
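# Example invocation (illustrative; the module name is arbitrary):
#   ./tcm_mod_builder.py -m tcm_nab5000 -p iSCSI
# which creates drivers/target/tcm_nab5000/ containing *_base.h, *_fabric.c,
# *_fabric.h, *_configfs.c, a Kconfig and a Makefile, and optionally hooks
# them into drivers/target/Kconfig and Makefile.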
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
| 40,692 | 37.792183 | 162 |
py
|
f2fs-stable-linux-3.18.y
|
f2fs-stable-linux-3.18.y/scripts/analyze_suspend.py
|
#!/usr/bin/python
#
# Tool for analyzing suspend/resume timing
# Copyright (c) 2013, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
#
# Authors:
# Todd Brandt <[email protected]>
#
# Description:
# This tool is designed to assist kernel and OS developers in optimizing
# their linux stack's suspend/resume time. Using a kernel image built
# with a few extra options enabled, the tool will execute a suspend and
# will capture dmesg and ftrace data until resume is complete. This data
# is transformed into a device timeline and a callgraph to give a quick
# and detailed view of which devices and callbacks are taking the most
# time in suspend/resume. The output is a single html file which can be
# viewed in firefox or chrome.
#
# The following kernel build options are required:
# CONFIG_PM_DEBUG=y
# CONFIG_PM_SLEEP_DEBUG=y
# CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER=y
# CONFIG_FUNCTION_GRAPH_TRACER=y
#
# For kernel versions older than 3.15:
# The following additional kernel parameters are required:
# (e.g. in file /etc/default/grub)
# GRUB_CMDLINE_LINUX_DEFAULT="... initcall_debug log_buf_len=16M ..."
#
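# Typical usage (an illustrative sketch; the option names reflect this
# version's command line):
#	sudo ./analyze_suspend.py -m mem -rtcwake 15
# suspends the machine, arms the RTC to wake it 15 seconds later, and writes
# <hostname>_mem.html plus the captured dmesg/ftrace logs into a timestamped
# test directory.
#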
# ----------------- LIBRARIES --------------------
import sys
import time
import os
import string
import re
import platform
from datetime import datetime
import struct
# ----------------- CLASSES --------------------
# Class: SystemValues
# Description:
# A global, single-instance container used to
# store system values and test parameters
class SystemValues:
version = 3.0
verbose = False
testdir = '.'
tpath = '/sys/kernel/debug/tracing/'
fpdtpath = '/sys/firmware/acpi/tables/FPDT'
epath = '/sys/kernel/debug/tracing/events/power/'
traceevents = [
'suspend_resume',
'device_pm_callback_end',
'device_pm_callback_start'
]
modename = {
'freeze': 'Suspend-To-Idle (S0)',
'standby': 'Power-On Suspend (S1)',
'mem': 'Suspend-to-RAM (S3)',
'disk': 'Suspend-to-disk (S4)'
}
mempath = '/dev/mem'
powerfile = '/sys/power/state'
suspendmode = 'mem'
hostname = 'localhost'
prefix = 'test'
teststamp = ''
dmesgfile = ''
ftracefile = ''
htmlfile = ''
rtcwake = False
rtcwaketime = 10
rtcpath = ''
android = False
adb = 'adb'
devicefilter = []
stamp = 0
execcount = 1
x2delay = 0
usecallgraph = False
usetraceevents = False
usetraceeventsonly = False
notestrun = False
altdevname = dict()
postresumetime = 0
tracertypefmt = '# tracer: (?P<t>.*)'
firmwarefmt = '# fwsuspend (?P<s>[0-9]*) fwresume (?P<r>[0-9]*)$'
postresumefmt = '# post resume time (?P<t>[0-9]*)$'
stampfmt = '# suspend-(?P<m>[0-9]{2})(?P<d>[0-9]{2})(?P<y>[0-9]{2})-'+\
'(?P<H>[0-9]{2})(?P<M>[0-9]{2})(?P<S>[0-9]{2})'+\
' (?P<host>.*) (?P<mode>.*) (?P<kernel>.*)$'
def __init__(self):
self.hostname = platform.node()
if(self.hostname == ''):
self.hostname = 'localhost'
rtc = "rtc0"
if os.path.exists('/dev/rtc'):
rtc = os.readlink('/dev/rtc')
rtc = '/sys/class/rtc/'+rtc
if os.path.exists(rtc) and os.path.exists(rtc+'/date') and \
os.path.exists(rtc+'/time') and os.path.exists(rtc+'/wakealarm'):
self.rtcpath = rtc
def setOutputFile(self):
if((self.htmlfile == '') and (self.dmesgfile != '')):
m = re.match('(?P<name>.*)_dmesg\.txt$', self.dmesgfile)
if(m):
self.htmlfile = m.group('name')+'.html'
if((self.htmlfile == '') and (self.ftracefile != '')):
m = re.match('(?P<name>.*)_ftrace\.txt$', self.ftracefile)
if(m):
self.htmlfile = m.group('name')+'.html'
if(self.htmlfile == ''):
self.htmlfile = 'output.html'
def initTestOutput(self, subdir):
if(not self.android):
self.prefix = self.hostname
v = open('/proc/version', 'r').read().strip()
kver = string.split(v)[2]
else:
self.prefix = 'android'
v = os.popen(self.adb+' shell cat /proc/version').read().strip()
kver = string.split(v)[2]
testtime = datetime.now().strftime('suspend-%m%d%y-%H%M%S')
if(subdir != "."):
self.testdir = subdir+"/"+testtime
else:
self.testdir = testtime
self.teststamp = \
'# '+testtime+' '+self.prefix+' '+self.suspendmode+' '+kver
self.dmesgfile = \
self.testdir+'/'+self.prefix+'_'+self.suspendmode+'_dmesg.txt'
self.ftracefile = \
self.testdir+'/'+self.prefix+'_'+self.suspendmode+'_ftrace.txt'
self.htmlfile = \
self.testdir+'/'+self.prefix+'_'+self.suspendmode+'.html'
os.mkdir(self.testdir)
def setDeviceFilter(self, devnames):
self.devicefilter = string.split(devnames)
def rtcWakeAlarm(self):
os.system('echo 0 > '+self.rtcpath+'/wakealarm')
outD = open(self.rtcpath+'/date', 'r').read().strip()
outT = open(self.rtcpath+'/time', 'r').read().strip()
mD = re.match('^(?P<y>[0-9]*)-(?P<m>[0-9]*)-(?P<d>[0-9]*)', outD)
mT = re.match('^(?P<h>[0-9]*):(?P<m>[0-9]*):(?P<s>[0-9]*)', outT)
if(mD and mT):
# get the current time from hardware
utcoffset = int((datetime.now() - datetime.utcnow()).total_seconds())
dt = datetime(\
int(mD.group('y')), int(mD.group('m')), int(mD.group('d')),
int(mT.group('h')), int(mT.group('m')), int(mT.group('s')))
nowtime = int(dt.strftime('%s')) + utcoffset
else:
# if hardware time fails, use the software time
nowtime = int(datetime.now().strftime('%s'))
alarm = nowtime + self.rtcwaketime
os.system('echo %d > %s/wakealarm' % (alarm, self.rtcpath))
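	# For example, with rtcwaketime = 10 this arms the wakealarm for
	# "now + 10 seconds", using the RTC's own date/time when available.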
sysvals = SystemValues()
# Class: DeviceNode
# Description:
# A container used to create a device hierarchy, with a single root node
# and a tree of child nodes. Used by Data.deviceTopology()
class DeviceNode:
name = ''
children = 0
depth = 0
def __init__(self, nodename, nodedepth):
self.name = nodename
self.children = []
self.depth = nodedepth
# Class: Data
# Description:
# The primary container for suspend/resume test data. There is one for
# each test run. The data is organized into a chronological hierarchy:
# Data.dmesg {
# root structure, started as dmesg & ftrace, but now only ftrace
# contents: times for suspend start/end, resume start/end, fwdata
# phases {
# 10 sequential, non-overlapping phases of S/R
# contents: times for phase start/end, order/color data for html
# devlist {
# device callback or action list for this phase
# device {
# a single device callback or generic action
# contents: start/stop times, pid/cpu/driver info
# parents/children, html id for timeline/callgraph
# optionally includes an ftrace callgraph
# optionally includes intradev trace events
# }
# }
# }
# }
#
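# Illustrative sketch (values are hypothetical, not taken from a real run):
# a single device entry created by Data.newAction() and stored in
# Data.dmesg[phase]['list'][devname] looks roughly like this:
#   {'start': 100.2500, 'end': 100.3100, 'pid': 1234, 'par': 'pci0000:00',
#    'length': 0.0600, 'row': 0, 'id': 'a12', 'drv': 'e1000e'}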
class Data:
dmesg = {} # root data structure
phases = [] # ordered list of phases
start = 0.0 # test start
end = 0.0 # test end
tSuspended = 0.0 # low-level suspend start
tResumed = 0.0 # low-level resume start
tLow = 0.0 # time spent in low-level suspend (standby/freeze)
fwValid = False # is firmware data available
fwSuspend = 0 # time spent in firmware suspend
fwResume = 0 # time spent in firmware resume
dmesgtext = [] # dmesg text file in memory
testnumber = 0
idstr = ''
html_device_id = 0
stamp = 0
outfile = ''
def __init__(self, num):
idchar = 'abcdefghijklmnopqrstuvwxyz'
self.testnumber = num
self.idstr = idchar[num]
self.dmesgtext = []
self.phases = []
self.dmesg = { # fixed list of 10 phases
'suspend_prepare': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#CCFFCC', 'order': 0},
'suspend': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#88FF88', 'order': 1},
'suspend_late': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#00AA00', 'order': 2},
'suspend_noirq': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#008888', 'order': 3},
'suspend_machine': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#0000FF', 'order': 4},
'resume_machine': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#FF0000', 'order': 5},
'resume_noirq': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#FF9900', 'order': 6},
'resume_early': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#FFCC00', 'order': 7},
'resume': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#FFFF88', 'order': 8},
'resume_complete': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#FFFFCC', 'order': 9}
}
self.phases = self.sortedPhases()
def getStart(self):
return self.dmesg[self.phases[0]]['start']
def setStart(self, time):
self.start = time
self.dmesg[self.phases[0]]['start'] = time
def getEnd(self):
return self.dmesg[self.phases[-1]]['end']
def setEnd(self, time):
self.end = time
self.dmesg[self.phases[-1]]['end'] = time
def isTraceEventOutsideDeviceCalls(self, pid, time):
for phase in self.phases:
list = self.dmesg[phase]['list']
for dev in list:
d = list[dev]
if(d['pid'] == pid and time >= d['start'] and
time <= d['end']):
return False
return True
def addIntraDevTraceEvent(self, action, name, pid, time):
if(action == 'mutex_lock_try'):
color = 'red'
elif(action == 'mutex_lock_pass'):
color = 'green'
elif(action == 'mutex_unlock'):
color = 'blue'
else:
# create separate colors based on the name
v1 = len(name)*10 % 256
v2 = string.count(name, 'e')*100 % 256
v3 = ord(name[0])*20 % 256
color = '#%06X' % ((v1*0x10000) + (v2*0x100) + v3)
for phase in self.phases:
list = self.dmesg[phase]['list']
for dev in list:
d = list[dev]
if(d['pid'] == pid and time >= d['start'] and
time <= d['end']):
e = TraceEvent(action, name, color, time)
if('traceevents' not in d):
d['traceevents'] = []
d['traceevents'].append(e)
return d
return 0
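	# Worked example of the fallback color hash above (hypothetical name):
	# for name = 'msleep', v1 = 6*10 = 60 (0x3C), v2 = 2*100 = 200 (0xC8),
	# v3 = ord('m')*20 % 256 = 132 (0x84), giving color = '#3CC884'.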
def capIntraDevTraceEvent(self, action, name, pid, time):
for phase in self.phases:
list = self.dmesg[phase]['list']
for dev in list:
d = list[dev]
if(d['pid'] == pid and time >= d['start'] and
time <= d['end']):
if('traceevents' not in d):
return
for e in d['traceevents']:
if(e.action == action and
e.name == name and not e.ready):
e.length = time - e.time
e.ready = True
break
return
def trimTimeVal(self, t, t0, dT, left):
if left:
if(t > t0):
if(t - dT < t0):
return t0
return t - dT
else:
return t
else:
if(t < t0 + dT):
if(t > t0):
return t0 + dT
return t + dT
else:
return t
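	# Example of the trim math above (illustrative numbers): with t0 = 100.0,
	# dT = 2.0 and left = True, a timestamp of 99.5 is unchanged, 101.0 is
	# clamped to 100.0, and 103.0 becomes 101.0 (shifted back by dT).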
def trimTime(self, t0, dT, left):
self.tSuspended = self.trimTimeVal(self.tSuspended, t0, dT, left)
self.tResumed = self.trimTimeVal(self.tResumed, t0, dT, left)
self.start = self.trimTimeVal(self.start, t0, dT, left)
self.end = self.trimTimeVal(self.end, t0, dT, left)
for phase in self.phases:
p = self.dmesg[phase]
p['start'] = self.trimTimeVal(p['start'], t0, dT, left)
p['end'] = self.trimTimeVal(p['end'], t0, dT, left)
list = p['list']
for name in list:
d = list[name]
d['start'] = self.trimTimeVal(d['start'], t0, dT, left)
d['end'] = self.trimTimeVal(d['end'], t0, dT, left)
if('ftrace' in d):
cg = d['ftrace']
cg.start = self.trimTimeVal(cg.start, t0, dT, left)
cg.end = self.trimTimeVal(cg.end, t0, dT, left)
for line in cg.list:
line.time = self.trimTimeVal(line.time, t0, dT, left)
if('traceevents' in d):
for e in d['traceevents']:
e.time = self.trimTimeVal(e.time, t0, dT, left)
def normalizeTime(self, tZero):
# first trim out any standby or freeze clock time
if(self.tSuspended != self.tResumed):
if(self.tResumed > tZero):
self.trimTime(self.tSuspended, \
self.tResumed-self.tSuspended, True)
else:
self.trimTime(self.tSuspended, \
self.tResumed-self.tSuspended, False)
# shift the timeline so that tZero is the new 0
self.tSuspended -= tZero
self.tResumed -= tZero
self.start -= tZero
self.end -= tZero
for phase in self.phases:
p = self.dmesg[phase]
p['start'] -= tZero
p['end'] -= tZero
list = p['list']
for name in list:
d = list[name]
d['start'] -= tZero
d['end'] -= tZero
if('ftrace' in d):
cg = d['ftrace']
cg.start -= tZero
cg.end -= tZero
for line in cg.list:
line.time -= tZero
if('traceevents' in d):
for e in d['traceevents']:
e.time -= tZero
def newPhaseWithSingleAction(self, phasename, devname, start, end, color):
for phase in self.phases:
self.dmesg[phase]['order'] += 1
self.html_device_id += 1
devid = '%s%d' % (self.idstr, self.html_device_id)
list = dict()
list[devname] = \
{'start': start, 'end': end, 'pid': 0, 'par': '',
'length': (end-start), 'row': 0, 'id': devid, 'drv': '' };
self.dmesg[phasename] = \
{'list': list, 'start': start, 'end': end,
'row': 0, 'color': color, 'order': 0}
self.phases = self.sortedPhases()
def newPhase(self, phasename, start, end, color, order):
if(order < 0):
order = len(self.phases)
for phase in self.phases[order:]:
self.dmesg[phase]['order'] += 1
if(order > 0):
p = self.phases[order-1]
self.dmesg[p]['end'] = start
if(order < len(self.phases)):
p = self.phases[order]
self.dmesg[p]['start'] = end
list = dict()
self.dmesg[phasename] = \
{'list': list, 'start': start, 'end': end,
'row': 0, 'color': color, 'order': order}
self.phases = self.sortedPhases()
def setPhase(self, phase, ktime, isbegin):
if(isbegin):
self.dmesg[phase]['start'] = ktime
else:
self.dmesg[phase]['end'] = ktime
def dmesgSortVal(self, phase):
return self.dmesg[phase]['order']
def sortedPhases(self):
return sorted(self.dmesg, key=self.dmesgSortVal)
def sortedDevices(self, phase):
list = self.dmesg[phase]['list']
slist = []
tmp = dict()
for devname in list:
dev = list[devname]
tmp[dev['start']] = devname
for t in sorted(tmp):
slist.append(tmp[t])
return slist
def fixupInitcalls(self, phase, end):
# if any calls never returned, clip them at system resume end
phaselist = self.dmesg[phase]['list']
for devname in phaselist:
dev = phaselist[devname]
if(dev['end'] < 0):
dev['end'] = end
				vprint('%s (%s): callback did not return' % (devname, phase))
def deviceFilter(self, devicefilter):
		# remove all but the relatives of the filter devnames
filter = []
for phase in self.phases:
list = self.dmesg[phase]['list']
for name in devicefilter:
dev = name
while(dev in list):
if(dev not in filter):
filter.append(dev)
dev = list[dev]['par']
children = self.deviceDescendants(name, phase)
for dev in children:
if(dev not in filter):
filter.append(dev)
for phase in self.phases:
list = self.dmesg[phase]['list']
rmlist = []
for name in list:
pid = list[name]['pid']
if(name not in filter and pid >= 0):
rmlist.append(name)
for name in rmlist:
del list[name]
def fixupInitcallsThatDidntReturn(self):
# if any calls never returned, clip them at system resume end
for phase in self.phases:
self.fixupInitcalls(phase, self.getEnd())
def newActionGlobal(self, name, start, end):
# which phase is this device callback or action "in"
targetphase = "none"
overlap = 0.0
for phase in self.phases:
pstart = self.dmesg[phase]['start']
pend = self.dmesg[phase]['end']
o = max(0, min(end, pend) - max(start, pstart))
if(o > overlap):
targetphase = phase
overlap = o
if targetphase in self.phases:
self.newAction(targetphase, name, -1, '', start, end, '')
return True
return False
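	# Example of the overlap test above (illustrative times): for an event
	# spanning 100.2 - 100.6 and a phase spanning 100.5 - 101.0, the overlap
	# is max(0, min(100.6, 101.0) - max(100.2, 100.5)) = 0.1, and the event
	# lands in whichever phase yields the largest such overlap.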
def newAction(self, phase, name, pid, parent, start, end, drv):
# new device callback for a specific phase
self.html_device_id += 1
devid = '%s%d' % (self.idstr, self.html_device_id)
list = self.dmesg[phase]['list']
length = -1.0
if(start >= 0 and end >= 0):
length = end - start
list[name] = {'start': start, 'end': end, 'pid': pid, 'par': parent,
'length': length, 'row': 0, 'id': devid, 'drv': drv }
def deviceIDs(self, devlist, phase):
idlist = []
list = self.dmesg[phase]['list']
for devname in list:
if devname in devlist:
idlist.append(list[devname]['id'])
return idlist
def deviceParentID(self, devname, phase):
pdev = ''
pdevid = ''
list = self.dmesg[phase]['list']
if devname in list:
pdev = list[devname]['par']
if pdev in list:
return list[pdev]['id']
return pdev
def deviceChildren(self, devname, phase):
devlist = []
list = self.dmesg[phase]['list']
for child in list:
if(list[child]['par'] == devname):
devlist.append(child)
return devlist
def deviceDescendants(self, devname, phase):
children = self.deviceChildren(devname, phase)
family = children
for child in children:
family += self.deviceDescendants(child, phase)
return family
def deviceChildrenIDs(self, devname, phase):
devlist = self.deviceChildren(devname, phase)
return self.deviceIDs(devlist, phase)
def printDetails(self):
vprint(' test start: %f' % self.start)
for phase in self.phases:
dc = len(self.dmesg[phase]['list'])
vprint(' %16s: %f - %f (%d devices)' % (phase, \
self.dmesg[phase]['start'], self.dmesg[phase]['end'], dc))
vprint(' test end: %f' % self.end)
def masterTopology(self, name, list, depth):
node = DeviceNode(name, depth)
for cname in list:
clist = self.deviceChildren(cname, 'resume')
cnode = self.masterTopology(cname, clist, depth+1)
node.children.append(cnode)
return node
def printTopology(self, node):
html = ''
if node.name:
info = ''
drv = ''
for phase in self.phases:
list = self.dmesg[phase]['list']
if node.name in list:
s = list[node.name]['start']
e = list[node.name]['end']
if list[node.name]['drv']:
drv = ' {'+list[node.name]['drv']+'}'
info += ('<li>%s: %.3fms</li>' % (phase, (e-s)*1000))
html += '<li><b>'+node.name+drv+'</b>'
if info:
html += '<ul>'+info+'</ul>'
html += '</li>'
if len(node.children) > 0:
html += '<ul>'
for cnode in node.children:
html += self.printTopology(cnode)
html += '</ul>'
return html
def rootDeviceList(self):
# list of devices graphed
real = []
for phase in self.dmesg:
list = self.dmesg[phase]['list']
for dev in list:
if list[dev]['pid'] >= 0 and dev not in real:
real.append(dev)
# list of top-most root devices
rootlist = []
for phase in self.dmesg:
list = self.dmesg[phase]['list']
for dev in list:
pdev = list[dev]['par']
if(re.match('[0-9]*-[0-9]*\.[0-9]*[\.0-9]*\:[\.0-9]*$', pdev)):
continue
if pdev and pdev not in real and pdev not in rootlist:
rootlist.append(pdev)
return rootlist
def deviceTopology(self):
rootlist = self.rootDeviceList()
master = self.masterTopology('', rootlist, 0)
return self.printTopology(master)
# Class: TraceEvent
# Description:
# A container for trace event data found in the ftrace file
class TraceEvent:
ready = False
name = ''
time = 0.0
color = '#FFFFFF'
length = 0.0
action = ''
def __init__(self, a, n, c, t):
self.action = a
self.name = n
self.color = c
self.time = t
# Class: FTraceLine
# Description:
# A container for a single line of ftrace data. There are six basic types:
# callgraph line:
# call: " dpm_run_callback() {"
# return: " }"
# leaf: " dpm_run_callback();"
# trace event:
# tracing_mark_write: SUSPEND START or RESUME COMPLETE
# suspend_resume: phase or custom exec block data
# device_pm_callback: device callback info
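# Illustrative sketch of how the callgraph forms above are decoded (sample
# lines, not from a real trace): "  dpm_run_callback() {" sets fcall=True
# with depth 1 (two leading spaces / 2), "  }" sets freturn=True, and
# "  dpm_run_callback();" is a leaf, setting both fcall and freturn.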
class FTraceLine:
time = 0.0
length = 0.0
fcall = False
freturn = False
fevent = False
depth = 0
name = ''
type = ''
def __init__(self, t, m, d):
self.time = float(t)
# is this a trace event
if(d == 'traceevent' or re.match('^ *\/\* *(?P<msg>.*) \*\/ *$', m)):
if(d == 'traceevent'):
# nop format trace event
msg = m
else:
# function_graph format trace event
em = re.match('^ *\/\* *(?P<msg>.*) \*\/ *$', m)
msg = em.group('msg')
emm = re.match('^(?P<call>.*?): (?P<msg>.*)', msg)
if(emm):
self.name = emm.group('msg')
self.type = emm.group('call')
else:
self.name = msg
self.fevent = True
return
# convert the duration to seconds
if(d):
self.length = float(d)/1000000
# the indentation determines the depth
match = re.match('^(?P<d> *)(?P<o>.*)$', m)
if(not match):
return
self.depth = self.getDepth(match.group('d'))
m = match.group('o')
# function return
if(m[0] == '}'):
self.freturn = True
if(len(m) > 1):
# includes comment with function name
match = re.match('^} *\/\* *(?P<n>.*) *\*\/$', m)
if(match):
self.name = match.group('n')
# function call
else:
self.fcall = True
# function call with children
if(m[-1] == '{'):
match = re.match('^(?P<n>.*) *\(.*', m)
if(match):
self.name = match.group('n')
# function call with no children (leaf)
elif(m[-1] == ';'):
self.freturn = True
match = re.match('^(?P<n>.*) *\(.*', m)
if(match):
self.name = match.group('n')
# something else (possibly a trace marker)
else:
self.name = m
def getDepth(self, str):
return len(str)/2
def debugPrint(self, dev):
if(self.freturn and self.fcall):
print('%s -- %f (%02d): %s(); (%.3f us)' % (dev, self.time, \
self.depth, self.name, self.length*1000000))
elif(self.freturn):
print('%s -- %f (%02d): %s} (%.3f us)' % (dev, self.time, \
self.depth, self.name, self.length*1000000))
else:
print('%s -- %f (%02d): %s() { (%.3f us)' % (dev, self.time, \
self.depth, self.name, self.length*1000000))
# Class: FTraceCallGraph
# Description:
# A container for the ftrace callgraph of a single recursive function.
# This can be a dpm_run_callback, dpm_prepare, or dpm_complete callgraph
# Each instance is tied to a single device in a single phase, and is
# comprised of an ordered list of FTraceLine objects
class FTraceCallGraph:
start = -1.0
end = -1.0
list = []
invalid = False
depth = 0
def __init__(self):
self.start = -1.0
self.end = -1.0
self.list = []
self.depth = 0
def setDepth(self, line):
if(line.fcall and not line.freturn):
line.depth = self.depth
self.depth += 1
elif(line.freturn and not line.fcall):
self.depth -= 1
line.depth = self.depth
else:
line.depth = self.depth
def addLine(self, line, match):
if(not self.invalid):
self.setDepth(line)
if(line.depth == 0 and line.freturn):
if(self.start < 0):
self.start = line.time
self.end = line.time
self.list.append(line)
return True
if(self.invalid):
return False
if(len(self.list) >= 1000000 or self.depth < 0):
if(len(self.list) > 0):
first = self.list[0]
self.list = []
self.list.append(first)
self.invalid = True
if(not match):
return False
id = 'task %s cpu %s' % (match.group('pid'), match.group('cpu'))
window = '(%f - %f)' % (self.start, line.time)
if(self.depth < 0):
print('Too much data for '+id+\
' (buffer overflow), ignoring this callback')
else:
print('Too much data for '+id+\
' '+window+', ignoring this callback')
return False
self.list.append(line)
if(self.start < 0):
self.start = line.time
return False
def slice(self, t0, tN):
minicg = FTraceCallGraph()
count = -1
firstdepth = 0
for l in self.list:
if(l.time < t0 or l.time > tN):
continue
if(count < 0):
if(not l.fcall or l.name == 'dev_driver_string'):
continue
firstdepth = l.depth
count = 0
l.depth -= firstdepth
minicg.addLine(l, 0)
if((count == 0 and l.freturn and l.fcall) or
(count > 0 and l.depth <= 0)):
break
count += 1
return minicg
def sanityCheck(self):
stack = dict()
cnt = 0
for l in self.list:
if(l.fcall and not l.freturn):
stack[l.depth] = l
cnt += 1
elif(l.freturn and not l.fcall):
if(l.depth not in stack):
return False
stack[l.depth].length = l.length
stack[l.depth] = 0
l.length = 0
cnt -= 1
if(cnt == 0):
return True
return False
def debugPrint(self, filename):
if(filename == 'stdout'):
			print('[%f - %f]' % (self.start, self.end))
for l in self.list:
if(l.freturn and l.fcall):
print('%f (%02d): %s(); (%.3f us)' % (l.time, \
l.depth, l.name, l.length*1000000))
elif(l.freturn):
print('%f (%02d): %s} (%.3f us)' % (l.time, \
l.depth, l.name, l.length*1000000))
else:
print('%f (%02d): %s() { (%.3f us)' % (l.time, \
l.depth, l.name, l.length*1000000))
print(' ')
else:
fp = open(filename, 'w')
print(filename)
for l in self.list:
if(l.freturn and l.fcall):
fp.write('%f (%02d): %s(); (%.3f us)\n' % (l.time, \
l.depth, l.name, l.length*1000000))
elif(l.freturn):
fp.write('%f (%02d): %s} (%.3f us)\n' % (l.time, \
l.depth, l.name, l.length*1000000))
else:
fp.write('%f (%02d): %s() { (%.3f us)\n' % (l.time, \
l.depth, l.name, l.length*1000000))
fp.close()
# Class: Timeline
# Description:
# A container for a suspend/resume html timeline. In older versions
# of the script there were multiple timelines, but in the latest
# there is only one.
class Timeline:
html = {}
scaleH = 0.0 # height of the row as a percent of the timeline height
rowH = 0.0 # height of each row in percent of the timeline height
row_height_pixels = 30
maxrows = 0
height = 0
def __init__(self):
self.html = {
'timeline': '',
'legend': '',
'scale': ''
}
def setRows(self, rows):
self.maxrows = int(rows)
self.scaleH = 100.0/float(self.maxrows)
self.height = self.maxrows*self.row_height_pixels
r = float(self.maxrows - 1)
if(r < 1.0):
r = 1.0
self.rowH = (100.0 - self.scaleH)/r
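# Worked example of the Timeline.setRows() math above (hypothetical input):
# rows = 4 gives scaleH = 100.0/4 = 25.0%, height = 4*30 = 120 pixels, and
# rowH = (100.0 - 25.0)/3 = 25.0% per device row.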
# Class: TestRun
# Description:
# A container for a suspend/resume test run. This is necessary as
# there could be more than one, and they need to be separate.
class TestRun:
ftrace_line_fmt_fg = \
'^ *(?P<time>[0-9\.]*) *\| *(?P<cpu>[0-9]*)\)'+\
' *(?P<proc>.*)-(?P<pid>[0-9]*) *\|'+\
'[ +!]*(?P<dur>[0-9\.]*) .*\| (?P<msg>.*)'
ftrace_line_fmt_nop = \
' *(?P<proc>.*)-(?P<pid>[0-9]*) *\[(?P<cpu>[0-9]*)\] *'+\
'(?P<flags>.{4}) *(?P<time>[0-9\.]*): *'+\
'(?P<msg>.*)'
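	# A hypothetical line in nop format, for illustration of the groups the
	# regex above captures (proc, pid, cpu, flags, time, msg):
	#   bash-1234  [001] ....   100.123456: tracing_mark_write: SUSPEND START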
ftrace_line_fmt = ftrace_line_fmt_nop
cgformat = False
ftemp = dict()
ttemp = dict()
inthepipe = False
tracertype = ''
data = 0
def __init__(self, dataobj):
self.data = dataobj
self.ftemp = dict()
self.ttemp = dict()
def isReady(self):
		if(self.tracertype == '' or not self.data):
return False
return True
def setTracerType(self, tracer):
self.tracertype = tracer
if(tracer == 'function_graph'):
self.cgformat = True
self.ftrace_line_fmt = self.ftrace_line_fmt_fg
elif(tracer == 'nop'):
self.ftrace_line_fmt = self.ftrace_line_fmt_nop
else:
doError('Invalid tracer format: [%s]' % tracer, False)
# ----------------- FUNCTIONS --------------------
# Function: vprint
# Description:
# verbose print (prints only with -verbose option)
# Arguments:
# msg: the debug/log message to print
def vprint(msg):
global sysvals
if(sysvals.verbose):
print(msg)
# Function: initFtrace
# Description:
# Configure ftrace to use trace events and/or a callgraph
def initFtrace():
global sysvals
tp = sysvals.tpath
cf = 'dpm_run_callback'
if(sysvals.usetraceeventsonly):
cf = '-e dpm_prepare -e dpm_complete -e dpm_run_callback'
if(sysvals.usecallgraph or sysvals.usetraceevents):
print('INITIALIZING FTRACE...')
# turn trace off
os.system('echo 0 > '+tp+'tracing_on')
# set the trace clock to global
os.system('echo global > '+tp+'trace_clock')
# set trace buffer to a huge value
os.system('echo nop > '+tp+'current_tracer')
os.system('echo 100000 > '+tp+'buffer_size_kb')
# initialize the callgraph trace, unless this is an x2 run
if(sysvals.usecallgraph and sysvals.execcount == 1):
# set trace type
os.system('echo function_graph > '+tp+'current_tracer')
os.system('echo "" > '+tp+'set_ftrace_filter')
# set trace format options
os.system('echo funcgraph-abstime > '+tp+'trace_options')
os.system('echo funcgraph-proc > '+tp+'trace_options')
# focus only on device suspend and resume
os.system('cat '+tp+'available_filter_functions | grep '+\
cf+' > '+tp+'set_graph_function')
if(sysvals.usetraceevents):
# turn trace events on
events = iter(sysvals.traceevents)
for e in events:
os.system('echo 1 > '+sysvals.epath+e+'/enable')
# clear the trace buffer
os.system('echo "" > '+tp+'trace')
# Function: initFtraceAndroid
# Description:
# Configure ftrace to capture trace events
def initFtraceAndroid():
global sysvals
tp = sysvals.tpath
if(sysvals.usetraceevents):
print('INITIALIZING FTRACE...')
# turn trace off
os.system(sysvals.adb+" shell 'echo 0 > "+tp+"tracing_on'")
# set the trace clock to global
os.system(sysvals.adb+" shell 'echo global > "+tp+"trace_clock'")
# set trace buffer to a huge value
os.system(sysvals.adb+" shell 'echo nop > "+tp+"current_tracer'")
os.system(sysvals.adb+" shell 'echo 10000 > "+tp+"buffer_size_kb'")
# turn trace events on
events = iter(sysvals.traceevents)
for e in events:
os.system(sysvals.adb+" shell 'echo 1 > "+\
sysvals.epath+e+"/enable'")
# clear the trace buffer
os.system(sysvals.adb+" shell 'echo \"\" > "+tp+"trace'")
# Function: verifyFtrace
# Description:
# Check that ftrace is working on the system
# Output:
# True or False
def verifyFtrace():
global sysvals
# files needed for any trace data
files = ['buffer_size_kb', 'current_tracer', 'trace', 'trace_clock',
'trace_marker', 'trace_options', 'tracing_on']
# files needed for callgraph trace data
tp = sysvals.tpath
if(sysvals.usecallgraph):
files += [
'available_filter_functions',
'set_ftrace_filter',
'set_graph_function'
]
for f in files:
if(sysvals.android):
out = os.popen(sysvals.adb+' shell ls '+tp+f).read().strip()
if(out != tp+f):
return False
else:
if(os.path.exists(tp+f) == False):
return False
return True
# Function: parseStamp
# Description:
# Pull in the stamp comment line from the data file(s),
# create the stamp, and add it to the global sysvals object
# Arguments:
# m: the valid re.match output for the stamp line
def parseStamp(m, data):
global sysvals
data.stamp = {'time': '', 'host': '', 'mode': ''}
dt = datetime(int(m.group('y'))+2000, int(m.group('m')),
int(m.group('d')), int(m.group('H')), int(m.group('M')),
int(m.group('S')))
data.stamp['time'] = dt.strftime('%B %d %Y, %I:%M:%S %p')
data.stamp['host'] = m.group('host')
data.stamp['mode'] = m.group('mode')
data.stamp['kernel'] = m.group('kernel')
sysvals.suspendmode = data.stamp['mode']
if not sysvals.stamp:
sysvals.stamp = data.stamp
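# For illustration (hypothetical values), a stamp line of the form
#   # suspend-021514-102030 myhost mem 3.18.0
# yields data.stamp['time'] = 'February 15 2014, 10:20:30 AM',
# ['host'] = 'myhost', ['mode'] = 'mem' and ['kernel'] = '3.18.0'.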
# Function: diffStamp
# Description:
# compare the host, kernel, and mode fields in 3 stamps
# Arguments:
# stamp1: string array with mode, kernel, and host
# stamp2: string array with mode, kernel, and host
# Return:
# True if stamps differ, False if they're the same
def diffStamp(stamp1, stamp2):
if 'host' in stamp1 and 'host' in stamp2:
if stamp1['host'] != stamp2['host']:
return True
if 'kernel' in stamp1 and 'kernel' in stamp2:
if stamp1['kernel'] != stamp2['kernel']:
return True
if 'mode' in stamp1 and 'mode' in stamp2:
if stamp1['mode'] != stamp2['mode']:
return True
return False
# Function: doesTraceLogHaveTraceEvents
# Description:
# Quickly determine if the ftrace log has some or all of the trace events
# required for primary parsing. Set the usetraceevents and/or
# usetraceeventsonly flags in the global sysvals object
def doesTraceLogHaveTraceEvents():
global sysvals
sysvals.usetraceeventsonly = True
sysvals.usetraceevents = False
for e in sysvals.traceevents:
out = os.popen('cat '+sysvals.ftracefile+' | grep "'+e+': "').read()
if(not out):
sysvals.usetraceeventsonly = False
if(e == 'suspend_resume' and out):
sysvals.usetraceevents = True
# Function: appendIncompleteTraceLog
# Description:
# [deprecated for kernel 3.15 or newer]
# Legacy support of ftrace outputs that lack the device_pm_callback
# and/or suspend_resume trace events. The primary data should be
# taken from dmesg, and this ftrace is used only for callgraph data
# or custom actions in the timeline. The data is appended to the Data
# objects provided.
# Arguments:
# testruns: the array of Data objects obtained from parseKernelLog
def appendIncompleteTraceLog(testruns):
global sysvals
# create TestRun vessels for ftrace parsing
testcnt = len(testruns)
testidx = -1
testrun = []
for data in testruns:
testrun.append(TestRun(data))
# extract the callgraph and traceevent data
vprint('Analyzing the ftrace data...')
tf = open(sysvals.ftracefile, 'r')
for line in tf:
# remove any latent carriage returns
line = line.replace('\r\n', '')
# grab the time stamp first (signifies the start of the test run)
m = re.match(sysvals.stampfmt, line)
if(m):
testidx += 1
parseStamp(m, testrun[testidx].data)
continue
# pull out any firmware data
if(re.match(sysvals.firmwarefmt, line)):
continue
		# if we haven't found a test time stamp yet, keep spinning until we do
if(testidx < 0):
continue
# determine the trace data type (required for further parsing)
m = re.match(sysvals.tracertypefmt, line)
if(m):
tracer = m.group('t')
testrun[testidx].setTracerType(tracer)
continue
		# parse only valid lines; if this isn't one, move on
m = re.match(testrun[testidx].ftrace_line_fmt, line)
if(not m):
continue
# gather the basic message data from the line
m_time = m.group('time')
m_pid = m.group('pid')
m_msg = m.group('msg')
if(testrun[testidx].cgformat):
m_param3 = m.group('dur')
else:
m_param3 = 'traceevent'
if(m_time and m_pid and m_msg):
t = FTraceLine(m_time, m_msg, m_param3)
pid = int(m_pid)
else:
continue
# the line should be a call, return, or event
if(not t.fcall and not t.freturn and not t.fevent):
continue
# only parse the ftrace data during suspend/resume
data = testrun[testidx].data
if(not testrun[testidx].inthepipe):
# look for the suspend start marker
if(t.fevent):
if(t.name == 'SUSPEND START'):
testrun[testidx].inthepipe = True
data.setStart(t.time)
continue
else:
# trace event processing
if(t.fevent):
if(t.name == 'RESUME COMPLETE'):
testrun[testidx].inthepipe = False
data.setEnd(t.time)
if(testidx == testcnt - 1):
break
continue
# general trace events have two types, begin and end
if(re.match('(?P<name>.*) begin$', t.name)):
isbegin = True
elif(re.match('(?P<name>.*) end$', t.name)):
isbegin = False
else:
continue
m = re.match('(?P<name>.*)\[(?P<val>[0-9]*)\] .*', t.name)
if(m):
val = m.group('val')
if val == '0':
name = m.group('name')
else:
name = m.group('name')+'['+val+']'
else:
m = re.match('(?P<name>.*) .*', t.name)
name = m.group('name')
# special processing for trace events
if re.match('dpm_prepare\[.*', name):
continue
elif re.match('machine_suspend.*', name):
continue
elif re.match('suspend_enter\[.*', name):
if(not isbegin):
data.dmesg['suspend_prepare']['end'] = t.time
continue
elif re.match('dpm_suspend\[.*', name):
if(not isbegin):
data.dmesg['suspend']['end'] = t.time
continue
elif re.match('dpm_suspend_late\[.*', name):
if(isbegin):
data.dmesg['suspend_late']['start'] = t.time
else:
data.dmesg['suspend_late']['end'] = t.time
continue
elif re.match('dpm_suspend_noirq\[.*', name):
if(isbegin):
data.dmesg['suspend_noirq']['start'] = t.time
else:
data.dmesg['suspend_noirq']['end'] = t.time
continue
elif re.match('dpm_resume_noirq\[.*', name):
if(isbegin):
data.dmesg['resume_machine']['end'] = t.time
data.dmesg['resume_noirq']['start'] = t.time
else:
data.dmesg['resume_noirq']['end'] = t.time
continue
elif re.match('dpm_resume_early\[.*', name):
if(isbegin):
data.dmesg['resume_early']['start'] = t.time
else:
data.dmesg['resume_early']['end'] = t.time
continue
elif re.match('dpm_resume\[.*', name):
if(isbegin):
data.dmesg['resume']['start'] = t.time
else:
data.dmesg['resume']['end'] = t.time
continue
elif re.match('dpm_complete\[.*', name):
if(isbegin):
data.dmesg['resume_complete']['start'] = t.time
else:
data.dmesg['resume_complete']['end'] = t.time
continue
# is this trace event outside of the devices calls
if(data.isTraceEventOutsideDeviceCalls(pid, t.time)):
# global events (outside device calls) are simply graphed
if(isbegin):
# store each trace event in ttemp
if(name not in testrun[testidx].ttemp):
testrun[testidx].ttemp[name] = []
testrun[testidx].ttemp[name].append(\
{'begin': t.time, 'end': t.time})
else:
# finish off matching trace event in ttemp
if(name in testrun[testidx].ttemp):
testrun[testidx].ttemp[name][-1]['end'] = t.time
else:
if(isbegin):
data.addIntraDevTraceEvent('', name, pid, t.time)
else:
data.capIntraDevTraceEvent('', name, pid, t.time)
# call/return processing
elif sysvals.usecallgraph:
# create a callgraph object for the data
if(pid not in testrun[testidx].ftemp):
testrun[testidx].ftemp[pid] = []
testrun[testidx].ftemp[pid].append(FTraceCallGraph())
# when the call is finished, see which device matches it
cg = testrun[testidx].ftemp[pid][-1]
if(cg.addLine(t, m)):
testrun[testidx].ftemp[pid].append(FTraceCallGraph())
tf.close()
for test in testrun:
# add the traceevent data to the device hierarchy
if(sysvals.usetraceevents):
for name in test.ttemp:
for event in test.ttemp[name]:
begin = event['begin']
end = event['end']
# if event starts before timeline start, expand timeline
if(begin < test.data.start):
test.data.setStart(begin)
# if event ends after timeline end, expand the timeline
if(end > test.data.end):
test.data.setEnd(end)
test.data.newActionGlobal(name, begin, end)
# add the callgraph data to the device hierarchy
for pid in test.ftemp:
for cg in test.ftemp[pid]:
if(not cg.sanityCheck()):
id = 'task %s cpu %s' % (pid, m.group('cpu'))
vprint('Sanity check failed for '+\
id+', ignoring this callback')
continue
callstart = cg.start
callend = cg.end
for p in test.data.phases:
if(test.data.dmesg[p]['start'] <= callstart and
callstart <= test.data.dmesg[p]['end']):
list = test.data.dmesg[p]['list']
for devname in list:
dev = list[devname]
if(pid == dev['pid'] and
callstart <= dev['start'] and
callend >= dev['end']):
dev['ftrace'] = cg
break
if(sysvals.verbose):
test.data.printDetails()
# add the time in between the tests as a new phase so we can see it
if(len(testruns) > 1):
t1e = testruns[0].getEnd()
t2s = testruns[-1].getStart()
testruns[-1].newPhaseWithSingleAction('user mode', \
'user mode', t1e, t2s, '#FF9966')
# Function: parseTraceLog
# Description:
# Analyze an ftrace log output file generated from this app during
# the execution phase. Used when the ftrace log is the primary data source
# and includes the suspend_resume and device_pm_callback trace events
# The ftrace filename is taken from sysvals
# Output:
# An array of Data objects
def parseTraceLog():
global sysvals
vprint('Analyzing the ftrace data...')
if(os.path.exists(sysvals.ftracefile) == False):
		doError('%s does not exist' % sysvals.ftracefile, False)
# extract the callgraph and traceevent data
testruns = []
testdata = []
testrun = 0
data = 0
tf = open(sysvals.ftracefile, 'r')
phase = 'suspend_prepare'
for line in tf:
# remove any latent carriage returns
line = line.replace('\r\n', '')
# stamp line: each stamp means a new test run
m = re.match(sysvals.stampfmt, line)
if(m):
data = Data(len(testdata))
testdata.append(data)
testrun = TestRun(data)
testruns.append(testrun)
parseStamp(m, data)
continue
if(not data):
continue
# firmware line: pull out any firmware data
m = re.match(sysvals.firmwarefmt, line)
if(m):
data.fwSuspend = int(m.group('s'))
data.fwResume = int(m.group('r'))
if(data.fwSuspend > 0 or data.fwResume > 0):
data.fwValid = True
continue
# tracer type line: determine the trace data type
m = re.match(sysvals.tracertypefmt, line)
if(m):
tracer = m.group('t')
testrun.setTracerType(tracer)
continue
# post resume time line: did this test run include post-resume data
m = re.match(sysvals.postresumefmt, line)
if(m):
t = int(m.group('t'))
if(t > 0):
sysvals.postresumetime = t
continue
# ftrace line: parse only valid lines
m = re.match(testrun.ftrace_line_fmt, line)
if(not m):
continue
# gather the basic message data from the line
m_time = m.group('time')
m_pid = m.group('pid')
m_msg = m.group('msg')
if(testrun.cgformat):
m_param3 = m.group('dur')
else:
m_param3 = 'traceevent'
if(m_time and m_pid and m_msg):
t = FTraceLine(m_time, m_msg, m_param3)
pid = int(m_pid)
else:
continue
# the line should be a call, return, or event
if(not t.fcall and not t.freturn and not t.fevent):
continue
# only parse the ftrace data during suspend/resume
if(not testrun.inthepipe):
# look for the suspend start marker
if(t.fevent):
if(t.name == 'SUSPEND START'):
testrun.inthepipe = True
data.setStart(t.time)
continue
# trace event processing
if(t.fevent):
if(t.name == 'RESUME COMPLETE'):
if(sysvals.postresumetime > 0):
phase = 'post_resume'
data.newPhase(phase, t.time, t.time, '#FF9966', -1)
else:
testrun.inthepipe = False
data.setEnd(t.time)
continue
if(phase == 'post_resume'):
data.setEnd(t.time)
if(t.type == 'suspend_resume'):
# suspend_resume trace events have two types, begin and end
if(re.match('(?P<name>.*) begin$', t.name)):
isbegin = True
elif(re.match('(?P<name>.*) end$', t.name)):
isbegin = False
else:
continue
m = re.match('(?P<name>.*)\[(?P<val>[0-9]*)\] .*', t.name)
if(m):
val = m.group('val')
if val == '0':
name = m.group('name')
else:
name = m.group('name')+'['+val+']'
else:
m = re.match('(?P<name>.*) .*', t.name)
name = m.group('name')
# ignore these events
if(re.match('acpi_suspend\[.*', t.name) or
re.match('suspend_enter\[.*', name)):
continue
# -- phase changes --
# suspend_prepare start
if(re.match('dpm_prepare\[.*', t.name)):
phase = 'suspend_prepare'
if(not isbegin):
data.dmesg[phase]['end'] = t.time
continue
# suspend start
elif(re.match('dpm_suspend\[.*', t.name)):
phase = 'suspend'
data.setPhase(phase, t.time, isbegin)
continue
# suspend_late start
elif(re.match('dpm_suspend_late\[.*', t.name)):
phase = 'suspend_late'
data.setPhase(phase, t.time, isbegin)
continue
# suspend_noirq start
elif(re.match('dpm_suspend_noirq\[.*', t.name)):
phase = 'suspend_noirq'
data.setPhase(phase, t.time, isbegin)
if(not isbegin):
phase = 'suspend_machine'
data.dmesg[phase]['start'] = t.time
continue
# suspend_machine/resume_machine
elif(re.match('machine_suspend\[.*', t.name)):
if(isbegin):
phase = 'suspend_machine'
data.dmesg[phase]['end'] = t.time
data.tSuspended = t.time
else:
if(sysvals.suspendmode in ['mem', 'disk']):
data.dmesg['suspend_machine']['end'] = t.time
data.tSuspended = t.time
phase = 'resume_machine'
data.dmesg[phase]['start'] = t.time
data.tResumed = t.time
data.tLow = data.tResumed - data.tSuspended
continue
# resume_noirq start
elif(re.match('dpm_resume_noirq\[.*', t.name)):
phase = 'resume_noirq'
data.setPhase(phase, t.time, isbegin)
if(isbegin):
data.dmesg['resume_machine']['end'] = t.time
continue
# resume_early start
elif(re.match('dpm_resume_early\[.*', t.name)):
phase = 'resume_early'
data.setPhase(phase, t.time, isbegin)
continue
# resume start
elif(re.match('dpm_resume\[.*', t.name)):
phase = 'resume'
data.setPhase(phase, t.time, isbegin)
continue
# resume complete start
elif(re.match('dpm_complete\[.*', t.name)):
phase = 'resume_complete'
if(isbegin):
data.dmesg[phase]['start'] = t.time
continue
# is this trace event outside of the devices calls
if(data.isTraceEventOutsideDeviceCalls(pid, t.time)):
# global events (outside device calls) are simply graphed
if(name not in testrun.ttemp):
testrun.ttemp[name] = []
if(isbegin):
# create a new list entry
testrun.ttemp[name].append(\
{'begin': t.time, 'end': t.time})
else:
if(len(testrun.ttemp[name]) > 0):
						# if an entry exists, assume this is its end
testrun.ttemp[name][-1]['end'] = t.time
elif(phase == 'post_resume'):
# post resume events can just have ends
testrun.ttemp[name].append({
'begin': data.dmesg[phase]['start'],
'end': t.time})
else:
if(isbegin):
data.addIntraDevTraceEvent('', name, pid, t.time)
else:
data.capIntraDevTraceEvent('', name, pid, t.time)
# device callback start
elif(t.type == 'device_pm_callback_start'):
m = re.match('(?P<drv>.*) (?P<d>.*), parent: *(?P<p>.*), .*',\
t.name);
if(not m):
continue
drv = m.group('drv')
n = m.group('d')
p = m.group('p')
if(n and p):
data.newAction(phase, n, pid, p, t.time, -1, drv)
# device callback finish
elif(t.type == 'device_pm_callback_end'):
m = re.match('(?P<drv>.*) (?P<d>.*), err.*', t.name);
if(not m):
continue
n = m.group('d')
list = data.dmesg[phase]['list']
if(n in list):
dev = list[n]
dev['length'] = t.time - dev['start']
dev['end'] = t.time
# callgraph processing
elif sysvals.usecallgraph:
			# this shouldn't happen, but just in case, ignore callgraph data after resume
if(phase == 'post_resume'):
continue
# create a callgraph object for the data
if(pid not in testrun.ftemp):
testrun.ftemp[pid] = []
testrun.ftemp[pid].append(FTraceCallGraph())
# when the call is finished, see which device matches it
cg = testrun.ftemp[pid][-1]
if(cg.addLine(t, m)):
testrun.ftemp[pid].append(FTraceCallGraph())
tf.close()
for test in testruns:
# add the traceevent data to the device hierarchy
if(sysvals.usetraceevents):
for name in test.ttemp:
for event in test.ttemp[name]:
begin = event['begin']
end = event['end']
# if event starts before timeline start, expand timeline
if(begin < test.data.start):
test.data.setStart(begin)
# if event ends after timeline end, expand the timeline
if(end > test.data.end):
test.data.setEnd(end)
test.data.newActionGlobal(name, begin, end)
# add the callgraph data to the device hierarchy
borderphase = {
'dpm_prepare': 'suspend_prepare',
'dpm_complete': 'resume_complete'
}
for pid in test.ftemp:
for cg in test.ftemp[pid]:
if len(cg.list) < 2:
continue
if(not cg.sanityCheck()):
id = 'task %s cpu %s' % (pid, m.group('cpu'))
vprint('Sanity check failed for '+\
id+', ignoring this callback')
continue
callstart = cg.start
callend = cg.end
if(cg.list[0].name in borderphase):
p = borderphase[cg.list[0].name]
list = test.data.dmesg[p]['list']
for devname in list:
dev = list[devname]
if(pid == dev['pid'] and
callstart <= dev['start'] and
callend >= dev['end']):
dev['ftrace'] = cg.slice(dev['start'], dev['end'])
continue
if(cg.list[0].name != 'dpm_run_callback'):
continue
for p in test.data.phases:
if(test.data.dmesg[p]['start'] <= callstart and
callstart <= test.data.dmesg[p]['end']):
list = test.data.dmesg[p]['list']
for devname in list:
dev = list[devname]
if(pid == dev['pid'] and
callstart <= dev['start'] and
callend >= dev['end']):
dev['ftrace'] = cg
break
# fill in any missing phases
for data in testdata:
lp = data.phases[0]
for p in data.phases:
if(data.dmesg[p]['start'] < 0 and data.dmesg[p]['end'] < 0):
print('WARNING: phase "%s" is missing!' % p)
if(data.dmesg[p]['start'] < 0):
data.dmesg[p]['start'] = data.dmesg[lp]['end']
if(p == 'resume_machine'):
data.tSuspended = data.dmesg[lp]['end']
data.tResumed = data.dmesg[lp]['end']
data.tLow = 0
if(data.dmesg[p]['end'] < 0):
data.dmesg[p]['end'] = data.dmesg[p]['start']
lp = p
if(len(sysvals.devicefilter) > 0):
data.deviceFilter(sysvals.devicefilter)
data.fixupInitcallsThatDidntReturn()
if(sysvals.verbose):
data.printDetails()
# add the time in between the tests as a new phase so we can see it
if(len(testdata) > 1):
t1e = testdata[0].getEnd()
t2s = testdata[-1].getStart()
testdata[-1].newPhaseWithSingleAction('user mode', \
'user mode', t1e, t2s, '#FF9966')
return testdata
# Function: loadKernelLog
# Description:
# [deprecated for kernel 3.15.0 or newer]
# load the dmesg file into memory and fix up any ordering issues
# The dmesg filename is taken from sysvals
# Output:
# An array of empty Data objects with only their dmesgtext attributes set
def loadKernelLog():
global sysvals
vprint('Analyzing the dmesg data...')
if(os.path.exists(sysvals.dmesgfile) == False):
		doError('%s does not exist' % sysvals.dmesgfile, False)
# there can be multiple test runs in a single file delineated by stamps
testruns = []
data = 0
lf = open(sysvals.dmesgfile, 'r')
for line in lf:
line = line.replace('\r\n', '')
idx = line.find('[')
if idx > 1:
line = line[idx:]
m = re.match(sysvals.stampfmt, line)
if(m):
if(data):
testruns.append(data)
data = Data(len(testruns))
parseStamp(m, data)
continue
if(not data):
continue
m = re.match(sysvals.firmwarefmt, line)
if(m):
data.fwSuspend = int(m.group('s'))
data.fwResume = int(m.group('r'))
if(data.fwSuspend > 0 or data.fwResume > 0):
data.fwValid = True
continue
m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if(m):
data.dmesgtext.append(line)
if(re.match('ACPI: resume from mwait', m.group('msg'))):
print('NOTE: This suspend appears to be freeze rather than'+\
' %s, it will be treated as such' % sysvals.suspendmode)
sysvals.suspendmode = 'freeze'
else:
vprint('ignoring dmesg line: %s' % line.replace('\n', ''))
testruns.append(data)
lf.close()
if(not data):
print('ERROR: analyze_suspend header missing from dmesg log')
sys.exit()
# fix lines with same timestamp/function with the call and return swapped
for data in testruns:
last = ''
for line in data.dmesgtext:
mc = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) calling '+\
'(?P<f>.*)\+ @ .*, parent: .*', line)
mr = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) call '+\
'(?P<f>.*)\+ returned .* after (?P<dt>.*) usecs', last)
if(mc and mr and (mc.group('t') == mr.group('t')) and
(mc.group('f') == mr.group('f'))):
i = data.dmesgtext.index(last)
j = data.dmesgtext.index(line)
data.dmesgtext[i] = line
data.dmesgtext[j] = last
last = line
return testruns
# Function: parseKernelLog
# Description:
# [deprecated for kernel 3.15.0 or newer]
# Analyze a dmesg log output file generated from this app during
# the execution phase. Create a set of device structures in memory
# for subsequent formatting in the html output file
# This call is only for legacy support on kernels where the ftrace
# data lacks the suspend_resume or device_pm_callbacks trace events.
# Arguments:
# data: an empty Data object (with dmesgtext) obtained from loadKernelLog
# Output:
# The filled Data object
def parseKernelLog(data):
global sysvals
phase = 'suspend_runtime'
if(data.fwValid):
vprint('Firmware Suspend = %u ns, Firmware Resume = %u ns' % \
(data.fwSuspend, data.fwResume))
# dmesg phase match table
dm = {
'suspend_prepare': 'PM: Syncing filesystems.*',
'suspend': 'PM: Entering [a-z]* sleep.*',
'suspend_late': 'PM: suspend of devices complete after.*',
'suspend_noirq': 'PM: late suspend of devices complete after.*',
'suspend_machine': 'PM: noirq suspend of devices complete after.*',
'resume_machine': 'ACPI: Low-level resume complete.*',
'resume_noirq': 'ACPI: Waking up from system sleep state.*',
'resume_early': 'PM: noirq resume of devices complete after.*',
'resume': 'PM: early resume of devices complete after.*',
'resume_complete': 'PM: resume of devices complete after.*',
'post_resume': '.*Restarting tasks \.\.\..*',
}
if(sysvals.suspendmode == 'standby'):
dm['resume_machine'] = 'PM: Restoring platform NVS memory'
elif(sysvals.suspendmode == 'disk'):
dm['suspend_late'] = 'PM: freeze of devices complete after.*'
dm['suspend_noirq'] = 'PM: late freeze of devices complete after.*'
dm['suspend_machine'] = 'PM: noirq freeze of devices complete after.*'
dm['resume_machine'] = 'PM: Restoring platform NVS memory'
dm['resume_early'] = 'PM: noirq restore of devices complete after.*'
dm['resume'] = 'PM: early restore of devices complete after.*'
dm['resume_complete'] = 'PM: restore of devices complete after.*'
elif(sysvals.suspendmode == 'freeze'):
dm['resume_machine'] = 'ACPI: resume from mwait'
# action table (expected events that occur and show up in dmesg)
at = {
'sync_filesystems': {
'smsg': 'PM: Syncing filesystems.*',
'emsg': 'PM: Preparing system for mem sleep.*' },
'freeze_user_processes': {
'smsg': 'Freezing user space processes .*',
'emsg': 'Freezing remaining freezable tasks.*' },
'freeze_tasks': {
'smsg': 'Freezing remaining freezable tasks.*',
'emsg': 'PM: Entering (?P<mode>[a-z,A-Z]*) sleep.*' },
'ACPI prepare': {
'smsg': 'ACPI: Preparing to enter system sleep state.*',
'emsg': 'PM: Saving platform NVS memory.*' },
		'PM nvs': {
'smsg': 'PM: Saving platform NVS memory.*',
'emsg': 'Disabling non-boot CPUs .*' },
}
t0 = -1.0
cpu_start = -1.0
prevktime = -1.0
actions = dict()
for line in data.dmesgtext:
# -- preprocessing --
# parse each dmesg line into the time and message
m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if(m):
val = m.group('ktime')
try:
ktime = float(val)
except:
doWarning('INVALID DMESG LINE: '+\
line.replace('\n', ''), 'dmesg')
continue
msg = m.group('msg')
# initialize data start to first line time
if t0 < 0:
data.setStart(ktime)
t0 = ktime
else:
continue
# hack for determining resume_machine end for freeze
if(not sysvals.usetraceevents and sysvals.suspendmode == 'freeze' \
and phase == 'resume_machine' and \
re.match('calling (?P<f>.*)\+ @ .*, parent: .*', msg)):
data.dmesg['resume_machine']['end'] = ktime
phase = 'resume_noirq'
data.dmesg[phase]['start'] = ktime
# -- phase changes --
# suspend start
if(re.match(dm['suspend_prepare'], msg)):
phase = 'suspend_prepare'
data.dmesg[phase]['start'] = ktime
data.setStart(ktime)
# suspend start
elif(re.match(dm['suspend'], msg)):
data.dmesg['suspend_prepare']['end'] = ktime
phase = 'suspend'
data.dmesg[phase]['start'] = ktime
# suspend_late start
elif(re.match(dm['suspend_late'], msg)):
data.dmesg['suspend']['end'] = ktime
phase = 'suspend_late'
data.dmesg[phase]['start'] = ktime
# suspend_noirq start
elif(re.match(dm['suspend_noirq'], msg)):
data.dmesg['suspend_late']['end'] = ktime
phase = 'suspend_noirq'
data.dmesg[phase]['start'] = ktime
# suspend_machine start
elif(re.match(dm['suspend_machine'], msg)):
data.dmesg['suspend_noirq']['end'] = ktime
phase = 'suspend_machine'
data.dmesg[phase]['start'] = ktime
# resume_machine start
elif(re.match(dm['resume_machine'], msg)):
if(sysvals.suspendmode in ['freeze', 'standby']):
data.tSuspended = prevktime
data.dmesg['suspend_machine']['end'] = prevktime
else:
data.tSuspended = ktime
data.dmesg['suspend_machine']['end'] = ktime
phase = 'resume_machine'
data.tResumed = ktime
data.tLow = data.tResumed - data.tSuspended
data.dmesg[phase]['start'] = ktime
# resume_noirq start
elif(re.match(dm['resume_noirq'], msg)):
data.dmesg['resume_machine']['end'] = ktime
phase = 'resume_noirq'
data.dmesg[phase]['start'] = ktime
# resume_early start
elif(re.match(dm['resume_early'], msg)):
data.dmesg['resume_noirq']['end'] = ktime
phase = 'resume_early'
data.dmesg[phase]['start'] = ktime
# resume start
elif(re.match(dm['resume'], msg)):
data.dmesg['resume_early']['end'] = ktime
phase = 'resume'
data.dmesg[phase]['start'] = ktime
# resume complete start
elif(re.match(dm['resume_complete'], msg)):
data.dmesg['resume']['end'] = ktime
phase = 'resume_complete'
data.dmesg[phase]['start'] = ktime
# post resume start
elif(re.match(dm['post_resume'], msg)):
data.dmesg['resume_complete']['end'] = ktime
data.setEnd(ktime)
phase = 'post_resume'
break
# -- device callbacks --
if(phase in data.phases):
# device init call
if(re.match('calling (?P<f>.*)\+ @ .*, parent: .*', msg)):
sm = re.match('calling (?P<f>.*)\+ @ '+\
'(?P<n>.*), parent: (?P<p>.*)', msg);
f = sm.group('f')
n = sm.group('n')
p = sm.group('p')
if(f and n and p):
data.newAction(phase, f, int(n), p, ktime, -1, '')
# device init return
elif(re.match('call (?P<f>.*)\+ returned .* after '+\
'(?P<t>.*) usecs', msg)):
sm = re.match('call (?P<f>.*)\+ returned .* after '+\
'(?P<t>.*) usecs(?P<a>.*)', msg);
f = sm.group('f')
t = sm.group('t')
list = data.dmesg[phase]['list']
if(f in list):
dev = list[f]
dev['length'] = int(t)
dev['end'] = ktime
# -- non-devicecallback actions --
# if trace events are not available, these are better than nothing
if(not sysvals.usetraceevents):
# look for known actions
for a in at:
if(re.match(at[a]['smsg'], msg)):
if(a not in actions):
actions[a] = []
actions[a].append({'begin': ktime, 'end': ktime})
if(re.match(at[a]['emsg'], msg)):
actions[a][-1]['end'] = ktime
# now look for CPU on/off events
if(re.match('Disabling non-boot CPUs .*', msg)):
# start of first cpu suspend
cpu_start = ktime
elif(re.match('Enabling non-boot CPUs .*', msg)):
# start of first cpu resume
cpu_start = ktime
elif(re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg)):
# end of a cpu suspend, start of the next
m = re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg)
cpu = 'CPU'+m.group('cpu')
if(cpu not in actions):
actions[cpu] = []
actions[cpu].append({'begin': cpu_start, 'end': ktime})
cpu_start = ktime
elif(re.match('CPU(?P<cpu>[0-9]*) is up', msg)):
# end of a cpu resume, start of the next
m = re.match('CPU(?P<cpu>[0-9]*) is up', msg)
cpu = 'CPU'+m.group('cpu')
if(cpu not in actions):
actions[cpu] = []
actions[cpu].append({'begin': cpu_start, 'end': ktime})
cpu_start = ktime
prevktime = ktime
# fill in any missing phases
lp = data.phases[0]
for p in data.phases:
if(data.dmesg[p]['start'] < 0 and data.dmesg[p]['end'] < 0):
print('WARNING: phase "%s" is missing, something went wrong!' % p)
print(' In %s, this dmesg line denotes the start of %s:' % \
(sysvals.suspendmode, p))
print(' "%s"' % dm[p])
if(data.dmesg[p]['start'] < 0):
data.dmesg[p]['start'] = data.dmesg[lp]['end']
if(p == 'resume_machine'):
data.tSuspended = data.dmesg[lp]['end']
data.tResumed = data.dmesg[lp]['end']
data.tLow = 0
if(data.dmesg[p]['end'] < 0):
data.dmesg[p]['end'] = data.dmesg[p]['start']
lp = p
# fill in any actions we've found
for name in actions:
for event in actions[name]:
begin = event['begin']
end = event['end']
# if event starts before timeline start, expand timeline
if(begin < data.start):
data.setStart(begin)
# if event ends after timeline end, expand the timeline
if(end > data.end):
data.setEnd(end)
data.newActionGlobal(name, begin, end)
if(sysvals.verbose):
data.printDetails()
if(len(sysvals.devicefilter) > 0):
data.deviceFilter(sysvals.devicefilter)
data.fixupInitcallsThatDidntReturn()
return True
# Function: setTimelineRows
# Description:
# Organize the timeline entries into the smallest
# number of rows possible, with no entry overlapping
# Arguments:
# list: the list of devices/actions for a single phase
# sortedkeys: chronologically sorted key list to use
# Output:
# The total number of rows needed to display this phase of the timeline
def setTimelineRows(list, sortedkeys):
# clear all rows and set them to undefined
remaining = len(list)
rowdata = dict()
row = 0
for item in list:
list[item]['row'] = -1
# try to pack each row with as many ranges as possible
while(remaining > 0):
if(row not in rowdata):
rowdata[row] = []
for item in sortedkeys:
if(list[item]['row'] < 0):
s = list[item]['start']
e = list[item]['end']
valid = True
for ritem in rowdata[row]:
rs = ritem['start']
					rend = ritem['end']
					if(not (((s <= rs) and (e <= rs)) or
						((s >= rend) and (e >= rend)))):
valid = False
break
if(valid):
rowdata[row].append(list[item])
list[item]['row'] = row
remaining -= 1
row += 1
return row
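# Row-packing example for the function above (made-up entries): ranges 0-2,
# 1-3 and 3-4 need two rows; 0-2 and 3-4 can share row 0, while 1-3 conflicts
# with 0-2 and is pushed to row 1, so setTimelineRows() returns 2.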
# Function: createTimeScale
# Description:
# Create the timescale header for the html timeline
# Arguments:
# t0: start time (suspend begin)
# tMax: end time (resume end)
# tSuspend: time when suspend occurs, i.e. the zero time
# Output:
# The html code needed to display the time scale
def createTimeScale(t0, tMax, tSuspended):
timescale = '<div class="t" style="right:{0}%">{1}</div>\n'
output = '<div id="timescale">\n'
# set scale for timeline
tTotal = tMax - t0
tS = 0.1
if(tTotal <= 0):
return output
if(tTotal > 4):
tS = 1
if(tSuspended < 0):
for i in range(int(tTotal/tS)+1):
pos = '%0.3f' % (100 - ((float(i)*tS*100)/tTotal))
if(i > 0):
val = '%0.fms' % (float(i)*tS*1000)
else:
val = ''
output += timescale.format(pos, val)
else:
tSuspend = tSuspended - t0
divTotal = int(tTotal/tS) + 1
divSuspend = int(tSuspend/tS)
s0 = (tSuspend - tS*divSuspend)*100/tTotal
for i in range(divTotal):
pos = '%0.3f' % (100 - ((float(i)*tS*100)/tTotal) - s0)
if((i == 0) and (s0 < 3)):
val = ''
elif(i == divSuspend):
val = 'S/R'
else:
val = '%0.fms' % (float(i-divSuspend)*tS*1000)
output += timescale.format(pos, val)
output += '</div>\n'
return output
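# Example of the scale math above (hypothetical times): with t0 = -1.0,
# tMax = 3.0 and tSuspended = 0.0, tTotal = 4.0 so tS stays 0.1s; the label
# at the suspend/resume boundary (i == divSuspend == 10) reads 'S/R', and
# later labels count up from it in 100ms steps.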
# Function: createHTMLSummarySimple
# Description:
# Create summary html file for a series of tests
# Arguments:
# testruns: array of Data objects from parseTraceLog
def createHTMLSummarySimple(testruns, htmlfile):
global sysvals
# print out the basic summary of all the tests
hf = open(htmlfile, 'w')
# write the html header first (html head, css code, up to body start)
html = '<!DOCTYPE html>\n<html>\n<head>\n\
<meta http-equiv="content-type" content="text/html; charset=UTF-8">\n\
<title>AnalyzeSuspend Summary</title>\n\
<style type=\'text/css\'>\n\
body {overflow-y: scroll;}\n\
.stamp {width: 100%;text-align:center;background-color:#495E09;line-height:30px;color:white;font: 25px Arial;}\n\
table {width:100%;border-collapse: collapse;}\n\
.summary {font: 22px Arial;border:1px solid;}\n\
th {border: 1px solid black;background-color:#A7C942;color:white;}\n\
td {text-align: center;}\n\
tr.alt td {background-color:#EAF2D3;}\n\
tr.avg td {background-color:#BDE34C;}\n\
a:link {color: #90B521;}\n\
a:visited {color: #495E09;}\n\
a:hover {color: #B1DF28;}\n\
a:active {color: #FFFFFF;}\n\
</style>\n</head>\n<body>\n'
# group test header
count = len(testruns)
headline_stamp = '<div class="stamp">{0} {1} {2} {3} ({4} tests)</div>\n'
html += headline_stamp.format(sysvals.stamp['host'],
sysvals.stamp['kernel'], sysvals.stamp['mode'],
sysvals.stamp['time'], count)
# check to see if all the tests have the same value
stampcolumns = False
for data in testruns:
if diffStamp(sysvals.stamp, data.stamp):
stampcolumns = True
break
th = '\t<th>{0}</th>\n'
td = '\t<td>{0}</td>\n'
tdlink = '\t<td><a href="{0}">Click Here</a></td>\n'
# table header
html += '<table class="summary">\n<tr>\n'
html += th.format("Test #")
if stampcolumns:
html += th.format("Hostname")
html += th.format("Kernel Version")
html += th.format("Suspend Mode")
html += th.format("Test Time")
html += th.format("Suspend Time")
html += th.format("Resume Time")
html += th.format("Detail")
html += '</tr>\n'
# test data, 1 row per test
sTimeAvg = 0.0
rTimeAvg = 0.0
num = 1
for data in testruns:
# data.end is the end of post_resume
resumeEnd = data.dmesg['resume_complete']['end']
if num % 2 == 1:
html += '<tr class="alt">\n'
else:
html += '<tr>\n'
# test num
html += td.format("test %d" % num)
num += 1
if stampcolumns:
# host name
val = "unknown"
if('host' in data.stamp):
val = data.stamp['host']
html += td.format(val)
# host kernel
val = "unknown"
if('kernel' in data.stamp):
val = data.stamp['kernel']
html += td.format(val)
# suspend mode
val = "unknown"
if('mode' in data.stamp):
val = data.stamp['mode']
html += td.format(val)
# test time
val = "unknown"
if('time' in data.stamp):
val = data.stamp['time']
html += td.format(val)
# suspend time
sTime = (data.tSuspended - data.start)*1000
sTimeAvg += sTime
html += td.format("%3.3f ms" % sTime)
# resume time
rTime = (resumeEnd - data.tResumed)*1000
rTimeAvg += rTime
html += td.format("%3.3f ms" % rTime)
# link to the output html
html += tdlink.format(data.outfile)
html += '</tr>\n'
# last line: test average
if(count > 0):
sTimeAvg /= count
rTimeAvg /= count
html += '<tr class="avg">\n'
html += td.format('Average') # name
if stampcolumns:
html += td.format('') # host
html += td.format('') # kernel
html += td.format('') # mode
html += td.format('') # time
html += td.format("%3.3f ms" % sTimeAvg) # suspend time
html += td.format("%3.3f ms" % rTimeAvg) # resume time
html += td.format('') # output link
html += '</tr>\n'
# flush the data to file
hf.write(html+'</table>\n')
hf.write('</body>\n</html>\n')
hf.close()
# Function: createHTML
# Description:
# Create the output html file from the resident test data
# Arguments:
# testruns: array of Data objects from parseKernelLog or parseTraceLog
# Output:
# True if the html file was created, false if it failed
def createHTML(testruns):
global sysvals
for data in testruns:
data.normalizeTime(testruns[-1].tSuspended)
x2changes = ['', 'absolute']
if len(testruns) > 1:
x2changes = ['1', 'relative']
# html function templates
headline_stamp = '<div class="stamp">{0} {1} {2} {3}</div>\n'
html_devlist1 = '<button id="devlist1" class="devlist" style="float:left;">Device Detail%s</button>' % x2changes[0]
html_zoombox = '<center><button id="zoomin">ZOOM IN</button><button id="zoomout">ZOOM OUT</button><button id="zoomdef">ZOOM 1:1</button></center>\n'
html_devlist2 = '<button id="devlist2" class="devlist" style="float:right;">Device Detail2</button>\n'
html_timeline = '<div id="dmesgzoombox" class="zoombox">\n<div id="{0}" class="timeline" style="height:{1}px">\n'
html_device = '<div id="{0}" title="{1}" class="thread" style="left:{2}%;top:{3}%;height:{4}%;width:{5}%;">{6}</div>\n'
html_traceevent = '<div title="{0}" class="traceevent" style="left:{1}%;top:{2}%;height:{3}%;width:{4}%;border:1px solid {5};background-color:{5}">{6}</div>\n'
html_phase = '<div class="phase" style="left:{0}%;width:{1}%;top:{2}%;height:{3}%;background-color:{4}">{5}</div>\n'
html_phaselet = '<div id="{0}" class="phaselet" style="left:{1}%;width:{2}%;background-color:{3}"></div>\n'
html_legend = '<div class="square" style="left:{0}%;background-color:{1}"> {2}</div>\n'
html_timetotal = '<table class="time1">\n<tr>'\
'<td class="green">{2} Suspend Time: <b>{0} ms</b></td>'\
'<td class="yellow">{2} Resume Time: <b>{1} ms</b></td>'\
'</tr>\n</table>\n'
html_timetotal2 = '<table class="time1">\n<tr>'\
'<td class="green">{3} Suspend Time: <b>{0} ms</b></td>'\
'<td class="gray">'+sysvals.suspendmode+' time: <b>{1} ms</b></td>'\
'<td class="yellow">{3} Resume Time: <b>{2} ms</b></td>'\
'</tr>\n</table>\n'
html_timegroups = '<table class="time2">\n<tr>'\
'<td class="green">{4}Kernel Suspend: {0} ms</td>'\
'<td class="purple">{4}Firmware Suspend: {1} ms</td>'\
'<td class="purple">{4}Firmware Resume: {2} ms</td>'\
'<td class="yellow">{4}Kernel Resume: {3} ms</td>'\
'</tr>\n</table>\n'
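	# For illustration (numbers made up): html_timetotal.format('1024.5', '256.8', 'Kernel')
	# produces markup equivalent to
	#   <table class="time1"><tr><td class="green">Kernel Suspend Time: <b>1024.5 ms</b></td>
	#   <td class="yellow">Kernel Resume Time: <b>256.8 ms</b></td></tr></table>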
# device timeline
vprint('Creating Device Timeline...')
devtl = Timeline()
# Generate the header for this timeline
textnum = ['First', 'Second']
for data in testruns:
tTotal = data.end - data.start
tEnd = data.dmesg['resume_complete']['end']
if(tTotal == 0):
print('ERROR: No timeline data')
sys.exit()
if(data.tLow > 0):
low_time = '%.0f'%(data.tLow*1000)
if data.fwValid:
suspend_time = '%.0f'%((data.tSuspended-data.start)*1000 + \
(data.fwSuspend/1000000.0))
resume_time = '%.0f'%((tEnd-data.tSuspended)*1000 + \
(data.fwResume/1000000.0))
testdesc1 = 'Total'
testdesc2 = ''
if(len(testruns) > 1):
testdesc1 = testdesc2 = textnum[data.testnumber]
testdesc2 += ' '
if(data.tLow == 0):
thtml = html_timetotal.format(suspend_time, \
resume_time, testdesc1)
else:
thtml = html_timetotal2.format(suspend_time, low_time, \
resume_time, testdesc1)
devtl.html['timeline'] += thtml
sktime = '%.3f'%((data.dmesg['suspend_machine']['end'] - \
data.getStart())*1000)
sftime = '%.3f'%(data.fwSuspend / 1000000.0)
rftime = '%.3f'%(data.fwResume / 1000000.0)
rktime = '%.3f'%((data.getEnd() - \
data.dmesg['resume_machine']['start'])*1000)
devtl.html['timeline'] += html_timegroups.format(sktime, \
sftime, rftime, rktime, testdesc2)
else:
suspend_time = '%.0f'%((data.tSuspended-data.start)*1000)
resume_time = '%.0f'%((tEnd-data.tSuspended)*1000)
testdesc = 'Kernel'
if(len(testruns) > 1):
testdesc = textnum[data.testnumber]+' '+testdesc
if(data.tLow == 0):
thtml = html_timetotal.format(suspend_time, \
resume_time, testdesc)
else:
thtml = html_timetotal2.format(suspend_time, low_time, \
resume_time, testdesc)
devtl.html['timeline'] += thtml
# time scale for potentially multiple datasets
t0 = testruns[0].start
tMax = testruns[-1].end
tSuspended = testruns[-1].tSuspended
tTotal = tMax - t0
# determine the maximum number of rows we need to draw
timelinerows = 0
for data in testruns:
for phase in data.dmesg:
list = data.dmesg[phase]['list']
rows = setTimelineRows(list, list)
data.dmesg[phase]['row'] = rows
if(rows > timelinerows):
timelinerows = rows
# calculate the timeline height and create bounding box, add buttons
devtl.setRows(timelinerows + 1)
devtl.html['timeline'] += html_devlist1
if len(testruns) > 1:
devtl.html['timeline'] += html_devlist2
devtl.html['timeline'] += html_zoombox
devtl.html['timeline'] += html_timeline.format('dmesg', devtl.height)
# draw the colored boxes for each of the phases
for data in testruns:
for b in data.dmesg:
phase = data.dmesg[b]
length = phase['end']-phase['start']
left = '%.3f' % (((phase['start']-t0)*100.0)/tTotal)
width = '%.3f' % ((length*100.0)/tTotal)
devtl.html['timeline'] += html_phase.format(left, width, \
'%.3f'%devtl.scaleH, '%.3f'%(100-devtl.scaleH), \
data.dmesg[b]['color'], '')
# draw the time scale, try to make the number of labels readable
devtl.html['scale'] = createTimeScale(t0, tMax, tSuspended)
devtl.html['timeline'] += devtl.html['scale']
for data in testruns:
for b in data.dmesg:
phaselist = data.dmesg[b]['list']
for d in phaselist:
name = d
drv = ''
dev = phaselist[d]
if(d in sysvals.altdevname):
name = sysvals.altdevname[d]
if('drv' in dev and dev['drv']):
drv = ' {%s}' % dev['drv']
height = (100.0 - devtl.scaleH)/data.dmesg[b]['row']
top = '%.3f' % ((dev['row']*height) + devtl.scaleH)
left = '%.3f' % (((dev['start']-t0)*100)/tTotal)
width = '%.3f' % (((dev['end']-dev['start'])*100)/tTotal)
length = ' (%0.3f ms) ' % ((dev['end']-dev['start'])*1000)
color = 'rgba(204,204,204,0.5)'
devtl.html['timeline'] += html_device.format(dev['id'], \
d+drv+length+b, left, top, '%.3f'%height, width, name+drv)
# draw any trace events found
for data in testruns:
for b in data.dmesg:
phaselist = data.dmesg[b]['list']
for name in phaselist:
dev = phaselist[name]
if('traceevents' in dev):
vprint('Debug trace events found for device %s' % name)
vprint('%20s %20s %10s %8s' % ('action', \
'name', 'time(ms)', 'length(ms)'))
for e in dev['traceevents']:
vprint('%20s %20s %10.3f %8.3f' % (e.action, \
e.name, e.time*1000, e.length*1000))
height = (100.0 - devtl.scaleH)/data.dmesg[b]['row']
top = '%.3f' % ((dev['row']*height) + devtl.scaleH)
left = '%.3f' % (((e.time-t0)*100)/tTotal)
width = '%.3f' % (e.length*100/tTotal)
color = 'rgba(204,204,204,0.5)'
devtl.html['timeline'] += \
html_traceevent.format(e.action+' '+e.name, \
left, top, '%.3f'%height, \
width, e.color, '')
# timeline is finished
devtl.html['timeline'] += '</div>\n</div>\n'
# draw a legend which describes the phases by color
data = testruns[-1]
devtl.html['legend'] = '<div class="legend">\n'
pdelta = 100.0/len(data.phases)
pmargin = pdelta / 4.0
for phase in data.phases:
order = '%.2f' % ((data.dmesg[phase]['order'] * pdelta) + pmargin)
name = string.replace(phase, '_', ' ')
devtl.html['legend'] += html_legend.format(order, \
data.dmesg[phase]['color'], name)
devtl.html['legend'] += '</div>\n'
hf = open(sysvals.htmlfile, 'w')
thread_height = 0
# write the html header first (html head, css code, up to body start)
html_header = '<!DOCTYPE html>\n<html>\n<head>\n\
<meta http-equiv="content-type" content="text/html; charset=UTF-8">\n\
<title>AnalyzeSuspend</title>\n\
<style type=\'text/css\'>\n\
body {overflow-y: scroll;}\n\
.stamp {width: 100%;text-align:center;background-color:gray;line-height:30px;color:white;font: 25px Arial;}\n\
.callgraph {margin-top: 30px;box-shadow: 5px 5px 20px black;}\n\
.callgraph article * {padding-left: 28px;}\n\
h1 {color:black;font: bold 30px Times;}\n\
t0 {color:black;font: bold 30px Times;}\n\
t1 {color:black;font: 30px Times;}\n\
t2 {color:black;font: 25px Times;}\n\
t3 {color:black;font: 20px Times;white-space:nowrap;}\n\
t4 {color:black;font: bold 30px Times;line-height:60px;white-space:nowrap;}\n\
table {width:100%;}\n\
.gray {background-color:rgba(80,80,80,0.1);}\n\
.green {background-color:rgba(204,255,204,0.4);}\n\
.purple {background-color:rgba(128,0,128,0.2);}\n\
.yellow {background-color:rgba(255,255,204,0.4);}\n\
.time1 {font: 22px Arial;border:1px solid;}\n\
.time2 {font: 15px Arial;border-bottom:1px solid;border-left:1px solid;border-right:1px solid;}\n\
td {text-align: center;}\n\
r {color:#500000;font:15px Tahoma;}\n\
n {color:#505050;font:15px Tahoma;}\n\
.tdhl {color: red;}\n\
.hide {display: none;}\n\
.pf {display: none;}\n\
.pf:checked + label {background: url(\'data:image/svg+xml;utf,<?xml version="1.0" standalone="no"?><svg xmlns="http://www.w3.org/2000/svg" height="18" width="18" version="1.1"><circle cx="9" cy="9" r="8" stroke="black" stroke-width="1" fill="white"/><rect x="4" y="8" width="10" height="2" style="fill:black;stroke-width:0"/><rect x="8" y="4" width="2" height="10" style="fill:black;stroke-width:0"/></svg>\') no-repeat left center;}\n\
.pf:not(:checked) ~ label {background: url(\'data:image/svg+xml;utf,<?xml version="1.0" standalone="no"?><svg xmlns="http://www.w3.org/2000/svg" height="18" width="18" version="1.1"><circle cx="9" cy="9" r="8" stroke="black" stroke-width="1" fill="white"/><rect x="4" y="8" width="10" height="2" style="fill:black;stroke-width:0"/></svg>\') no-repeat left center;}\n\
.pf:checked ~ *:not(:nth-child(2)) {display: none;}\n\
.zoombox {position: relative; width: 100%; overflow-x: scroll;}\n\
.timeline {position: relative; font-size: 14px;cursor: pointer;width: 100%; overflow: hidden; background-color:#dddddd;}\n\
.thread {position: absolute; height: '+'%.3f'%thread_height+'%; overflow: hidden; line-height: 30px; border:1px solid;text-align:center;white-space:nowrap;background-color:rgba(204,204,204,0.5);}\n\
.thread:hover {background-color:white;border:1px solid red;z-index:10;}\n\
.hover {background-color:white;border:1px solid red;z-index:10;}\n\
.traceevent {position: absolute;opacity: 0.3;height: '+'%.3f'%thread_height+'%;width:0;overflow:hidden;line-height:30px;text-align:center;white-space:nowrap;}\n\
.phase {position: absolute;overflow: hidden;border:0px;text-align:center;}\n\
.phaselet {position:absolute;overflow:hidden;border:0px;text-align:center;height:100px;font-size:24px;}\n\
.t {position:absolute;top:0%;height:100%;border-right:1px solid black;}\n\
.legend {position: relative; width: 100%; height: 40px; text-align: center;margin-bottom:20px}\n\
.legend .square {position:absolute;top:10px; width: 0px;height: 20px;border:1px solid;padding-left:20px;}\n\
button {height:40px;width:200px;margin-bottom:20px;margin-top:20px;font-size:24px;}\n\
.devlist {position:'+x2changes[1]+';width:190px;}\n\
#devicedetail {height:100px;box-shadow: 5px 5px 20px black;}\n\
</style>\n</head>\n<body>\n'
hf.write(html_header)
# write the test title and general info header
if(sysvals.stamp['time'] != ""):
hf.write(headline_stamp.format(sysvals.stamp['host'],
sysvals.stamp['kernel'], sysvals.stamp['mode'], \
sysvals.stamp['time']))
# write the device timeline
hf.write(devtl.html['timeline'])
hf.write(devtl.html['legend'])
hf.write('<div id="devicedetailtitle"></div>\n')
hf.write('<div id="devicedetail" style="display:none;">\n')
# draw the colored boxes for the device detail section
for data in testruns:
hf.write('<div id="devicedetail%d">\n' % data.testnumber)
for b in data.phases:
phase = data.dmesg[b]
length = phase['end']-phase['start']
left = '%.3f' % (((phase['start']-t0)*100.0)/tTotal)
width = '%.3f' % ((length*100.0)/tTotal)
hf.write(html_phaselet.format(b, left, width, \
data.dmesg[b]['color']))
hf.write('</div>\n')
hf.write('</div>\n')
# write the ftrace data (callgraph)
data = testruns[-1]
if(sysvals.usecallgraph):
hf.write('<section id="callgraphs" class="callgraph">\n')
# write out the ftrace data converted to html
html_func_top = '<article id="{0}" class="atop" style="background-color:{1}">\n<input type="checkbox" class="pf" id="f{2}" checked/><label for="f{2}">{3} {4}</label>\n'
html_func_start = '<article>\n<input type="checkbox" class="pf" id="f{0}" checked/><label for="f{0}">{1} {2}</label>\n'
html_func_end = '</article>\n'
html_func_leaf = '<article>{0} {1}</article>\n'
num = 0
for p in data.phases:
list = data.dmesg[p]['list']
for devname in data.sortedDevices(p):
if('ftrace' not in list[devname]):
continue
name = devname
if(devname in sysvals.altdevname):
name = sysvals.altdevname[devname]
devid = list[devname]['id']
cg = list[devname]['ftrace']
flen = '<r>(%.3f ms @ %.3f to %.3f)</r>' % \
((cg.end - cg.start)*1000, cg.start*1000, cg.end*1000)
hf.write(html_func_top.format(devid, data.dmesg[p]['color'], \
num, name+' '+p, flen))
num += 1
for line in cg.list:
if(line.length < 0.000000001):
flen = ''
else:
flen = '<n>(%.3f ms @ %.3f)</n>' % (line.length*1000, \
line.time*1000)
if(line.freturn and line.fcall):
hf.write(html_func_leaf.format(line.name, flen))
elif(line.freturn):
hf.write(html_func_end)
else:
hf.write(html_func_start.format(num, line.name, flen))
num += 1
hf.write(html_func_end)
hf.write('\n\n </section>\n')
# write the footer and close
addScriptCode(hf, testruns)
hf.write('</body>\n</html>\n')
hf.close()
return True
# Function: addScriptCode
# Description:
# Adds the javascript code to the output html
# Arguments:
# hf: the open html file pointer
# testruns: array of Data objects from parseKernelLog or parseTraceLog
def addScriptCode(hf, testruns):
t0 = (testruns[0].start - testruns[-1].tSuspended) * 1000
tMax = (testruns[-1].end - testruns[-1].tSuspended) * 1000
# create an array in javascript memory with the device details
detail = ' var devtable = [];\n'
for data in testruns:
topo = data.deviceTopology()
detail += ' devtable[%d] = "%s";\n' % (data.testnumber, topo)
detail += ' var bounds = [%f,%f];\n' % (t0, tMax)
# add the code which will manipulate the data in the browser
script_code = \
'<script type="text/javascript">\n'+detail+\
' function zoomTimeline() {\n'\
' var timescale = document.getElementById("timescale");\n'\
' var dmesg = document.getElementById("dmesg");\n'\
' var zoombox = document.getElementById("dmesgzoombox");\n'\
' var val = parseFloat(dmesg.style.width);\n'\
' var newval = 100;\n'\
' var sh = window.outerWidth / 2;\n'\
' if(this.id == "zoomin") {\n'\
' newval = val * 1.2;\n'\
' if(newval > 40000) newval = 40000;\n'\
' dmesg.style.width = newval+"%";\n'\
' zoombox.scrollLeft = ((zoombox.scrollLeft + sh) * newval / val) - sh;\n'\
' } else if (this.id == "zoomout") {\n'\
' newval = val / 1.2;\n'\
' if(newval < 100) newval = 100;\n'\
' dmesg.style.width = newval+"%";\n'\
' zoombox.scrollLeft = ((zoombox.scrollLeft + sh) * newval / val) - sh;\n'\
' } else {\n'\
' zoombox.scrollLeft = 0;\n'\
' dmesg.style.width = "100%";\n'\
' }\n'\
' var html = "";\n'\
' var t0 = bounds[0];\n'\
' var tMax = bounds[1];\n'\
' var tTotal = tMax - t0;\n'\
' var wTotal = tTotal * 100.0 / newval;\n'\
' for(var tS = 1000; (wTotal / tS) < 3; tS /= 10);\n'\
' if(tS < 1) tS = 1;\n'\
' for(var s = ((t0 / tS)|0) * tS; s < tMax; s += tS) {\n'\
' var pos = (tMax - s) * 100.0 / tTotal;\n'\
' var name = (s == 0)?"S/R":(s+"ms");\n'\
' html += "<div class=\\"t\\" style=\\"right:"+pos+"%\\">"+name+"</div>";\n'\
' }\n'\
' timescale.innerHTML = html;\n'\
' }\n'\
' function deviceHover() {\n'\
' var name = this.title.slice(0, this.title.indexOf(" ("));\n'\
' var dmesg = document.getElementById("dmesg");\n'\
' var dev = dmesg.getElementsByClassName("thread");\n'\
' var cpu = -1;\n'\
' if(name.match("CPU_ON\[[0-9]*\]"))\n'\
' cpu = parseInt(name.slice(7));\n'\
' else if(name.match("CPU_OFF\[[0-9]*\]"))\n'\
' cpu = parseInt(name.slice(8));\n'\
' for (var i = 0; i < dev.length; i++) {\n'\
' dname = dev[i].title.slice(0, dev[i].title.indexOf(" ("));\n'\
' if((cpu >= 0 && dname.match("CPU_O[NF]*\\\[*"+cpu+"\\\]")) ||\n'\
' (name == dname))\n'\
' {\n'\
' dev[i].className = "thread hover";\n'\
' } else {\n'\
' dev[i].className = "thread";\n'\
' }\n'\
' }\n'\
' }\n'\
' function deviceUnhover() {\n'\
' var dmesg = document.getElementById("dmesg");\n'\
' var dev = dmesg.getElementsByClassName("thread");\n'\
' for (var i = 0; i < dev.length; i++) {\n'\
' dev[i].className = "thread";\n'\
' }\n'\
' }\n'\
' function deviceTitle(title, total, cpu) {\n'\
' var prefix = "Total";\n'\
' if(total.length > 3) {\n'\
' prefix = "Average";\n'\
' total[1] = (total[1]+total[3])/2;\n'\
' total[2] = (total[2]+total[4])/2;\n'\
' }\n'\
' var devtitle = document.getElementById("devicedetailtitle");\n'\
' var name = title.slice(0, title.indexOf(" "));\n'\
' if(cpu >= 0) name = "CPU"+cpu;\n'\
' var driver = "";\n'\
' var tS = "<t2>(</t2>";\n'\
' var tR = "<t2>)</t2>";\n'\
' if(total[1] > 0)\n'\
' tS = "<t2>("+prefix+" Suspend:</t2><t0> "+total[1].toFixed(3)+" ms</t0> ";\n'\
' if(total[2] > 0)\n'\
' tR = " <t2>"+prefix+" Resume:</t2><t0> "+total[2].toFixed(3)+" ms<t2>)</t2></t0>";\n'\
' var s = title.indexOf("{");\n'\
' var e = title.indexOf("}");\n'\
' if((s >= 0) && (e >= 0))\n'\
' driver = title.slice(s+1, e) + " <t1>@</t1> ";\n'\
' if(total[1] > 0 && total[2] > 0)\n'\
' devtitle.innerHTML = "<t0>"+driver+name+"</t0> "+tS+tR;\n'\
' else\n'\
' devtitle.innerHTML = "<t0>"+title+"</t0>";\n'\
' return name;\n'\
' }\n'\
' function deviceDetail() {\n'\
' var devinfo = document.getElementById("devicedetail");\n'\
' devinfo.style.display = "block";\n'\
' var name = this.title.slice(0, this.title.indexOf(" ("));\n'\
' var cpu = -1;\n'\
' if(name.match("CPU_ON\[[0-9]*\]"))\n'\
' cpu = parseInt(name.slice(7));\n'\
' else if(name.match("CPU_OFF\[[0-9]*\]"))\n'\
' cpu = parseInt(name.slice(8));\n'\
' var dmesg = document.getElementById("dmesg");\n'\
' var dev = dmesg.getElementsByClassName("thread");\n'\
' var idlist = [];\n'\
' var pdata = [[]];\n'\
' var pd = pdata[0];\n'\
' var total = [0.0, 0.0, 0.0];\n'\
' for (var i = 0; i < dev.length; i++) {\n'\
' dname = dev[i].title.slice(0, dev[i].title.indexOf(" ("));\n'\
' if((cpu >= 0 && dname.match("CPU_O[NF]*\\\[*"+cpu+"\\\]")) ||\n'\
' (name == dname))\n'\
' {\n'\
' idlist[idlist.length] = dev[i].id;\n'\
' var tidx = 1;\n'\
' if(dev[i].id[0] == "a") {\n'\
' pd = pdata[0];\n'\
' } else {\n'\
' if(pdata.length == 1) pdata[1] = [];\n'\
' if(total.length == 3) total[3]=total[4]=0.0;\n'\
' pd = pdata[1];\n'\
' tidx = 3;\n'\
' }\n'\
' var info = dev[i].title.split(" ");\n'\
' var pname = info[info.length-1];\n'\
' pd[pname] = parseFloat(info[info.length-3].slice(1));\n'\
' total[0] += pd[pname];\n'\
' if(pname.indexOf("suspend") >= 0)\n'\
' total[tidx] += pd[pname];\n'\
' else\n'\
' total[tidx+1] += pd[pname];\n'\
' }\n'\
' }\n'\
' var devname = deviceTitle(this.title, total, cpu);\n'\
' var left = 0.0;\n'\
' for (var t = 0; t < pdata.length; t++) {\n'\
' pd = pdata[t];\n'\
' devinfo = document.getElementById("devicedetail"+t);\n'\
' var phases = devinfo.getElementsByClassName("phaselet");\n'\
' for (var i = 0; i < phases.length; i++) {\n'\
' if(phases[i].id in pd) {\n'\
' var w = 100.0*pd[phases[i].id]/total[0];\n'\
' var fs = 32;\n'\
' if(w < 8) fs = 4*w | 0;\n'\
' var fs2 = fs*3/4;\n'\
' phases[i].style.width = w+"%";\n'\
' phases[i].style.left = left+"%";\n'\
' phases[i].title = phases[i].id+" "+pd[phases[i].id]+" ms";\n'\
' left += w;\n'\
' var time = "<t4 style=\\"font-size:"+fs+"px\\">"+pd[phases[i].id]+" ms<br></t4>";\n'\
' var pname = "<t3 style=\\"font-size:"+fs2+"px\\">"+phases[i].id.replace("_", " ")+"</t3>";\n'\
' phases[i].innerHTML = time+pname;\n'\
' } else {\n'\
' phases[i].style.width = "0%";\n'\
' phases[i].style.left = left+"%";\n'\
' }\n'\
' }\n'\
' }\n'\
' var cglist = document.getElementById("callgraphs");\n'\
' if(!cglist) return;\n'\
' var cg = cglist.getElementsByClassName("atop");\n'\
' for (var i = 0; i < cg.length; i++) {\n'\
' if(idlist.indexOf(cg[i].id) >= 0) {\n'\
' cg[i].style.display = "block";\n'\
' } else {\n'\
' cg[i].style.display = "none";\n'\
' }\n'\
' }\n'\
' }\n'\
' function devListWindow(e) {\n'\
' var sx = e.clientX;\n'\
' if(sx > window.innerWidth - 440)\n'\
' sx = window.innerWidth - 440;\n'\
' var cfg="top="+e.screenY+", left="+sx+", width=440, height=720, scrollbars=yes";\n'\
' var win = window.open("", "_blank", cfg);\n'\
' if(window.chrome) win.moveBy(sx, 0);\n'\
' var html = "<title>"+e.target.innerHTML+"</title>"+\n'\
' "<style type=\\"text/css\\">"+\n'\
' " ul {list-style-type:circle;padding-left:10px;margin-left:10px;}"+\n'\
' "</style>"\n'\
' var dt = devtable[0];\n'\
' if(e.target.id != "devlist1")\n'\
' dt = devtable[1];\n'\
' win.document.write(html+dt);\n'\
' }\n'\
' window.addEventListener("load", function () {\n'\
' var dmesg = document.getElementById("dmesg");\n'\
' dmesg.style.width = "100%"\n'\
' document.getElementById("zoomin").onclick = zoomTimeline;\n'\
' document.getElementById("zoomout").onclick = zoomTimeline;\n'\
' document.getElementById("zoomdef").onclick = zoomTimeline;\n'\
' var devlist = document.getElementsByClassName("devlist");\n'\
' for (var i = 0; i < devlist.length; i++)\n'\
' devlist[i].onclick = devListWindow;\n'\
' var dev = dmesg.getElementsByClassName("thread");\n'\
' for (var i = 0; i < dev.length; i++) {\n'\
' dev[i].onclick = deviceDetail;\n'\
' dev[i].onmouseover = deviceHover;\n'\
' dev[i].onmouseout = deviceUnhover;\n'\
' }\n'\
' zoomTimeline();\n'\
' });\n'\
'</script>\n'
hf.write(script_code);
# Function: executeSuspend
# Description:
# Execute system suspend through the sysfs interface, then copy the output
# dmesg and ftrace files to the test output directory.
def executeSuspend():
global sysvals
detectUSB(False)
t0 = time.time()*1000
tp = sysvals.tpath
# execute however many s/r runs requested
for count in range(1,sysvals.execcount+1):
# clear the kernel ring buffer just as we start
os.system('dmesg -C')
# enable callgraph ftrace only for the second run
if(sysvals.usecallgraph and count == 2):
# set trace type
os.system('echo function_graph > '+tp+'current_tracer')
os.system('echo "" > '+tp+'set_ftrace_filter')
# set trace format options
os.system('echo funcgraph-abstime > '+tp+'trace_options')
os.system('echo funcgraph-proc > '+tp+'trace_options')
# focus only on device suspend and resume
os.system('cat '+tp+'available_filter_functions | '+\
'grep dpm_run_callback > '+tp+'set_graph_function')
# if this is test2 and there's a delay, start here
if(count > 1 and sysvals.x2delay > 0):
tN = time.time()*1000
while (tN - t0) < sysvals.x2delay:
tN = time.time()*1000
time.sleep(0.001)
# start ftrace
if(sysvals.usecallgraph or sysvals.usetraceevents):
print('START TRACING')
os.system('echo 1 > '+tp+'tracing_on')
# initiate suspend
if(sysvals.usecallgraph or sysvals.usetraceevents):
os.system('echo SUSPEND START > '+tp+'trace_marker')
if(sysvals.rtcwake):
print('SUSPEND START')
print('will autoresume in %d seconds' % sysvals.rtcwaketime)
sysvals.rtcWakeAlarm()
else:
print('SUSPEND START (press a key to resume)')
pf = open(sysvals.powerfile, 'w')
pf.write(sysvals.suspendmode)
# execution will pause here
pf.close()
t0 = time.time()*1000
# return from suspend
print('RESUME COMPLETE')
if(sysvals.usecallgraph or sysvals.usetraceevents):
os.system('echo RESUME COMPLETE > '+tp+'trace_marker')
# see if there's firmware timing data to be had
t = sysvals.postresumetime
if(t > 0):
print('Waiting %d seconds for POST-RESUME trace events...' % t)
time.sleep(t)
# stop ftrace
if(sysvals.usecallgraph or sysvals.usetraceevents):
os.system('echo 0 > '+tp+'tracing_on')
print('CAPTURING TRACE')
writeDatafileHeader(sysvals.ftracefile)
os.system('cat '+tp+'trace >> '+sysvals.ftracefile)
os.system('echo "" > '+tp+'trace')
# grab a copy of the dmesg output
print('CAPTURING DMESG')
writeDatafileHeader(sysvals.dmesgfile)
os.system('dmesg -c >> '+sysvals.dmesgfile)
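# Minimal sketch of the core suspend trigger used above, shown in isolation
# (illustrative only, never called by this tool; the helper name is made up):
# writing the suspend mode name to /sys/power/state blocks until resume.
def _sysfsSuspendSketch(mode='mem', powerfile='/sys/power/state'):
	# requires root; the write/close does not return until the system resumes
	pf = open(powerfile, 'w')
	pf.write(mode)
	pf.close()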
def writeDatafileHeader(filename):
global sysvals
fw = getFPDT(False)
prt = sysvals.postresumetime
fp = open(filename, 'a')
fp.write(sysvals.teststamp+'\n')
if(fw):
fp.write('# fwsuspend %u fwresume %u\n' % (fw[0], fw[1]))
if(prt > 0):
fp.write('# post resume time %u\n' % prt)
fp.close()
# Function: executeAndroidSuspend
# Description:
# Execute system suspend through the sysfs interface
# on a remote android device, then transfer the output
# dmesg and ftrace files to the local output directory.
def executeAndroidSuspend():
global sysvals
# check to see if the display is currently off
tp = sysvals.tpath
out = os.popen(sysvals.adb+\
' shell dumpsys power | grep mScreenOn').read().strip()
# if so we need to turn it on so we can issue a new suspend
if(out.endswith('false')):
print('Waking the device up for the test...')
# send the KEYPAD_POWER keyevent to wake it up
os.system(sysvals.adb+' shell input keyevent 26')
# wait a few seconds so the user can see the device wake up
time.sleep(3)
# execute however many s/r runs requested
for count in range(1,sysvals.execcount+1):
# clear the kernel ring buffer just as we start
os.system(sysvals.adb+' shell dmesg -c > /dev/null 2>&1')
# start ftrace
if(sysvals.usetraceevents):
print('START TRACING')
os.system(sysvals.adb+" shell 'echo 1 > "+tp+"tracing_on'")
# initiate suspend
if(sysvals.usetraceevents):
os.system(sysvals.adb+\
" shell 'echo SUSPEND START > "+tp+"trace_marker'")
print('SUSPEND START (press a key on the device to resume)')
os.system(sysvals.adb+" shell 'echo "+sysvals.suspendmode+\
" > "+sysvals.powerfile+"'")
# execution will pause here, then adb will exit
while(True):
check = os.popen(sysvals.adb+\
' shell pwd 2>/dev/null').read().strip()
if(len(check) > 0):
break
time.sleep(1)
if(sysvals.usetraceevents):
os.system(sysvals.adb+" shell 'echo RESUME COMPLETE > "+tp+\
"trace_marker'")
# return from suspend
print('RESUME COMPLETE')
# stop ftrace
if(sysvals.usetraceevents):
os.system(sysvals.adb+" shell 'echo 0 > "+tp+"tracing_on'")
print('CAPTURING TRACE')
os.system('echo "'+sysvals.teststamp+'" > '+sysvals.ftracefile)
os.system(sysvals.adb+' shell cat '+tp+\
'trace >> '+sysvals.ftracefile)
# grab a copy of the dmesg output
print('CAPTURING DMESG')
os.system('echo "'+sysvals.teststamp+'" > '+sysvals.dmesgfile)
os.system(sysvals.adb+' shell dmesg >> '+sysvals.dmesgfile)
# Function: setUSBDevicesAuto
# Description:
# Set the autosuspend control parameter of all USB devices to auto
#	 This can be dangerous, so use at your own risk; most devices are set
#	 to always-on since the kernel can't determine whether the device can
#	 properly autosuspend
def setUSBDevicesAuto():
global sysvals
rootCheck()
for dirname, dirnames, filenames in os.walk('/sys/devices'):
if(re.match('.*/usb[0-9]*.*', dirname) and
'idVendor' in filenames and 'idProduct' in filenames):
os.system('echo auto > %s/power/control' % dirname)
name = dirname.split('/')[-1]
desc = os.popen('cat %s/product 2>/dev/null' % \
dirname).read().replace('\n', '')
ctrl = os.popen('cat %s/power/control 2>/dev/null' % \
dirname).read().replace('\n', '')
print('control is %s for %6s: %s' % (ctrl, name, desc))
# Function: yesno
# Description:
# Print out an equivalent Y or N for a set of known parameter values
# Output:
# 'Y', 'N', or ' ' if the value is unknown
def yesno(val):
yesvals = ['auto', 'enabled', 'active', '1']
novals = ['on', 'disabled', 'suspended', 'forbidden', 'unsupported']
if val in yesvals:
return 'Y'
elif val in novals:
return 'N'
return ' '
# Function: ms2nice
# Description:
# Print out a very concise time string in minutes and seconds
# Output:
# The time string, e.g. "1901m16s"
def ms2nice(val):
ms = 0
try:
ms = int(val)
except:
return 0.0
m = ms / 60000
s = (ms / 1000) - (m * 60)
return '%3dm%2ds' % (m, s)
# Function: detectUSB
# Description:
# Detect all the USB hosts and devices currently connected and add
# a list of USB device names to sysvals for better timeline readability
# Arguments:
# output: True to output the info to stdout, False otherwise
def detectUSB(output):
global sysvals
field = {'idVendor':'', 'idProduct':'', 'product':'', 'speed':''}
power = {'async':'', 'autosuspend':'', 'autosuspend_delay_ms':'',
'control':'', 'persist':'', 'runtime_enabled':'',
'runtime_status':'', 'runtime_usage':'',
'runtime_active_time':'',
'runtime_suspended_time':'',
'active_duration':'',
'connected_duration':''}
if(output):
print('LEGEND')
print('---------------------------------------------------------------------------------------------')
print(' A = async/sync PM queue Y/N D = autosuspend delay (seconds)')
print(' S = autosuspend Y/N rACTIVE = runtime active (min/sec)')
print(' P = persist across suspend Y/N rSUSPEN = runtime suspend (min/sec)')
print(' E = runtime suspend enabled/forbidden Y/N ACTIVE = active duration (min/sec)')
print(' R = runtime status active/suspended Y/N CONNECT = connected duration (min/sec)')
print(' U = runtime usage count')
print('---------------------------------------------------------------------------------------------')
print(' NAME ID DESCRIPTION SPEED A S P E R U D rACTIVE rSUSPEN ACTIVE CONNECT')
print('---------------------------------------------------------------------------------------------')
for dirname, dirnames, filenames in os.walk('/sys/devices'):
if(re.match('.*/usb[0-9]*.*', dirname) and
'idVendor' in filenames and 'idProduct' in filenames):
for i in field:
field[i] = os.popen('cat %s/%s 2>/dev/null' % \
(dirname, i)).read().replace('\n', '')
name = dirname.split('/')[-1]
if(len(field['product']) > 0):
sysvals.altdevname[name] = \
'%s [%s]' % (field['product'], name)
else:
sysvals.altdevname[name] = \
'%s:%s [%s]' % (field['idVendor'], \
field['idProduct'], name)
if(output):
for i in power:
power[i] = os.popen('cat %s/power/%s 2>/dev/null' % \
(dirname, i)).read().replace('\n', '')
if(re.match('usb[0-9]*', name)):
first = '%-8s' % name
else:
first = '%8s' % name
print('%s [%s:%s] %-20s %-4s %1s %1s %1s %1s %1s %1s %1s %s %s %s %s' % \
(first, field['idVendor'], field['idProduct'], \
field['product'][0:20], field['speed'], \
yesno(power['async']), \
yesno(power['control']), \
yesno(power['persist']), \
yesno(power['runtime_enabled']), \
yesno(power['runtime_status']), \
power['runtime_usage'], \
power['autosuspend'], \
ms2nice(power['runtime_active_time']), \
ms2nice(power['runtime_suspended_time']), \
ms2nice(power['active_duration']), \
ms2nice(power['connected_duration'])))
# Function: getModes
# Description:
# Determine the supported power modes on this system
# Output:
# A string list of the available modes
def getModes():
global sysvals
modes = ''
if(not sysvals.android):
if(os.path.exists(sysvals.powerfile)):
fp = open(sysvals.powerfile, 'r')
modes = string.split(fp.read())
fp.close()
else:
line = os.popen(sysvals.adb+' shell cat '+\
sysvals.powerfile).read().strip()
modes = string.split(line)
return modes
# Function: getFPDT
# Description:
# Read the acpi bios tables and pull out FPDT, the firmware data
# Arguments:
# output: True to output the info to stdout, False otherwise
def getFPDT(output):
global sysvals
rectype = {}
rectype[0] = 'Firmware Basic Boot Performance Record'
rectype[1] = 'S3 Performance Table Record'
prectype = {}
prectype[0] = 'Basic S3 Resume Performance Record'
prectype[1] = 'Basic S3 Suspend Performance Record'
rootCheck()
if(not os.path.exists(sysvals.fpdtpath)):
if(output):
doError('file doesnt exist: %s' % sysvals.fpdtpath, False)
return False
if(not os.access(sysvals.fpdtpath, os.R_OK)):
if(output):
doError('file isnt readable: %s' % sysvals.fpdtpath, False)
return False
if(not os.path.exists(sysvals.mempath)):
if(output):
doError('file doesnt exist: %s' % sysvals.mempath, False)
return False
if(not os.access(sysvals.mempath, os.R_OK)):
if(output):
doError('file isnt readable: %s' % sysvals.mempath, False)
return False
fp = open(sysvals.fpdtpath, 'rb')
buf = fp.read()
fp.close()
if(len(buf) < 36):
if(output):
doError('Invalid FPDT table data, should '+\
'be at least 36 bytes', False)
return False
table = struct.unpack('4sIBB6s8sI4sI', buf[0:36])
if(output):
print('')
print('Firmware Performance Data Table (%s)' % table[0])
print(' Signature : %s' % table[0])
print(' Table Length : %u' % table[1])
print(' Revision : %u' % table[2])
print(' Checksum : 0x%x' % table[3])
print(' OEM ID : %s' % table[4])
print(' OEM Table ID : %s' % table[5])
print(' OEM Revision : %u' % table[6])
print(' Creator ID : %s' % table[7])
print(' Creator Revision : 0x%x' % table[8])
print('')
if(table[0] != 'FPDT'):
if(output):
doError('Invalid FPDT table')
return False
if(len(buf) <= 36):
return False
i = 0
fwData = [0, 0]
records = buf[36:]
fp = open(sysvals.mempath, 'rb')
while(i < len(records)):
header = struct.unpack('HBB', records[i:i+4])
		if(header[0] not in rectype):
			i += header[1]
			continue
		if(header[1] != 16):
			i += header[1]
			continue
addr = struct.unpack('Q', records[i+8:i+16])[0]
try:
fp.seek(addr)
first = fp.read(8)
except:
doError('Bad address 0x%x in %s' % (addr, sysvals.mempath), False)
rechead = struct.unpack('4sI', first)
recdata = fp.read(rechead[1]-8)
if(rechead[0] == 'FBPT'):
record = struct.unpack('HBBIQQQQQ', recdata)
if(output):
print('%s (%s)' % (rectype[header[0]], rechead[0]))
print(' Reset END : %u ns' % record[4])
print(' OS Loader LoadImage Start : %u ns' % record[5])
print(' OS Loader StartImage Start : %u ns' % record[6])
print(' ExitBootServices Entry : %u ns' % record[7])
print(' ExitBootServices Exit : %u ns' % record[8])
elif(rechead[0] == 'S3PT'):
if(output):
print('%s (%s)' % (rectype[header[0]], rechead[0]))
j = 0
while(j < len(recdata)):
prechead = struct.unpack('HBB', recdata[j:j+4])
				if(prechead[0] not in prectype):
					break
if(prechead[0] == 0):
record = struct.unpack('IIQQ', recdata[j:j+prechead[1]])
fwData[1] = record[2]
if(output):
print(' %s' % prectype[prechead[0]])
print(' Resume Count : %u' % \
record[1])
print(' FullResume : %u ns' % \
record[2])
print(' AverageResume : %u ns' % \
record[3])
elif(prechead[0] == 1):
record = struct.unpack('QQ', recdata[j+4:j+prechead[1]])
fwData[0] = record[1] - record[0]
if(output):
print(' %s' % prectype[prechead[0]])
print(' SuspendStart : %u ns' % \
record[0])
print(' SuspendEnd : %u ns' % \
record[1])
print(' SuspendTime : %u ns' % \
fwData[0])
j += prechead[1]
if(output):
print('')
i += header[1]
fp.close()
return fwData
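# Self-contained sketch (not called anywhere) of the 36-byte ACPI table header
# layout that getFPDT() unpacks above; every field value below is made up.
def _fpdtHeaderSketch():
	import struct	# local import so the sketch stands alone
	# signature, length, revision, checksum, OEM ID, OEM table ID,
	# OEM revision, creator ID, creator revision
	buf = struct.pack('4sIBB6s8sI4sI', 'FPDT', 52, 1, 0x3a, \
		'INTEL ', 'TEMPLATE', 1, 'INTL', 0x20120913)
	# the same unpack call getFPDT() applies to buf[0:36] of the real table
	return struct.unpack('4sIBB6s8sI4sI', buf)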
# Function: statusCheck
# Description:
# Verify that the requested command and options will work, and
# print the results to the terminal
# Output:
# True if the test will work, False if not
def statusCheck():
global sysvals
status = True
if(sysvals.android):
print('Checking the android system ...')
else:
print('Checking this system (%s)...' % platform.node())
# check if adb is connected to a device
if(sysvals.android):
res = 'NO'
out = os.popen(sysvals.adb+' get-state').read().strip()
if(out == 'device'):
res = 'YES'
print(' is android device connected: %s' % res)
if(res != 'YES'):
print(' Please connect the device before using this tool')
return False
# check we have root access
res = 'NO (No features of this tool will work!)'
if(sysvals.android):
out = os.popen(sysvals.adb+' shell id').read().strip()
if('root' in out):
res = 'YES'
else:
if(os.environ['USER'] == 'root'):
res = 'YES'
print(' have root access: %s' % res)
if(res != 'YES'):
if(sysvals.android):
print(' Try running "adb root" to restart the daemon as root')
else:
print(' Try running this script with sudo')
return False
# check sysfs is mounted
res = 'NO (No features of this tool will work!)'
if(sysvals.android):
out = os.popen(sysvals.adb+' shell ls '+\
sysvals.powerfile).read().strip()
if(out == sysvals.powerfile):
res = 'YES'
else:
if(os.path.exists(sysvals.powerfile)):
res = 'YES'
print(' is sysfs mounted: %s' % res)
if(res != 'YES'):
return False
# check target mode is a valid mode
res = 'NO'
modes = getModes()
if(sysvals.suspendmode in modes):
res = 'YES'
else:
status = False
print(' is "%s" a valid power mode: %s' % (sysvals.suspendmode, res))
if(res == 'NO'):
print(' valid power modes are: %s' % modes)
print(' please choose one with -m')
# check if the tool can unlock the device
if(sysvals.android):
res = 'YES'
out1 = os.popen(sysvals.adb+\
' shell dumpsys power | grep mScreenOn').read().strip()
out2 = os.popen(sysvals.adb+\
' shell input').read().strip()
if(not out1.startswith('mScreenOn') or not out2.startswith('usage')):
res = 'NO (wake the android device up before running the test)'
print(' can I unlock the screen: %s' % res)
# check if ftrace is available
res = 'NO'
ftgood = verifyFtrace()
if(ftgood):
res = 'YES'
elif(sysvals.usecallgraph):
status = False
print(' is ftrace supported: %s' % res)
# what data source are we using
res = 'DMESG'
if(ftgood):
sysvals.usetraceeventsonly = True
sysvals.usetraceevents = False
for e in sysvals.traceevents:
check = False
if(sysvals.android):
out = os.popen(sysvals.adb+' shell ls -d '+\
sysvals.epath+e).read().strip()
if(out == sysvals.epath+e):
check = True
else:
if(os.path.exists(sysvals.epath+e)):
check = True
if(not check):
sysvals.usetraceeventsonly = False
if(e == 'suspend_resume' and check):
sysvals.usetraceevents = True
if(sysvals.usetraceevents and sysvals.usetraceeventsonly):
res = 'FTRACE (all trace events found)'
elif(sysvals.usetraceevents):
res = 'DMESG and FTRACE (suspend_resume trace event found)'
print(' timeline data source: %s' % res)
# check if rtcwake
res = 'NO'
if(sysvals.rtcpath != ''):
res = 'YES'
elif(sysvals.rtcwake):
status = False
print(' is rtcwake supported: %s' % res)
return status
# Function: doError
# Description:
#	 generic error function for catastrophic failures
# Arguments:
# msg: the error message to print
# help: True if printHelp should be called after, False otherwise
def doError(msg, help):
if(help == True):
printHelp()
print('ERROR: %s\n') % msg
sys.exit()
# Function: doWarning
# Description:
# generic warning function for non-catastrophic anomalies
# Arguments:
# msg: the warning message to print
# file: If not empty, a filename to request be sent to the owner for debug
def doWarning(msg, file):
print('/* %s */') % msg
if(file):
print('/* For a fix, please send this'+\
' %s file to <[email protected]> */' % file)
# Function: rootCheck
# Description:
# quick check to see if we have root access
def rootCheck():
if(os.environ['USER'] != 'root'):
doError('This script must be run as root', False)
# Function: getArgInt
# Description:
# pull out an integer argument from the command line with checks
def getArgInt(name, args, min, max):
try:
arg = args.next()
except:
doError(name+': no argument supplied', True)
try:
val = int(arg)
except:
doError(name+': non-integer value given', True)
if(val < min or val > max):
doError(name+': value should be between %d and %d' % (min, max), True)
return val
# Function: rerunTest
# Description:
# generate an output from an existing set of ftrace/dmesg logs
def rerunTest():
global sysvals
if(sysvals.ftracefile != ''):
doesTraceLogHaveTraceEvents()
if(sysvals.dmesgfile == '' and not sysvals.usetraceeventsonly):
doError('recreating this html output '+\
'requires a dmesg file', False)
sysvals.setOutputFile()
vprint('Output file: %s' % sysvals.htmlfile)
print('PROCESSING DATA')
if(sysvals.usetraceeventsonly):
testruns = parseTraceLog()
else:
testruns = loadKernelLog()
for data in testruns:
parseKernelLog(data)
if(sysvals.ftracefile != ''):
appendIncompleteTraceLog(testruns)
createHTML(testruns)
# Function: runTest
# Description:
# execute a suspend/resume, gather the logs, and generate the output
def runTest(subdir):
global sysvals
# prepare for the test
if(not sysvals.android):
initFtrace()
else:
initFtraceAndroid()
sysvals.initTestOutput(subdir)
vprint('Output files:\n %s' % sysvals.dmesgfile)
if(sysvals.usecallgraph or
sysvals.usetraceevents or
sysvals.usetraceeventsonly):
vprint(' %s' % sysvals.ftracefile)
vprint(' %s' % sysvals.htmlfile)
# execute the test
if(not sysvals.android):
executeSuspend()
else:
executeAndroidSuspend()
# analyze the data and create the html output
print('PROCESSING DATA')
if(sysvals.usetraceeventsonly):
# data for kernels 3.15 or newer is entirely in ftrace
testruns = parseTraceLog()
else:
# data for kernels older than 3.15 is primarily in dmesg
testruns = loadKernelLog()
for data in testruns:
parseKernelLog(data)
if(sysvals.usecallgraph or sysvals.usetraceevents):
appendIncompleteTraceLog(testruns)
createHTML(testruns)
# Function: runSummary
# Description:
# create a summary of tests in a sub-directory
def runSummary(subdir, output):
global sysvals
# get a list of ftrace output files
files = []
for dirname, dirnames, filenames in os.walk(subdir):
for filename in filenames:
if(re.match('.*_ftrace.txt', filename)):
files.append("%s/%s" % (dirname, filename))
# process the files in order and get an array of data objects
testruns = []
for file in sorted(files):
if output:
print("Test found in %s" % os.path.dirname(file))
sysvals.ftracefile = file
sysvals.dmesgfile = file.replace('_ftrace.txt', '_dmesg.txt')
doesTraceLogHaveTraceEvents()
sysvals.usecallgraph = False
if not sysvals.usetraceeventsonly:
if(not os.path.exists(sysvals.dmesgfile)):
print("Skipping %s: not a valid test input" % file)
continue
else:
if output:
f = os.path.basename(sysvals.ftracefile)
d = os.path.basename(sysvals.dmesgfile)
print("\tInput files: %s and %s" % (f, d))
testdata = loadKernelLog()
data = testdata[0]
parseKernelLog(data)
testdata = [data]
appendIncompleteTraceLog(testdata)
else:
if output:
print("\tInput file: %s" % os.path.basename(sysvals.ftracefile))
testdata = parseTraceLog()
data = testdata[0]
data.normalizeTime(data.tSuspended)
link = file.replace(subdir+'/', '').replace('_ftrace.txt', '.html')
data.outfile = link
testruns.append(data)
createHTMLSummarySimple(testruns, subdir+'/summary.html')
# Function: printHelp
# Description:
# print out the help text
def printHelp():
global sysvals
modes = getModes()
print('')
print('AnalyzeSuspend v%.1f' % sysvals.version)
print('Usage: sudo analyze_suspend.py <options>')
print('')
print('Description:')
print(' This tool is designed to assist kernel and OS developers in optimizing')
print(' their linux stack\'s suspend/resume time. Using a kernel image built')
print(' with a few extra options enabled, the tool will execute a suspend and')
print(' capture dmesg and ftrace data until resume is complete. This data is')
print(' transformed into a device timeline and an optional callgraph to give')
print(' a detailed view of which devices/subsystems are taking the most')
print(' time in suspend/resume.')
print('')
print(' Generates output files in subdirectory: suspend-mmddyy-HHMMSS')
print(' HTML output: <hostname>_<mode>.html')
print(' raw dmesg output: <hostname>_<mode>_dmesg.txt')
print(' raw ftrace output: <hostname>_<mode>_ftrace.txt')
print('')
print('Options:')
print(' [general]')
print(' -h Print this help text')
print(' -v Print the current tool version')
print(' -verbose Print extra information during execution and analysis')
print(' -status Test to see if the system is enabled to run this tool')
print(' -modes List available suspend modes')
print(' -m mode Mode to initiate for suspend %s (default: %s)') % (modes, sysvals.suspendmode)
print(' -rtcwake t Use rtcwake to autoresume after <t> seconds (default: disabled)')
print(' [advanced]')
print(' -f Use ftrace to create device callgraphs (default: disabled)')
print(' -filter "d1 d2 ..." Filter out all but this list of dev names')
print(' -x2 Run two suspend/resumes back to back (default: disabled)')
print(' -x2delay t Minimum millisecond delay <t> between the two test runs (default: 0 ms)')
print(' -postres t Time after resume completion to wait for post-resume events (default: 0 S)')
print(' -multi n d Execute <n> consecutive tests at <d> seconds intervals. The outputs will')
print(' be created in a new subdirectory with a summary page.')
print(' [utilities]')
print(' -fpdt Print out the contents of the ACPI Firmware Performance Data Table')
print(' -usbtopo Print out the current USB topology with power info')
print(' -usbauto Enable autosuspend for all connected USB devices')
print(' [android testing]')
print(' -adb binary Use the given adb binary to run the test on an android device.')
print(' The device should already be connected and with root access.')
print(' Commands will be executed on the device using "adb shell"')
print(' [re-analyze data from previous runs]')
print(' -ftrace ftracefile Create HTML output using ftrace input')
print(' -dmesg dmesgfile Create HTML output using dmesg (not needed for kernel >= 3.15)')
	print('  -summary directory  Create a summary of all tests in this dir')
print('')
return True
# ----------------- MAIN --------------------
# exec start (skipped if script is loaded as library)
if __name__ == '__main__':
cmd = ''
cmdarg = ''
multitest = {'run': False, 'count': 0, 'delay': 0}
# loop through the command line arguments
args = iter(sys.argv[1:])
for arg in args:
if(arg == '-m'):
try:
val = args.next()
except:
doError('No mode supplied', True)
sysvals.suspendmode = val
elif(arg == '-adb'):
try:
val = args.next()
except:
doError('No adb binary supplied', True)
if(not os.path.exists(val)):
doError('file doesnt exist: %s' % val, False)
if(not os.access(val, os.X_OK)):
doError('file isnt executable: %s' % val, False)
try:
check = os.popen(val+' version').read().strip()
except:
doError('adb version failed to execute', False)
if(not re.match('Android Debug Bridge .*', check)):
doError('adb version failed to execute', False)
sysvals.adb = val
sysvals.android = True
elif(arg == '-x2'):
if(sysvals.postresumetime > 0):
doError('-x2 is not compatible with -postres', False)
sysvals.execcount = 2
elif(arg == '-x2delay'):
sysvals.x2delay = getArgInt('-x2delay', args, 0, 60000)
elif(arg == '-postres'):
if(sysvals.execcount != 1):
doError('-x2 is not compatible with -postres', False)
sysvals.postresumetime = getArgInt('-postres', args, 0, 3600)
elif(arg == '-f'):
sysvals.usecallgraph = True
elif(arg == '-modes'):
cmd = 'modes'
elif(arg == '-fpdt'):
cmd = 'fpdt'
elif(arg == '-usbtopo'):
cmd = 'usbtopo'
elif(arg == '-usbauto'):
cmd = 'usbauto'
elif(arg == '-status'):
cmd = 'status'
elif(arg == '-verbose'):
sysvals.verbose = True
elif(arg == '-v'):
print("Version %.1f" % sysvals.version)
sys.exit()
elif(arg == '-rtcwake'):
sysvals.rtcwake = True
sysvals.rtcwaketime = getArgInt('-rtcwake', args, 0, 3600)
elif(arg == '-multi'):
multitest['run'] = True
multitest['count'] = getArgInt('-multi n (exec count)', args, 2, 1000000)
multitest['delay'] = getArgInt('-multi d (delay between tests)', args, 0, 3600)
elif(arg == '-dmesg'):
try:
val = args.next()
except:
doError('No dmesg file supplied', True)
sysvals.notestrun = True
sysvals.dmesgfile = val
if(os.path.exists(sysvals.dmesgfile) == False):
doError('%s doesnt exist' % sysvals.dmesgfile, False)
elif(arg == '-ftrace'):
try:
val = args.next()
except:
doError('No ftrace file supplied', True)
sysvals.notestrun = True
sysvals.usecallgraph = True
sysvals.ftracefile = val
if(os.path.exists(sysvals.ftracefile) == False):
doError('%s doesnt exist' % sysvals.ftracefile, False)
elif(arg == '-summary'):
try:
val = args.next()
except:
doError('No directory supplied', True)
cmd = 'summary'
cmdarg = val
sysvals.notestrun = True
if(os.path.isdir(val) == False):
				doError('%s is not accessible' % val, False)
elif(arg == '-filter'):
try:
val = args.next()
except:
doError('No devnames supplied', True)
sysvals.setDeviceFilter(val)
elif(arg == '-h'):
printHelp()
sys.exit()
else:
doError('Invalid argument: '+arg, True)
# just run a utility command and exit
if(cmd != ''):
if(cmd == 'status'):
statusCheck()
elif(cmd == 'fpdt'):
if(sysvals.android):
doError('cannot read FPDT on android device', False)
getFPDT(True)
elif(cmd == 'usbtopo'):
if(sysvals.android):
doError('cannot read USB topology '+\
'on an android device', False)
detectUSB(True)
elif(cmd == 'modes'):
modes = getModes()
print modes
elif(cmd == 'usbauto'):
setUSBDevicesAuto()
elif(cmd == 'summary'):
print("Generating a summary of folder \"%s\"" % cmdarg)
runSummary(cmdarg, True)
sys.exit()
# run test on android device
if(sysvals.android):
if(sysvals.usecallgraph):
doError('ftrace (-f) is not yet supported '+\
'in the android kernel', False)
if(sysvals.notestrun):
doError('cannot analyze test files on the '+\
'android device', False)
# if instructed, re-analyze existing data files
if(sysvals.notestrun):
rerunTest()
sys.exit()
# verify that we can run a test
if(not statusCheck()):
print('Check FAILED, aborting the test run!')
sys.exit()
if multitest['run']:
		# run multiple tests in a separate subdirectory
s = 'x%d' % multitest['count']
subdir = datetime.now().strftime('suspend-'+s+'-%m%d%y-%H%M%S')
os.mkdir(subdir)
for i in range(multitest['count']):
if(i != 0):
print('Waiting %d seconds...' % (multitest['delay']))
time.sleep(multitest['delay'])
print('TEST (%d/%d) START' % (i+1, multitest['count']))
runTest(subdir)
print('TEST (%d/%d) COMPLETE' % (i+1, multitest['count']))
runSummary(subdir, False)
else:
# run the test in the current directory
runTest(".")
| 120,394 | 32.517539 | 438 |
py
|
f2fs-stable-linux-3.18.y
|
f2fs-stable-linux-3.18.y/scripts/tracing/draw_functrace.py
|
#!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <[email protected]>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more
human-readable view of the call stack by drawing a textual but
hierarchical tree of calls. Only the function names and the call
times are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait for a while, but not too long: the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If this function is not
on a parent, then create it as new child of root
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
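# For illustration, a function-tracer line of the (assumed) form
#   bash-16939 [000]  6075.461561: mutex_unlock <-tracing_set_tracer
# is parsed by parseLine() into ('6075.461561', 'mutex_unlock', 'tracing_set_tracer'),
# i.e. (call time, callee, caller).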
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
| 3,560 | 26.392308 | 70 |
py
|
f2fs-stable-linux-3.18.y
|
f2fs-stable-linux-3.18.y/scripts/rt-tester/rt-tester.py
|
#!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
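# A test specification line, as parsed further below, has four colon-separated
# fields: command, opcode, thread id and data. Illustrative (made-up) lines:
#   C: locknowait: 0: 0
#   W: locked:     0: 0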
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
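# Worked example (illustrative): for a mutex-state check such as
# analyse("4321", ["M", "eq", 3], "2"), the digit for thread 2 is extracted as
# 4321 / 10**2 % 10 == 3, which satisfies the "eq" comparison and returns 1.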
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
| 5,305 | 23.228311 | 70 |
py
|
f2fs-stable-linux-3.18.y
|
f2fs-stable-linux-3.18.y/arch/ia64/scripts/unwcheck.py
|
#!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
if len(sys.argv) != 2:
print "Usage: %s FILE" % sys.argv[0]
sys.exit(2)
readelf = os.getenv("READELF", "readelf")
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
rlen_pattern = re.compile(".*rlen=([0-9]+)")
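# Illustrative line matched by start_pattern (format assumed from the regex):
#   <my_func>: [0x4000000000010000-0x4000000000010060]
# For that range, slots = 3 * 0x60 / 16 = 18, which check_func() compares
# against the sum of the rlen= values of the following unwind regions.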
def check_func (func, slots, rlen_sum):
if slots != rlen_sum:
global num_errors
num_errors += 1
if not func: func = "[%#x-%#x]" % (start, end)
print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
return
num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
m = start_pattern.match(line)
if m:
check_func(func, slots, rlen_sum)
func = m.group(1)
start = long(m.group(2), 16)
end = long(m.group(3), 16)
slots = 3 * (end - start) / 16
rlen_sum = 0L
num_funcs += 1
else:
m = rlen_pattern.match(line)
if m:
rlen_sum += long(m.group(1))
check_func(func, slots, rlen_sum)
if num_errors == 0:
print "No errors detected in %u functions." % num_funcs
else:
if num_errors > 1:
err="errors"
else:
err="error"
print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
sys.exit(1)
| 1,714 | 25.384615 | 89 |
py
|
EnergyVariables
|
EnergyVariables-main/src/3_ERA5_PopWeightedTemp.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Remade on 21 feb 2022
@author: Laurens Stoop - [email protected]
Following example by Matteo de Felice: http://www.matteodefelice.name/post/aggregating-gridded-data/
"""
#%%
# =============================================================================
# Dependencies
# =============================================================================
## Importing modules
import xarray as xr
import numpy as np
import regionmask
import geopandas as gpd
import datetime
import os.path
# Select the years to run
years = np.array([
'1950', '1951', '1952',
'1953', '1954', '1955',
'1956', '1957', '1958',
'1959', '1960', '1961',
'1962', '1963', '1964',
'1965', '1966', '1967',
'1968', '1969', '1970',
'1971', '1972', '1973',
'1974', '1975', '1976',
'1977', '1978',
'1979', '1980', '1981',
'1982', '1983', '1984',
'1985', '1986', '1987',
'1988', '1989',
'1990',
'1991', '1992', '1993',
'1994', '1995', '1996',
'1997', '1998', '1999',
'2000', '2001', '2002',
'2003', '2004', '2005',
'2006', '2007', '2008',
'2009', '2010', '2011',
'2012', '2013', '2014',
'2015', '2016', '2017',
'2018', '2019',
'2020'
])
# Set the path for the data
PATH_TO_NUTS0 = '/media/DataStager1/Other/RegionDefinitions/ENTSO-E_StudyZones/DTU-PECD22-Polygons_SZ_VF2021.shp'
# PATH_TO_NUTS1 = '/media/DataStager1/Other/RegionDefinitions/ENTSO-E_StudyZones/DTU-PECD22-Polygons_VF2021.shp'
# Read NetCDF
# FOLDER_WITH_NETCDF = '/media/DataGate2/ERA5/BASE2/'
FOLDER_WITH_NETCDF = '/media/DataStager2/ERA5_BASE2_t2m/'
FOLDER_STORE = '/media/DataStager2/ERA5_PWT/'
# =============================================================================
# Open population & demand
# =============================================================================
# open the population file & select 2020
# Terminal job: cdo -select,timestep=1,2,3,4,5 gpw_v4_population_count_rev11_ERA5-remapcon.nc GPW_ERA5_data-only.nc
pop_file = '/media/DataStager1/Other/PopulationGDP/GPDW_v4/GPW_ERA5_data-only.nc'
dsp = xr.open_dataset(pop_file) #(2000, 2005, 2010, 2015, 2020, [Not important variables])
dsp = dsp.rename({'Population Count, v4.11 (2000, 2005, 2010, 2015, 2020): 2.5 arc-minutes' : 'pop'} )
dsp = dsp.isel(time=4).reset_coords().drop('time')
dsp = dsp.rename({'longitude': 'lon','latitude': 'lat'})
print('NOTIFY: Initialization is complete, Skynet active')
#%%
# =============================================================================
# Load in the base shapefile
# =============================================================================
# Load the shapefile
nuts0 = gpd.read_file(PATH_TO_NUTS0)
# nuts1 = gpd.read_file(PATH_TO_NUTS1)
# There are regions we do not consider
not_considered_nuts0 = [
            'JO00', 'JO00_OFF', # Jordan
# 'MA00', 'MA00_OFF', # Marocco
# 'SY00', 'SY00_OFF', # Syria
# 'TN00', 'TN00_OFF', # Tunisia
'IS00', 'IS00_OFF', # Iceland
# 'IL00', 'IL00_OFF', # Israel
            'PS00', 'PS00_OFF', # Palestine & Gaza
# 'EG00', 'EG00_OFF', # Egypt
# 'DZ00', 'DZ00_OFF', # Algeria
# 'LY00', 'LY00_OFF', # Libya
#
            # Regions not considered due to resolution or model constraints
            'SI00_OFF', # Slovenia offshore is too small for ERA5 data
            'BA00_OFF', # Bosnia and Herzegovina offshore region too small for ERA5 data
            'MT00', # Malta is too small for data on the island
]
# Now set all nuts0 regions we do not consider to NaN's
for NC in not_considered_nuts0:
nuts0 = nuts0.where(nuts0['Study Zone'] != NC)
# Removal of all NaN's from the table
nuts0 = nuts0.dropna()
# # There is an empty LY00 zone in there
# nuts1.iloc[246]
# nuts1 = nuts1.drop(index=246)
# To check some info you could read the headers of the shapefiles
# nuts0.head() # to check the contents --> 121 on-/offshore definitions
# nuts1.head() # to check the contents --> 262 on-/offshore definitions
#%%
# =============================================================================
# Load in the data files themselves
# =============================================================================
# The mega loop
for year in years:
# for month in ['01']: #, '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12']:
# Define the file name
# file_save = FOLDER_STORE+'ERA5_PWT_'+year+month+'.nc'
file_save = FOLDER_STORE+'ERA5_PWT_'+year+'.nc'
    # Check if file already exists, then get out
    if os.path.isfile(file_save) == True:
        # Tell us the file exists
        print('NOTIFY: Already applied for year '+year+'!')
# IF the file doesn't exist, apply the distribution
elif os.path.isfile(file_save) == False:
print('NOTIFY: Working on year '+year+'!')
# Load in the NetCDF
ds = xr.open_mfdataset(FOLDER_WITH_NETCDF+'ERA5-EU_'+year+'*.nc') #, chunks = {'time': 8760})
# ds = xr.open_dataset(FOLDER_WITH_NETCDF+'ERA5-EU_'+year+month+'.nc') #, chunks = {'time': 8760})
# remaming the coordinates
ds = ds.rename({'longitude': 'lon','latitude': 'lat'})
        ds['t2m'] = ds.t2m - 273.15  # convert from Kelvin to degrees Celsius
# Adding the population based weights
weights_pop = dsp.pop.fillna(0)
weights_pop.name = 'weights'
#%%
# =============================================================================
# Now we define the regionmask and to later apply it
# =============================================================================
# CALCULATE MASK
SZ0_mask_poly = regionmask.Regions(name = 'ENTSO-E_StudyZone0_Mask', numbers = np.arange(0,len(nuts0)), abbrevs = list(nuts0['Study Zone']), outlines = list(nuts0.geometry.values[i] for i in np.arange(0,len(nuts0)))) # names = list(nuts0['Study Zone']),
# SZ1_mask_poly = regionmask.Regions(name = 'ENTSO-E_StudyZone1_Mask', numbers = np.arange(0,len(nuts1)), abbrevs = list(nuts1['Code']), outlines = list(nuts1.geometry.values[i] for i in np.arange(0,len(nuts0)))) # names = list(nuts1['Study Zone']), # print(nuts_mask_poly)
# Create the mask
mask = SZ0_mask_poly.mask(ds.isel(time = 0), method = None)
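        # The resulting 2D field holds, per grid cell, the integer index of the study
        # zone the cell falls in (NaN outside all polygons), so a single zone can be
        # picked out later with mask.where(mask == ID_REGION).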
# mask = SZ1_mask_poly.mask(ds.isel(time = 0), method = None)
# mask # To check the contents of the mask defined
#%%
# =============================================================================
# Now selecting a region to select the data
# =============================================================================
# Prepare a dataset for filling with regional population weighted mean t2m data
PWT=[]
REGION_NAME=[]
# Select a region (the Netherlands is 12/54 in NUTS0)
for ID_REGION in np.arange(0,len(nuts0)):
# for ID_REGION in np.arange(0,len(nuts1)):
# for ID_REGION in [7, 36, 48, 49, 50, 92, 95, 97, 99, 100]: # the interesting countries
# Determine the region name
region_name = nuts0.iloc[ID_REGION]['Study Zone']
print(' : ('+str(ID_REGION+1)+'/112) '+region_name+'!')
if region_name[-4:] != '_OFF':
# Select the lat/lon combo's as vector to use later
lat = mask.lat.values
lon = mask.lon.values
# We select the region under consideration
sel_mask = mask.where(mask == ID_REGION).values
# Select the specific lat/lon combo that is the minimum bounding box
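                # A row/column of the mask that is all-NaN lies entirely outside the zone,
                # so keeping only the rows/columns with at least one valid cell gives the
                # minimal bounding box in lat/lon.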
id_lon = lon[np.where(~np.all(np.isnan(sel_mask), axis=0))]
id_lat = lat[np.where(~np.all(np.isnan(sel_mask), axis=1))]
# This is the fancy loop by Matteo that uses the compute dask function to load and slice the data
out_sel = ds.sel(lat = slice(id_lat[0], id_lat[-1]), lon = slice(id_lon[0], id_lon[-1])).compute().where(mask == ID_REGION)
# Weighted mean
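                # The weighted mean implements sum_i(pop_i * t2m_i) / sum_i(pop_i) over the
                # grid cells of the zone, i.e. the population-weighted temperature.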
out_sel_of = out_sel.weighted(weights_pop)
PWT.append(out_sel_of.mean(("lat","lon")).t2m.values)
# Just a list of names
REGION_NAME.append(region_name)
# also fix Malta
elif region_name == 'MT00_OFF':
# Select the lat/lon combo's as vector to use later
lat = mask.lat.values
lon = mask.lon.values
# We select the region under consideration
sel_mask = mask.where(mask == ID_REGION).values
# Select the specific lat/lon combo that is the minimum bounding box
id_lon = lon[np.where(~np.all(np.isnan(sel_mask), axis=0))]
id_lat = lat[np.where(~np.all(np.isnan(sel_mask), axis=1))]
# This is the fancy loop by Matteo that uses the compute dask function to load and slice the data
out_sel = ds.sel(lat = slice(id_lat[0], id_lat[-1]), lon = slice(id_lon[0], id_lon[-1])).compute().where(mask == ID_REGION)
# Weighted mean
out_sel_of = out_sel.weighted(weights_pop)
PWT.append(out_sel_of.mean(("lat","lon")).t2m.values)
# Just a list of names
REGION_NAME.append('MT00')
# out of the region loop we create new arrays with the info
ds_save = xr.Dataset()
ds_save['PWT'] = xr.DataArray(PWT, coords=[REGION_NAME, ds.time], dims=["region", "time"])
#%%
# =============================================================================
# Setting units & saving
# =============================================================================
# Setting the general dataset attributes
ds_save.attrs.update(
author = 'Laurens Stoop UU/KNMI/TenneT',
units = '[a.u.]',
created = datetime.datetime.today().strftime('%d-%m-%Y'),
map_area = 'Europe',
region_definition = 'ENTSO-E StudyZones at national level aggregated',
            data_source = 'Population weighted temperature for each ENTSO-E zone, contains modified Copernicus Climate Change Service information [28-02-2022]'
)
# Saving the file
ds_save.to_netcdf(file_save, encoding={'time':{'units':'days since 1900-01-01'}})
| 11,529 | 42.183521 | 292 |
py
|
EnergyVariables
|
EnergyVariables-main/src/1_ERA5_CF-MZ_to-csv.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Restructured on Sun 26 Jan 2022 17:04
@author: Laurens Stoop - [email protected]
Following example by Matteo de Felice: http://www.matteodefelice.name/post/aggregating-gridded-data/
"""
#%%
# =============================================================================
# Dependencies
# =============================================================================
## Importing modules
import xarray as xr
import datetime
import numpy as np
import pandas as pd
import os.path
# Select the years to run
years = np.array([
'1950', '1951', '1952',
'1953', '1954', '1955',
'1956', '1957', '1958',
'1959', '1960', '1961',
'1962', '1963', '1964',
'1965', '1966', '1967',
'1968', '1969', '1970',
'1971', '1972', '1973',
'1974', '1975', '1976',
'1977', '1978',
'1979', '1980', '1981',
'1982', '1983', '1984',
'1985', '1986', '1987',
'1988', '1989',
'1990',
'1991', '1992', '1993',
'1994', '1995', '1996',
'1997', '1998', '1999',
'2000', '2001', '2002',
'2003', '2004', '2005',
'2006', '2007', '2008',
'2009', '2010', '2011',
'2012', '2013', '2014',
'2015', '2016', '2017',
'2018', '2019',
'2020'
])
# Read NetCDF
FOLDER_WITH_NETCDF = '/media/DataStager2/ERA5_CF_MZ/'
FOLDER_STORE = '/media/DataStager2/ERA5_CF_MZ/csv/'
print('NOTIFY: Initialization is complete, Skynet active')
#%%
# =============================================================================
# Load in the base shapefile
# =============================================================================
#%%
# =============================================================================
# Load in the data files themselves
# =============================================================================
# The mega loop
for year in years:
# Define the file name
file_save = FOLDER_STORE+'ERA5_CF_MZ_'+year+'.csv'
    # Check if file already exists, then get out
    if os.path.isfile(file_save) == True:
        # Tell us the file exists
        print('NOTIFY: Already applied for year '+year+'!')
# IF the file doesn't exist, apply the distribution
elif os.path.isfile(file_save) == False:
print('NOTIFY: Working on year '+year+'!')
# Load in the NetCDF
ds = xr.open_mfdataset(FOLDER_WITH_NETCDF+'ERA5_CF_MZ_WM_'+year+'*.nc')
# make it a csv
df_won = ds.WON.to_dataframe('region').unstack().T
df_wof = ds.WOF.to_dataframe('region').unstack().T
df_spv = ds.SPV.to_dataframe('region').unstack().T
#%%
# =============================================================================
# Setting units & saving
# =============================================================================
df_won.to_csv(FOLDER_STORE+'ERA5_CF-MZ_'+year+'_WON.csv')
df_wof.to_csv(FOLDER_STORE+'ERA5_CF-MZ_'+year+'_WOF.csv')
df_spv.to_csv(FOLDER_STORE+'ERA5_CF-MZ_'+year+'_SPV.csv')
| 3,355 | 29.788991 | 100 |
py
|
EnergyVariables
|
EnergyVariables-main/src/1B_ERA5_PECD_comp.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 27 09:34:10 2022
@author: Laurens Stoop - [email protected]
"""
#%%
# =============================================================================
# Dependencies
# =============================================================================
## Importing modules
import xarray as xr
import numpy as np
import regionmask
import geopandas as gpd
import datetime
import pandas as pd
import matplotlib.pyplot as plt
import os.path
# Location of datasets
FOLDER_EV = '/media/DataStager2/ERA5_EV/'
FOLDER_PECD = '/media/DataStager1/Other/PECDv3.1/'
#%%
# =============================================================================
# Loading the EV & PECD data
# =============================================================================
# EV versions
dsR = xr.open_dataset(FOLDER_EV+'ERA5_EV_RM_2010.nc')
dsW = xr.open_dataset(FOLDER_EV+'ERA5_EV_WM_2010.nc')
# Load the PECD
df_TR = pd.read_csv(FOLDER_PECD+'PECD_Onshore_2030_edition 2021.3_TR00.csv', sep=',', skiprows=10, header=[0]) #, names=['Code', 'Type', 'Capacity'])
| 1,104 | 25.95122 | 149 |
py
|
EnergyVariables
|
EnergyVariables-main/src/6_LoadModelLSTR.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Made on 28 feb 2022
@author: Laurens Stoop - [email protected]
"""
#%%
# =============================================================================
# Dependencies
# =============================================================================
## Importing modules
import xarray as xr
import numpy as np
import pandas as pd
import datetime
import matplotlib.pyplot as plt
from scipy.optimize import least_squares as lsq
from scipy.optimize import curve_fit
from scipy import stats
#import local package
# import rtts
# Versions of the
TYNDP_scenarios_capacity = np.array([
'DE_2030',
'DE_2040',
'GA_2030',
'GA_2040',
'NT_2025',
'NT_2030',
'NT_2040'
])
region_names = [
'AL00','AT00','BA00','BE00','BG00',
'CH00','CY00','CZ00','DE00','DKE1',
'DKKF','DKW1','EE00','EL00','EL03',
'ES00','FI00','FR00','FR15','HR00',
'HU00','IE00','ITCN','ITCS','ITN1',
'ITS1','ITSA','ITSI','LT00','LU00',
'LV00','ME00','MK00','MT00','NL00',
'NOM1','NON1','NOS0','PL00','PT00',
'RO00','RS00','SE01','SE02','SE03',
'SE04','SI00','SK00','TR00','UA01',
'UK00','UKNI']
# Read NetCDF
FOLDER_STORE = '/media/DataStager2/ERA5_LoadModel/'
print('NOTIFY: Initialization is complete, Skynet active')
#%%
scenario_capacity = 'DE_2030'
region_name = 'FR00'
#%%
# =============================================================================
# Open the files
# =============================================================================
# open the dataset
ds = xr.open_dataset(FOLDER_STORE+'ERA5_LoadModelData_'+scenario_capacity+'.nc')
# prep the data to a single region
dsr = xr.Dataset()
dsr = ds.sel(region=region_name)
# Convert to a dataframe
df = dsr.to_dataframe()
#%%
# =============================================================================
# Splitting the data
# =============================================================================
# x_train, x_test, y_train, y_test = rtts.region_train_test_split(df.drop(columns=['DEM']), df.DEM)
#%%
# =============================================================================
# Fitting the data
# =============================================================================
# Function definition
def LSTRmodel(temp, C):
G = 1/(1 + np.exp( -C[4]*(temp - C[5])))
return (C[0] + C[1]*temp)*(1 - G) + (C[2] + C[3]*temp)*G
# Function definition
def LSTRmodelResidual(C,temp, dem):
return LSTRmodel(temp, C) - dem
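# The logistic smooth transition regression (LSTR) above blends two linear branches,
#   D(T) = (C0 + C1*T) * (1 - G(T)) + (C2 + C3*T) * G(T)
# with G(T) = 1 / (1 + exp(-C4*(T - C5))),
# so demand follows one branch at low temperatures (heating) and the other at high
# temperatures (cooling), switching smoothly around the inflection point C5.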
# Initial guess of the fit
x0 = [
75000., # Zero crossing left branch
-3200., # slope left branch
8000., # Zero crossing right branch
1000., # slope right branch
    1., # Gamma factor (unknown, guess = 1)
15.4 # Inflection point from literature (J. Moral-Carcedo, J. Vicens-Otero / Energy Economics 27 (2005) 477–494)
]
# The fitting procedure
fitvalues = lsq(LSTRmodelResidual, x0, loss='cauchy', f_scale=0.1, args=(dsr.PWT, dsr.DEM))
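# Note: least_squares with loss='cauchy' applies rho(z) = ln(1 + z) to the squared
# residuals (with f_scale setting the scale at which the damping kicks in), which
# limits the influence of outlying demand hours compared to a plain least-squares fit.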
# perr = np.sqrt(np.diag(fitvalues.jac))
# =============================================================================
# Check if data is good
# =============================================================================
# temp_test = np.linspace(dsr.PWT.min().values, dsr.PWT.max().values, 300)
# demand_test = LSTRmodel(temp_test, fitvalues.x)
# The data moddeled to which the fit is made
dsr['lstrDEM'] = LSTRmodel(dsr.PWT, fitvalues.x)
# The r value
r = stats.linregress(dsr.DEM,dsr.lstrDEM)
# The RMSE value
rmse = np.sqrt(((dsr.DEM - dsr.lstrDEM) ** 2).mean())
plt.figure(figsize=(8,8))
ax = plt.axes()
plt.scatter(dsr.DEM, dsr.lstrDEM, s=0.3)
plt.title('Applied for FR00, DE-2030; R='+str(r.rvalue.round(3))+', RMSE='+str(rmse.values.round(2)))
plt.xlabel('Measured demand [MWh]')
plt.ylabel('LSTR modelled demand [MWh]')
plt.ylim(2e4, 12e4)
plt.xlim(2e4, 12e4)
plt.tight_layout()
# plt.ioff()
plt.savefig('/home/stoop/Documents/Project/EnergyVariables-EV/results/figures/LSTRmodel/LSTRmodel_'+scenario_capacity+'_'+region_name+'.png')
plt.figure(figsize=(8,8))
ax = plt.axes()
dsr.lstrDEM.plot(ax=ax)
dsr.DEM.plot(ax=ax)
plt.title('Applied for FR00, DE-2030; R='+str(r.rvalue.round(3))+', RMSE='+str(rmse.values.round(2)))
# plt.xlabel('Measured demand [MWh]')
# plt.ylabel('LSTR modelled demand [MWh]')
# plt.ylim(2e4, 12e4)
# plt.xlim('2010-01-01', '2010-06-01')
plt.tight_layout()
# plt.ioff()
plt.savefig('/home/stoop/Documents/Project/EnergyVariables-EV/results/figures/LSTRmodel/OutputComp_'+scenario_capacity+'_'+region_name+'.png')
| 4,636 | 27.447853 | 142 |
py
|
EnergyVariables
|
EnergyVariables-main/src/2_ERA5_EV_ApplyTYNDP.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 27 09:34:10 2022
@author: Laurens Stoop - [email protected]
"""
#%%
# =============================================================================
# Dependencies
# =============================================================================
## Importing modules
import xarray as xr
import numpy as np
import regionmask
import geopandas as gpd
import datetime
import pandas as pd
import matplotlib.pyplot as plt
import os.path
# Location of datasets
FOLDER_CF_MZ = '/media/DataStager2/ERA5_CF_MZ/'
FOLDER_EV = '/media/DataStager2/ERA5_EV/'
# Set the path for the data
PATH_TO_TYNDP = '/media/DataStager1/Other/CapacityDistribution/TYNDP/Originv3/'
# Select the years to run
years = np.array([
'1950', '1951', '1952',
'1953', '1954', '1955',
'1956', '1957', '1958',
'1959', '1960', '1961',
'1962', '1963', '1964',
'1965', '1966', '1967',
'1968', '1969', '1970',
'1971', '1972', '1973',
'1974', '1975', '1976',
'1977', '1978',
'1979', '1980', '1981',
'1982', '1983', '1984',
'1985', '1986', '1987',
'1988', '1989', '1990',
'1991', '1992', '1993',
'1994', '1995', '1996',
'1997', '1998', '1999',
'2000', '2001', '2002',
'2003', '2004', '2005',
'2006', '2007', '2008',
'2009', '2010', '2011',
'2012', '2013', '2014',
'2015', '2016', '2017',
'2018', '2019',
'2020'
])
# Versions of the
TYNDP_scenarios_capacity = np.array([
'DE_2030',
'DE_2040',
'GA_2030',
'GA_2040',
'NT_2025',
'NT_2030',
'NT_2040'
])
#%%
# =============================================================================
# Set the specifics to run over
# =============================================================================
# Set the year to run over
for year in years:
print('NOTIFY: Working on year '+year+'!')
#%%
# =============================================================================
# Loading the EV & PECD data
# =============================================================================
# EV versions
dsW = xr.open_dataset(FOLDER_CF_MZ+'ERA5_CF_MZ_WM_'+year+'.nc')
#Fix a few naming issues
region_name_fix= [
'SE04_OFF', 'SE03_OFF', 'CY00_OFF', 'MT00', 'ITSI', 'LB00', 'PL00', # MT00_OFF -> MT00
'LT00', 'DKKF', 'ITCN_OFF', 'LU00', 'NL00_OFF', 'FR00_OFF',
'FI00_OFF', 'BG00', 'BG00_OFF', 'MA00_OFF', 'NOM1_OFF', 'NON1', 'CZ00',
'SK00', 'CH00', 'IE00_OFF', 'SY00', 'UKNI_OFF', 'TN00_OFF', 'FR15',
'RS00', 'ITN1_OFF', 'NOS0', 'IE00', 'DE00', 'AT00', 'EL00_OFF', # GR00_OFF -> EL00_OFF
'DKE1_OFF', 'MD00', 'ES00', 'AL00', 'SY00_OFF', 'NOS0_OFF', 'HR00_OFF',
'UA02_OFF', 'RO00', 'PT00_OFF', 'ME00', 'HR00', 'DBOT_OFF', 'DKE1',
'LV00', 'NL00', 'TR00', 'NON1_OFF', 'TR00_OFF', 'ITCS_OFF', 'DBUK_OFF',
'RO00_OFF', 'MA00', 'EL00', 'EL03', 'IL00_OFF', 'TN00', 'EG00', 'UA01', # GR00/GR03 -> EL00/EL03
'UA02', 'BE00', 'PL00_OFF', 'ITSA_OFF', 'MK00', 'SE02_OFF', 'SE01_OFF',
'ITN1', 'PT00', 'DE00_OFF', 'AL00_OFF', 'DKW1', 'LV00_OFF', 'BE00_OFF',
'EE00_OFF', 'EG00_OFF', 'UK00', 'BA00', 'SI00', 'UK00_OFF', 'DZ00',
'IL00', 'ME00_OFF', 'CY00', 'UKNI', 'DKW1_OFF', 'LT00_OFF', 'DZ00_OFF',
'NOM1', 'FI00', 'LY00', 'EE00', 'SE01', 'FR00', 'SE02', 'ES00_OFF',
'SE03', 'SE04', 'LY00_OFF', 'HU00', 'ITSA', 'ITSI_OFF', 'LB00_OFF',
'ITCA', 'ITCN', 'ITCS', 'ITS1', 'ITS1_OFF', 'ITCA_OFF' ]
dsW['region'] = region_name_fix
#%%
# =============================================================================
# Load in the datafiles with the capacity distributions
# =============================================================================
# Select the distribution to run over
for scenario_capacity in TYNDP_scenarios_capacity:
print(' :'+scenario_capacity)
# Open the scenario to use
dfC = pd.read_csv(PATH_TO_TYNDP+'TYNDP-'+scenario_capacity+'.csv' )
# Read the index, Transpose, convert to xarray Set the index nicely
dsC = dfC.set_index('Country').transpose().to_xarray()
dsC = dsC.rename({'index':'region'})
#%%
# =============================================================================
# Calculate the Production of Energy from RES
# =============================================================================
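        # Hourly generation per zone follows generation = CF * installed capacity:
        # the capacity factors (0-1) come from the CF dataset and the capacities from
        # the TYNDP scenario table, giving MWh per hour as set in the unit attributes below.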
# Preset a dataset & temp arrays
dsP = xr.Dataset()
pspv=[]
pwon=[]
pwof=[]
REGION_NAME1=[]
REGION_NAME2=[]
# Select a region (the Netherlands is 12/54 in NUTS0)
for REGION in dsC.region.values:
# When a offshore region exist, we also calculate it, otherwise we leave it out
if REGION == 'DKKF':
pwof.append(dsW.WOF.sel(region = REGION) * dsC.OffshoreWind.sel(region=REGION))
REGION_NAME2.append(REGION)
elif REGION+'_OFF' in dsW.region:
# Calculate the output by combining the CF & capacity
pwof.append(dsW.WOF.sel(region = REGION+'_OFF') * dsC.OffshoreWind.sel(region=REGION))
pwon.append(dsW.WON.sel(region = REGION) * dsC.OnshoreWind.sel(region=REGION))
pspv.append(dsW.SPV.sel(region = REGION) * dsC.SolarPV.sel(region=REGION))
# keeping track of coordinates
REGION_NAME1.append(REGION)
REGION_NAME2.append(REGION)
else:
# Calculate the output by combining the CF & capacity
pwon.append(dsW.WON.sel(region = REGION) * dsC.OnshoreWind.sel(region=REGION))
pspv.append(dsW.SPV.sel(region = REGION) * dsC.SolarPV.sel(region=REGION))
# keeping track of coordinates
REGION_NAME1.append(REGION)
# out of the region loop we create new arrays with the info
dsT1 = xr.Dataset()
dsT2 = xr.Dataset()
dsT1['PWON'] = xr.DataArray(pwon, coords=[REGION_NAME1, dsW.time], dims=["region", "time"])
dsT1['PSPV'] = xr.DataArray(pspv, coords=[REGION_NAME1, dsW.time], dims=["region", "time"])
dsT2['PWOF'] = xr.DataArray(pwof, coords=[REGION_NAME2, dsW.time], dims=["region", "time"])
# now we combine the data & fill the gaps with 0
dsN = xr.merge([dsT1, dsT2])
dsN = dsN.fillna(0)
#%%
# =============================================================================
# Save the data
# =============================================================================
# dataset attributes
dsN.attrs.update(
author = 'Laurens Stoop UU/KNMI/TenneT',
created = datetime.datetime.today().strftime('%d-%m-%Y'),
map_area = 'Europe',
region_definition = 'ENTSO-E Marketzones',
TYNDP_scenario = scenario_capacity,
data_source = 'Power generation based on ERA5 reanalysis data, contains modified Copernicus Climate Change Service information [28-02-2022]'
)
# variable attributes
dsN.PWON.attrs.update(
variable = 'Wind onshore electricity generation',
units = 'MWh'
)
dsN.PWOF.attrs.update(
variable = 'Wind offshore electricity generation',
units = 'MWh'
)
dsN.PSPV.attrs.update(
variable = 'Solar PV electricity generation',
units = 'MWh'
)
# Saving the file
dsN.to_netcdf(FOLDER_EV+'ERA5_EV_'+scenario_capacity+'_'+year+'.nc', encoding={'time':{'units':'days since 1900-01-01'}})
dsN.PWOF.to_pandas().transpose().to_csv(FOLDER_EV+'csv/ERA5_EV_WOF_'+scenario_capacity+'_'+year+'.csv')
dsN.PWON.to_pandas().transpose().to_csv(FOLDER_EV+'csv/ERA5_EV_WON_'+scenario_capacity+'_'+year+'.csv')
dsN.PSPV.to_pandas().transpose().to_csv(FOLDER_EV+'csv/ERA5_EV_SPV_'+scenario_capacity+'_'+year+'.csv')
| 8,708 | 38.949541 | 152 |
py
|
EnergyVariables
|
EnergyVariables-main/src/4_TYNDP_demand.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Remade on 25 feb 2022
@author: Laurens Stoop - [email protected]
"""
#%%
# =============================================================================
# Dependencies
# =============================================================================
## Importing modules
import xarray as xr
import numpy as np
import pandas as pd
import datetime
# Versions of the
TYNDP_scenarios_capacity = np.array([
'DE_2030',
'DE_2040',
'GA_2030',
'GA_2040',
'NT_2025',
'NT_2030',
'NT_2040'
])
region_names = [
'AL00','AT00','BA00','BE00','BG00',
'CH00','CY00','CZ00','DE00','DKE1',
'DKKF','DKW1','EE00','EL00','EL03',
'ES00','FI00','FR00','FR15','HR00',
'HU00','IE00','ITCN','ITCS','ITN1',
'ITS1','ITSA','ITSI','LT00',
'LU00',
'LV00','ME00','MK00','MT00','NL00',
'NOM1','NON1','NOS0','PL00','PT00',
'RO00','RS00','SE01','SE02','SE03',
'SE04','SI00','SK00','TR00','UA01',
'UK00','UKNI']
# Read NetCDF
FOLDER_STORE = '/media/DataStager2/ERA5_LoadModel/'
dem_file_loc='/media/DataStager1/Other/ElectricityDemand/TYNDP2020/'
print('NOTIFY: Initialization is complete, Skynet active')
#%%
for scenario_capacity in TYNDP_scenarios_capacity:
DEM=[]
REGION_NAME=[]
print(scenario_capacity)
for region_name in region_names:
print(' : '+region_name+'!')
#%%
# =============================================================================
# Select the relevant Demand data
# =============================================================================
dem=[]
# Append all variables in the timespan
# Attempt for base cases
try:
df = pd.read_excel(dem_file_loc+'Demand_TimeSeries_'+scenario_capacity+'.xlsx', region_name, header=10, nrows=8760)
for year in np.arange(1982,2017):
dem.append(df[year].values)
# In exceptional cases we try something else
except ValueError:
#First exception; luxembourg, add three regions
if region_name == 'LU00':
print('Luxembourg is special'+region_name)
try:
df1 = pd.read_excel(dem_file_loc+'Demand_TimeSeries_'+scenario_capacity+'.xlsx', 'LUB1', header=10, nrows=8760)
df2 = pd.read_excel(dem_file_loc+'Demand_TimeSeries_'+scenario_capacity+'.xlsx', 'LUF1', header=10, nrows=8760)
df3 = pd.read_excel(dem_file_loc+'Demand_TimeSeries_'+scenario_capacity+'.xlsx', 'LUG1', header=10, nrows=8760)
df = df1.fillna(0) + df2.fillna(0) + df3.fillna(0)
except ValueError:
print('LUG/LUB not found in main document ')
df = pd.read_excel(dem_file_loc+'Demand_TimeSeries_'+scenario_capacity+'.xlsx', 'LUG1', header=10, nrows=8760)
for year in np.arange(1982,2017):
dem.append(df[year].values)
# For the Greece regions use correct naming
elif region_name =='EL00':
print('Greece has incorrect region code ')
df = pd.read_excel(dem_file_loc+'Demand_TimeSeries_'+scenario_capacity+'.xlsx', 'GR00', header=10, nrows=8760)
for year in np.arange(1982,2017):
dem.append(df[year].values)
elif region_name =='EL03':
print('Greece has incorrect region code ')
df = pd.read_excel(dem_file_loc+'Demand_TimeSeries_'+scenario_capacity+'.xlsx', 'GR03', header=10, nrows=8760)
for year in np.arange(1982,2017):
dem.append(df[year].values)
else:
print('Tab not found in main document '+region_name)
dem.append(np.zeros([8760,35]))
DEM.append(np.array(dem).flatten())
REGION_NAME.append(region_name)
#%%
# =============================================================================
# Create a time index that is correct (no leap-days)
# =============================================================================
# now create dataframe for the data
dftime = pd.DataFrame(index=pd.date_range("19820101","20161231T23", freq='H'))
# remove leapdays
dftime = dftime[~((dftime.index.month == 2) & (dftime.index.day == 29))]
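    # The TYNDP sheets hold 8760 hourly values per climate year (no 29 February),
    # so leap days are dropped here to keep the calendar index aligned with the
    # stacked demand series.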
#%%
# out of the region loop we create new arrays with the info
ds_save = xr.Dataset()
ds_save['DEM'] = xr.DataArray(DEM, coords=[REGION_NAME, dftime.to_xarray().index], dims=["region", "time"])
# Setting the general dataset attributes
ds_save.attrs.update(
author = 'Laurens Stoop UU/KNMI/TenneT',
units = '[MW]',
created = datetime.datetime.today().strftime('%d-%m-%Y'),
map_area = 'Europe',
region_definition = 'ENTSO-E Zones',
data_source = 'TYNDP demand data time series cleaned & unified [28-02-2022]'
)
ds_save.to_netcdf(FOLDER_STORE+'ERA5_DEM_'+scenario_capacity+'.nc', encoding={'time':{'units':'days since 1900-01-01'}})
| 5,569 | 34.031447 | 131 |
py
|
EnergyVariables
|
EnergyVariables-main/src/rtts.py
|
from itertools import chain
import pandas
import sklearn.model_selection
import sklearn.utils
def _generate_region_labels(x, region_length='7h'):
"""Return indices that label contigious groups of length region_length.
:param pandas.DataFrame x: must be time indexed.
:param region_length: must be interpreteable by :func:`pandas.Timedelta`
:return: array of same length as X with group indices
"""
if not (isinstance(x, (pandas.Series, pandas.DataFrame))
and isinstance(x.index, (pandas.DatetimeIndex, pandas.TimedeltaIndex))):
raise ValueError("x must be a time-indexed DataFrame or Series.")
region_length = pandas.Timedelta(region_length)
return ((x.index - min(x.index)) // region_length).values
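# Example: with an hourly DatetimeIndex and region_length='7h', hours 0-6 of the
# series map to group 0, hours 7-13 to group 1, and so on, because
# (t - t_min) // 7h increases by one every seven hours.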
class RegionShuffleSplit(sklearn.model_selection.GroupShuffleSplit):
def __init__(self, n_splits=4, test_size="default", train_size=None,
random_state=None, region_length='7h'):
"""
Cross-validation iterator for shuffling contiguous regions of `region_length`.
:param int n_splits:
:param test_size:
:type test_size: float, int, None
:param train_size:
:type train_size: float, int, None
:param random_state:
:type random_state: int, Random State instance, None
:param region_length:
:type region_length: Anything interpretable by `pandas.Timedelta`.
This cross-validation iterator uses :class:`sklearn.model_selection.GroupShuffleSplit`.
The n-th group consists of all datapoints that fall into the n-th time interval of length
`region_length` counting from the first datapoint.
In order to work, the data to be split must be a time-indexed
:class:`pandas.DataFrame`.
The parameters except `region_length` work as in
:class:`~sklearn.model_selection.GroupShuffleSplit`.
Most importantly, if `train_size` or `test_size` are floats,
they describe the portion of groups(!) not of data points.
However this only makes a difference if the groups are different in size
(which can happen when data points are missing).
"""
super().__init__(n_splits=n_splits,
test_size=test_size,
train_size=train_size,
random_state=random_state)
self.region_length = region_length
def split(self, x, y=None, groups=None):
"""
Generate indices to split data into training and test set.
:param pandas.DataFrame x: must be time (or timedelta) indexed.
:param array-like y:
:param groups: will be ignored
:return: train,test
train,test are the indices for the respective split.
"""
groups = _generate_region_labels(x, self.region_length)
return super().split(x, y, groups=groups)
def region_train_test_split(*arrays,
region_length='7h',
test_size='default', train_size=None,
random_state=None, shuffle=True, **options):
"""Split arrays or matrices into random train and test subsets.
Similar to :func:`sklearn.model_selection.train_test_split`,
however the splitting is done under one side condition:
Not the datapoints themselves are shuffled but regions consisting of datapoints falling into time intervals
of a certain length are shuffled.
:param arrays: sequence of indexables with same length / shape[0].
:param region_length:
:type region_length: Anything interpretable by :class:`pandas.Timedelta`
:param test_size: Same as in :func:`sklearn.model_selection.train_test_split`
:param train_size: Same as in :func:`sklearn.model_selection.train_test_split`
:param random_state: Same as in :func:`sklearn.model_selection.train_test_split`
:param shuffle: Same as in :func:`sklearn.model_selection.train_test_split`
:param options: passed to :func:`sklearn.model_selection.train_test_split` or ignored.
:return: List containing train-test split of inputs.
:rtype: list, length=2 * len(arrays)
Note that if `train_size` or `test_size` are floats,
they describe the portion of groups(!) not of data points.
However this only makes a difference if the groups are different in size
(which can happen when data points are missing).
If region_length is not None and shuffle is True, the first of `arrays` must(!)
be a time_indexed :class:`pandas.DataFrame`.
If region_length is None or shuffle is False, :func:`~sklearn.model_selection.train_test_split`
from sklearn will be called (with `region_length` ignored.)
>>> import pandas
>>> idx = pandas.date_range('2020-01-01', '2020-01-01 9:00', freq='H')
>>> df = pandas.DataFrame(index=idx, data={'a': range(10)})
>>> train, test = region_train_test_split(df, region_length='2h', test_size=0.4, random_state=0)
>>> train.index
DatetimeIndex(['2020-01-01 02:00:00', '2020-01-01 03:00:00',
'2020-01-01 06:00:00', '2020-01-01 07:00:00',
'2020-01-01 08:00:00', '2020-01-01 09:00:00'],
dtype='datetime64[ns]', freq=None)
>>> test.index
DatetimeIndex(['2020-01-01 00:00:00', '2020-01-01 01:00:00',
'2020-01-01 04:00:00', '2020-01-01 05:00:00'],
dtype='datetime64[ns]', freq=None)
"""
if shuffle is False or region_length is None:
return sklearn.model_selection.train_test_split(*arrays, test_size=test_size, train_size=train_size,
random_state=random_state, shuffle=shuffle, **options)
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
if test_size == 'default':
test_size = None
if test_size is None and train_size is None:
test_size = 0.25
arrays = sklearn.utils.indexable(*arrays)
cv = RegionShuffleSplit(test_size=test_size,
train_size=train_size,
random_state=random_state,
region_length=region_length)
train, test = next(cv.split(x=arrays[0]))
return list(chain.from_iterable((sklearn.utils.safe_indexing(a, train),
sklearn.utils.safe_indexing(a, test)) for a in arrays))
| 6,463 | 42.38255 | 111 |
py
|
EnergyVariables
|
EnergyVariables-main/src/5B_Combine_fix-it.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Made on 28 feb 2022
@author: Laurens Stoop - [email protected]
"""
#%%
# =============================================================================
# Dependencies
# =============================================================================
## Importing modules
import xarray as xr
import numpy as np
import pandas as pd
import datetime
import matplotlib.pyplot as plt
# Read NetCDF
FOLDER_STORE = '/media/DataStager2/ERA5_LoadModel/'
FOLDER_EV = '/media/DataStager2/ERA5_EV/'
print('NOTIFY: Initialization is complete, Skynet active')
#%%
# =============================================================================
# Open the files
# =============================================================================
# open the dataset
dD3 = xr.open_dataset(FOLDER_STORE+'ERA5_DEM_DE_2030.nc').DEM
dD4 = xr.open_dataset(FOLDER_STORE+'ERA5_DEM_DE_2040.nc').DEM
dG3 = xr.open_dataset(FOLDER_STORE+'ERA5_DEM_GA_2030.nc').DEM
dG4 = xr.open_dataset(FOLDER_STORE+'ERA5_DEM_GA_2040.nc').DEM
dN2 = xr.open_dataset(FOLDER_STORE+'ERA5_DEM_NT_2025.nc').DEM
dN3 = xr.open_dataset(FOLDER_STORE+'ERA5_DEM_NT_2030.nc').DEM
dN4 = xr.open_dataset(FOLDER_STORE+'ERA5_DEM_NT_2040.nc').DEM
dG4m = dG4.mean()
dG3m = dG3.mean()
dD3m = dD3.mean()
dD4m = dD4.mean()
print('NOTIFY: Data loaded, working on fixes')
#%%
# =============================================================================
# fix the information when one set per scenario is missing
# =============================================================================
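# Approach: take the missing zone's demand profile from a scenario where it does
# exist, divide by that scenario's overall mean and multiply by the target
# scenario's mean, i.e. rescale the profile shape to the target demand level.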
#TR00 in GA_2030
TMP=dG4.sel(region='TR00')/dG4m*dG3.mean()
dG3 = xr.where(dG3.region == 'TR00', TMP, dG3)
#AL00 in DE_2040
TMP=dD3.sel(region='AL00')/dD3m*dD4m
dD4 = xr.where(dD4.region == 'AL00', TMP, dD4)
#CH00 in GA_2030
TMP=dG4.sel(region='CH00')/dG4m*dG3.mean()
dG3 = xr.where(dG3.region == 'CH00', TMP, dG3)
#CH00 in DE_2040
TMP=dD3.sel(region='CH00')/dD3m*dD4m
dD4 = xr.where(dD4.region == 'CH00', TMP, dD4)
#FR15 in DE_2040
TMP=dD3.sel(region='FR15')/dD3m*dD4m
dD4 = xr.where(dD4.region == 'FR15', TMP, dD4)
#PL00 in DE_2040
TMP=dD3.sel(region='PL00')/dD3m*dD4m
dD4 = xr.where(dD4.region == 'PL00', TMP, dD4)
# =============================================================================
# Fix the information for double missing
# =============================================================================
#AL00 in GA_2030
TMP=dD3.sel(region='AL00') / dD3m *dG3m
dG3 = xr.where(dG3.region == 'AL00', TMP, dG3)
#AL00 in GA_2040
TMP=dD4.sel(region='AL00') / dD4m * dG4m
dG4 = xr.where(dG4.region == 'AL00', TMP, dG4)
#FR00 in GA_2030
TMP=dD3.sel(region='FR00') / dD3m *dG3m
dG3 = xr.where(dG3.region == 'FR00', TMP, dG3)
#FR00 in GA_2040
TMP=dD4.sel(region='FR00') / dD4m * dG4m
dG4 = xr.where(dG4.region == 'FR00', TMP, dG4)
#FR15 in GA_2030
TMP=dD3.sel(region='FR15') / dD3m *dG3m
dG3 = xr.where(dG3.region == 'FR15', TMP, dG3)
#FR15 in GA_2040
TMP=dD4.sel(region='FR15') / dD4m * dG4m
dG4 = xr.where(dG4.region == 'FR15', TMP, dG4)
#PL00 in GA_2030
TMP=dD3.sel(region='PL00') / dD3m *dG3m
dG3 = xr.where(dG3.region == 'PL00', TMP, dG3)
#PL00 in GA_2040
TMP=dD4.sel(region='PL00') / dD4m * dG4m
dG4 = xr.where(dG4.region == 'PL00', TMP, dG4)
#TR00 in DE_2030
TMP=dG3.sel(region='TR00') /dG3m * dD3m
dD3 = xr.where(dD3.region == 'TR00', TMP, dD3)
#TR00 in DE_2040
TMP=dG4.sel(region='TR00') / dG4m * dD4m
dD4 = xr.where(dD4.region == 'TR00', TMP, dD4)
# =============================================================================
# And then there is Ukraine
# =============================================================================
#UA01 in GA_2030
TMP=dN3.sel(region='UA01') / dN3.mean() *dG3m
dG3 = xr.where(dG3.region == 'UA01', TMP, dG3)
#UA01 in GA_2040
TMP=dN4.sel(region='UA01') / dN4.mean() * dG4m
dG4 = xr.where(dG4.region == 'UA01', TMP, dG4)
#UA01 in DE_2030
TMP=dN3.sel(region='UA01') /dN3.mean() * dD3m
dD3 = xr.where(dD3.region == 'UA01', TMP, dD3)
#UA01 in DE_2040
TMP=dN4.sel(region='UA01') / dN4.mean() * dD4m
dD4 = xr.where(dD4.region == 'UA01', TMP, dD4)
# =============================================================================
# Fixing Hungary
# =============================================================================
#HU00 in DE_2040
TMP=dD4.sel(region='HU00') / 100.
dD4 = xr.where(dD4.region == 'HU00', TMP, dD4)
print('NOTIFY: Fixes complete, now cleaning the files')
#%%
# =============================================================================
# Cleaning time
# =============================================================================
dD3 = dD3.sel(time=slice('1982-01-01', '2010-12-31'))
dD4 = dD4.sel(time=slice('1982-01-01', '2010-12-31'))
dG3 = dG3.sel(time=slice('1982-01-01', '2010-12-31'))
dG4 = dG4.sel(time=slice('1982-01-01', '2010-12-31'))
dN2 = dN2.sel(time=slice('1982-01-01', '2010-12-31'))
dN3 = dN3.sel(time=slice('1982-01-01', '2010-12-31'))
dN4 = dN4.sel(time=slice('1982-01-01', '2010-12-31'))
dD3 = dD3.dropna(dim='region')
dD4 = dD4.dropna(dim='region')
dG3 = dG3.dropna(dim='region')
dG4 = dG4.dropna(dim='region')
dN2 = dN2.dropna(dim='region')
dN3 = dN3.dropna(dim='region')
dN4 = dN4.dropna(dim='region')
print('NOTIFY: Saving initiated')
#%%
# =============================================================================
# Data saving
# =============================================================================
# Saving the file
dD3.to_netcdf(FOLDER_EV+'ERA5_EV_DEM_DE_2030.nc', encoding={'time':{'units':'days since 1900-01-01'}})
dD4.to_netcdf(FOLDER_EV+'ERA5_EV_DEM_DE_2040.nc', encoding={'time':{'units':'days since 1900-01-01'}})
dG3.to_netcdf(FOLDER_EV+'ERA5_EV_DEM_GA_2030.nc', encoding={'time':{'units':'days since 1900-01-01'}})
dG4.to_netcdf(FOLDER_EV+'ERA5_EV_DEM_GA_2040.nc', encoding={'time':{'units':'days since 1900-01-01'}})
dN2.to_netcdf(FOLDER_EV+'ERA5_EV_DEM_NT_2025.nc', encoding={'time':{'units':'days since 1900-01-01'}})
dN3.to_netcdf(FOLDER_EV+'ERA5_EV_DEM_NT_2030.nc', encoding={'time':{'units':'days since 1900-01-01'}})
dN4.to_netcdf(FOLDER_EV+'ERA5_EV_DEM_NT_2040.nc', encoding={'time':{'units':'days since 1900-01-01'}})
dD3.to_pandas().transpose().to_csv(FOLDER_EV+'csv/ERA5_EV_DEM_DE_2030.csv')
dD4.to_pandas().transpose().to_csv(FOLDER_EV+'csv/ERA5_EV_DEM_DE_2040.csv')
dG3.to_pandas().transpose().to_csv(FOLDER_EV+'csv/ERA5_EV_DEM_GA_2030.csv')
dG4.to_pandas().transpose().to_csv(FOLDER_EV+'csv/ERA5_EV_DEM_GA_2040.csv')
dN2.to_pandas().transpose().to_csv(FOLDER_EV+'csv/ERA5_EV_DEM_NT_2025.csv')
dN3.to_pandas().transpose().to_csv(FOLDER_EV+'csv/ERA5_EV_DEM_NT_2030.csv')
dN4.to_pandas().transpose().to_csv(FOLDER_EV+'csv/ERA5_EV_DEM_NT_2040.csv')
| 6,718 | 32.427861 | 102 |
py
|
EnergyVariables
|
EnergyVariables-main/src/1B_output_viz.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 27 09:34:10 2022
@author: Laurens Stoop - [email protected]
"""
#%%
# =============================================================================
# Dependencies
# =============================================================================
## Importing modules
import xarray as xr
import numpy as np
import regionmask
import geopandas as gpd
import datetime
import pandas as pd
import matplotlib.pyplot as plt
import os.path
# Location of datasets
FOLDER_EV = '/media/DataStager2/ERA5_CF_MZ/'
FOLDER_PECD = '/media/DataStager1/Other/PECDv3.1/'
year = '2010'
zone = 'NL00'
variable = 'SolarPV'
#%%
# =============================================================================
# Loading the EV & PECD data
# =============================================================================
# EV versions
dsR = xr.open_dataset(FOLDER_EV+'ERA5_CF_MZ_RM_'+year+'.nc')
dsW = xr.open_dataset(FOLDER_EV+'ERA5_CF_MZ_WM_'+year+'.nc')
# Clean the leap-days out of the data
dsR = dsR.sel(time=~((dsR.time.dt.month == 2) & (dsR.time.dt.day == 29)))
dsW = dsW.sel(time=~((dsW.time.dt.month == 2) & (dsW.time.dt.day == 29)))
# Load the PECD
dfP = pd.read_csv(FOLDER_PECD+'PECD_'+variable+'_2030_edition 2021.3_'+zone+'.csv', sep=',', skiprows=10, header=[0]) #, names=['Code', 'Type', 'Capacity'])
# # Stack the PECD data columsn
# part1 = df.iloc[:,0:2]
# part2 = df.iloc[:,2:4]
# new_columns = ["c", "d"]
# part1.columns = new_columns
# part2.columns = new_columns
# print pd.concat([part1, part2], ignore_index=True)
#%%
# =============================================================================
# make some figures
# =============================================================================
plt.figure(figsize=(8,8))
ax = plt.axes()
plt.scatter(dfP['2010'], dsW.SPV.sel(region=zone, time=dsW.time.dt.year == 2010), alpha = 0.2, facecolors='b', s=1, label="CF-Weighted")
plt.scatter(dfP['2010'], dsR.SPV.sel(region=zone, time=dsR.time.dt.year == 2010), alpha = 0.2, facecolors='g', s=1, label="Regular")
plt.legend(loc="upper left", markerscale=8)
plt.xlabel('PECDV3.1 data')
plt.ylabel('ERA5 data')
plt.title(zone+' '+variable)
plt.ylim(0, 1)
plt.xlim(0, 1)
#%%
# =============================================================================
# Bias adjustment
# =============================================================================
dfP.drop(['Date', 'Hour'], axis=1).mean().mean()
| 2,478 | 28.86747 | 156 |
py
|
EnergyVariables
|
EnergyVariables-main/src/5_Combine_PWT-Dem.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Remade on 25 feb 2022
@author: Laurens Stoop - [email protected]
"""
#%%
# =============================================================================
# Dependencies
# =============================================================================
## Importing modules
import xarray as xr
import numpy as np
import pandas as pd
import datetime
import matplotlib.pyplot as plt
# Versions of the
TYNDP_scenarios_capacity = np.array([
'DE_2030',
'DE_2040',
'GA_2030',
'GA_2040',
'NT_2025',
'NT_2030',
'NT_2040'
])
region_names = [
'AL00','AT00','BA00','BE00','BG00',
'CH00','CY00','CZ00','DE00','DKE1',
'DKKF','DKW1','EE00','EL00','EL03',
'ES00','FI00','FR00','FR15','HR00',
'HU00','IE00','ITCN','ITCS','ITN1',
'ITS1','ITSA','ITSI','LT00',
'LU00',
'LV00','ME00','MK00','MT00','NL00',
'NOM1','NON1','NOS0','PL00','PT00',
'RO00','RS00','SE01','SE02','SE03',
'SE04','SI00','SK00','TR00','UA01',
'UK00','UKNI']
region_names_fix = ['MT00', 'ITSI', 'LB00', 'PL00', 'LT00', 'LU00', 'BG00', 'NON1', 'CZ00',
'SK00', 'CH00', 'SY00', 'FR15', 'RS00', 'NOS0', 'IE00', 'DE00', 'AT00',
'MD00', 'ES00', 'AL00', 'RO00', 'ME00', 'HR00', 'DKE1', 'LV00', 'NL00',
'TR00', 'MA00', 'EL00', 'EL03', 'TN00', 'EG00', 'UA01', 'UA02', 'BE00', # Changed GR to EL
'MK00', 'ITN1', 'PT00', 'DKW1', 'UK00', 'BA00', 'SI00', 'DZ00', 'IL00',
'CY00', 'UKNI', 'NOM1', 'FI00', 'LY00', 'EE00', 'SE01', 'FR00', 'SE02',
'SE03', 'SE04', 'HU00', 'ITSA', 'ITCA', 'ITCN', 'ITCS', 'ITS1']
# Read NetCDF
FOLDER_STORE = '/media/DataStager2/ERA5_LoadModel/'
FOLDER_PWT = '/media/DataStager2/ERA5_PWT/'
print('NOTIFY: Initialization is complete, Skynet active')
#%%
# =============================================================================
# Open the files
# =============================================================================
scenario_capacity2 = ['DE_2030']
region_name = 'FR00'
# for scenario_capacity in TYNDP_scenarios_capacity:
for scenario_capacity in scenario_capacity2:
# open data
dsd = xr.open_dataset(FOLDER_STORE+'ERA5_DEM_'+scenario_capacity+'.nc')
dst = xr.open_mfdataset(FOLDER_PWT+'ERA5_PWT_*.nc' )
# make sure to only use similair time period
dst = dst.sel(time=slice('1982-01-01', '2016-12-31'))
dst = dst.sel(time=~((dst.time.dt.month == 2) & (dst.time.dt.day == 29)))
dst['region'] = region_names_fix
# create one dataset
ds = xr.Dataset()
ds['PWT'] = dst.PWT
ds['DEM'] = dsd.DEM
# some regions are not listed
    ds = ds.fillna(0)
# Setting the general dataset attributes
ds.attrs.update(
author = 'Laurens Stoop UU/KNMI/TenneT',
created = datetime.datetime.today().strftime('%d-%m-%Y'),
map_area = 'Europe',
region_definition = 'ENTSO-E Zones',
data_source = 'TYNDP demand data and population weighted ERA5 temperature [28-02-2022]'
)
# ds.to_netcdf(FOLDER_STORE+'ERA5_LoadModelData_'+scenario_capacity+'.nc', encoding={'time':{'units':'days since 1900-01-01'}})
#%%
# =============================================================================
# Make some figures
# =============================================================================
ds = ds.sel(time=slice('2010-01-01', '2016-12-31'),drop=True)
plt.close('all')
fig = plt.figure()
plt.scatter(dst.PWT.sel(region=region_name),dsd.DEM.sel(region=region_name), s=0.3)
plt.xlabel('Population weighted temperature [degC]')
plt.ylabel('Estimated load [MW]')
plt.title('Time slice 2010-2016')
plt.tight_layout()
# plt.ioff()
# plt.savefig('/home/stoop/Documents/Project/EnergyVariables-EV/results/figures/DemandPWT/DemPwT_'+scenario_capacity+'_'+region_name+'.png')
#%%
# =============================================================================
# Filtered view day of week
# =============================================================================
ds1 = ds.where(ds['time.dayofweek'] == 0, drop=True)
ds2 = ds.where(ds['time.dayofweek'] == 1, drop=True)
ds3 = ds.where(ds['time.dayofweek'] == 2, drop=True)
ds4 = ds.where(ds['time.dayofweek'] == 3, drop=True)
ds5 = ds.where(ds['time.dayofweek'] == 4, drop=True)
ds6 = ds.where(ds['time.dayofweek'] == 5, drop=True)
ds7 = ds.where(ds['time.dayofweek'] == 6, drop=True)
fig = plt.figure()
plt.scatter(ds1.PWT.sel(region=region_name),ds1.DEM.sel(region=region_name), s=0.1)
plt.scatter(ds2.PWT.sel(region=region_name),ds2.DEM.sel(region=region_name), s=0.1)
plt.scatter(ds3.PWT.sel(region=region_name),ds3.DEM.sel(region=region_name), s=0.1)
plt.scatter(ds4.PWT.sel(region=region_name),ds4.DEM.sel(region=region_name), s=0.1)
plt.scatter(ds5.PWT.sel(region=region_name),ds5.DEM.sel(region=region_name), s=0.1)
plt.scatter(ds6.PWT.sel(region=region_name),ds6.DEM.sel(region=region_name), s=0.1)
plt.scatter(ds7.PWT.sel(region=region_name),ds7.DEM.sel(region=region_name), s=0.1)
plt.xlabel('Population weighted temperature [degC]')
plt.ylabel('Estimated load [MW]')
plt.title('Time slice 2010-2016')
plt.tight_layout()
# plt.ioff()
# plt.savefig('/home/stoop/Documents/Project/EnergyVariables-EV/results/figures/DemandPWT/DemPwT_'+scenario_capacity+'_'+region_name+'_dow.png')
#%%
# =============================================================================
# Filtered view; hour of day
# =============================================================================
ds1 = ds.where(ds['time.hour'] == 0, drop=True)
ds2 = ds.where(ds['time.hour'] == 3, drop=True)
ds3 = ds.where(ds['time.hour'] == 6, drop=True)
ds4 = ds.where(ds['time.hour'] == 9, drop=True)
ds5 = ds.where(ds['time.hour'] == 12, drop=True)
ds6 = ds.where(ds['time.hour'] == 15, drop=True)
ds7 = ds.where(ds['time.hour'] == 18, drop=True)
ds8 = ds.where(ds['time.hour'] == 21, drop=True)
fig = plt.figure()
plt.scatter(ds1.PWT.sel(region=region_name),ds1.DEM.sel(region=region_name), s=0.5, c='darkblue')
plt.scatter(ds8.PWT.sel(region=region_name),ds8.DEM.sel(region=region_name), s=0.5, c='darkblue')
plt.scatter(ds2.PWT.sel(region=region_name),ds2.DEM.sel(region=region_name), s=0.5, c='blue')
plt.scatter(ds7.PWT.sel(region=region_name),ds7.DEM.sel(region=region_name), s=0.5, c='blue')
plt.scatter(ds3.PWT.sel(region=region_name),ds3.DEM.sel(region=region_name), s=0.5, c='green')
plt.scatter(ds6.PWT.sel(region=region_name),ds6.DEM.sel(region=region_name), s=0.5, c='green')
plt.scatter(ds4.PWT.sel(region=region_name),ds4.DEM.sel(region=region_name), s=0.5, c='red')
plt.scatter(ds5.PWT.sel(region=region_name),ds5.DEM.sel(region=region_name), s=0.5, c='red')
plt.xlabel('Population weighted temperature [degC]')
plt.ylabel('Estimated load [MW]')
plt.title('Time slice 2010-2016')
plt.tight_layout()
# plt.ioff()
plt.savefig('/home/stoop/Documents/Project/EnergyVariables-EV/results/figures/DemandPWT/DemPwT_'+scenario_capacity+'_'+region_name+'_hour.png')
| 7,123 | 36.104167 | 144 |
py
|
EnergyVariables
|
EnergyVariables-main/src/1_ERA5_marketzones.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Restructured on Sun 26 Jan 2022 17:04
@author: Laurens Stoop - [email protected]
Following example by Matteo de Felice: http://www.matteodefelice.name/post/aggregating-gridded-data/
"""
#%%
# =============================================================================
# Dependencies
# =============================================================================
## Importing modules
import xarray as xr
import numpy as np
import regionmask
import geopandas as gpd
import datetime
# import pandas as pd
import matplotlib.pyplot as plt
import os.path
# Select the years to run
years = np.array([
# '1950', '1951', '1952',
# '1953', '1954', '1955',
# '1956', '1957', '1958',
# '1959', '1960', '1961',
# '1962', '1963', '1964',
# '1965', '1966', '1967',
# '1968', '1969', '1970',
# '1971', '1972', '1973',
# '1974', '1975', '1976',
# '1977', '1978',
# '1979', '1980', '1981',
# '1982', '1983', '1984',
# '1985', '1986', '1987',
# '1988', '1989',
# '1990',
# '1991', '1992', '1993',
# '1994', '1995', '1996',
# '1997', '1998', '1999',
# '2000', '2001', '2002',
# '2003', '2004', '2005',
# '2006', '2007', '2008',
# '2009', '2010', '2011',
# '2012', '2013', '2014',
# '2015', '2016', '2017',
# '2018', '2019',
'2020'
])
# Set the path for the data
PATH_TO_NUTS0 = '/media/DataStager1/Other/RegionDefinitions/ENTSO-E_StudyZones/DTU-PECD22-Polygons_SZ_VF2021.shp'
# PATH_TO_NUTS1 = '/media/DataStager1/Other/RegionDefinitions/ENTSO-E_StudyZones/DTU-PECD22-Polygons_VF2021.shp'
# Read NetCDF
# FOLDER_WITH_NETCDF = '/media/DataStager1/ERA5_CF/'
FOLDER_WITH_NETCDF = '/media/DataGate2/ERA5/CF/'
# FOLDER_STORE = '/media/DataStager2/ERA5_EV/'
FOLDER_STORE = '/media/DataGate2/ERA5/CF_MZ/'
# open the mean file
mean_file = '/media/DataStager1/ERA5mean1991_2020.nc'
dsm = xr.open_dataset(mean_file)
dsm = dsm.mean(dim='time')
dsm = dsm.rename({'longitude': 'lon','latitude': 'lat'})
print('NOTIFY: Initialization is complete, Skynet active')
#%%
# =============================================================================
# Load in the base shapefile
# =============================================================================
# Load the shapefile
nuts0 = gpd.read_file(PATH_TO_NUTS0)
# nuts1 = gpd.read_file(PATH_TO_NUTS1)
# There are regions we do not consider
not_considered_nuts0 = [
            'JO00', 'JO00_OFF', # Jordan
# 'MA00', 'MA00_OFF', # Marocco
# 'SY00', 'SY00_OFF', # Syria
# 'TN00', 'TN00_OFF', # Tunisia
'IS00', 'IS00_OFF', # Iceland
# 'IL00', 'IL00_OFF', # Israel
            'PS00', 'PS00_OFF', # Palestine & Gaza
# 'EG00', 'EG00_OFF', # Egypt
# 'DZ00', 'DZ00_OFF', # Algeria
# 'LY00', 'LY00_OFF', # Libya
#
            # Regions not considered due to resolution or model constraints
            'SI00_OFF', # Slovenia offshore is too small for ERA5 data
            'BA00_OFF', # Bosnia and Herzegovina offshore region too small for ERA5 data
            'MT00', # Malta is too small for data on the island
]
# Now set all nuts0 regions we do not consider to NaN's
for NC in not_considered_nuts0:
nuts0 = nuts0.where(nuts0['Study Zone'] != NC)
# Removal of all NaN's from the table
nuts0 = nuts0.dropna()
# # There is an empty LY00 zone in there
# nuts1.iloc[246]
# nuts1 = nuts1.drop(index=246)
# To check some info you could read the headers of the shapefiles
# nuts0.head() # to check the contents --> 121 on-/offshore definitions
# nuts1.head() # to check the contents --> 262 on-/offshore definitions
#%%
# =============================================================================
# Load in the data files themselves
# =============================================================================
# The mega loop
for year in years:
# for month in ['01']: #, '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12']:
# Define the file name
file_save = FOLDER_STORE+'ERA5_CF_MZ_RM_'+year+'.nc'
# # Define the file name
# file_save_solar = FOLDER_STORE+'ERA5_EV_mz_20_solar_'+year+month+'.nc'
# file_save_windoff = FOLDER_STORE+'ERA5_EV_mz_20_windoff_'+year+month+'.nc'
# file_save_windon = FOLDER_STORE+'ERA5_EV_mz_20_windon_'+year+month+'.nc'
    # Check if file already exists, then get out
    if os.path.isfile(file_save) == True:
        # Tell us the file exists
        print('NOTIFY: Already applied for year '+year+'!')
# IF the file doesn't exist, apply the distribution
elif os.path.isfile(file_save) == False:
print('NOTIFY: Working on year '+year+'!')
# Load in the NetCDF
ds = xr.open_mfdataset(FOLDER_WITH_NETCDF+'ERA5_CF_'+year+'*.nc') #, chunks = {'time': 8760})
# remaming the coordinates
ds = ds.rename({'longitude': 'lon','latitude': 'lat'})
# Adding the weights
weights_on = dsm.windCF_on
weights_on.name = 'weights'
weights_of = dsm.windCF_off
weights_of.name = 'weights'
weights_pv = dsm.solarCF
weights_pv.name = 'weights'
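        # These weights are the 1991-2020 climatological mean capacity factors, so the
        # weighted spatial mean below leans towards grid cells with a higher long-term
        # CF (presumably a proxy for where capacity is more likely to be installed).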
#%%
# =============================================================================
# Now we define the regionmask and to later apply it
# =============================================================================
# CALCULATE MASK
SZ0_mask_poly = regionmask.Regions(name = 'ENTSO-E_StudyZone0_Mask', numbers = np.arange(0,len(nuts0)), abbrevs = list(nuts0['Study Zone']), outlines = list(nuts0.geometry.values[i] for i in np.arange(0,len(nuts0)))) # names = list(nuts0['Study Zone']),
# SZ1_mask_poly = regionmask.Regions(name = 'ENTSO-E_StudyZone1_Mask', numbers = np.arange(0,len(nuts1)), abbrevs = list(nuts1['Code']), outlines = list(nuts1.geometry.values[i] for i in np.arange(0,len(nuts0)))) # names = list(nuts1['Study Zone']), # print(nuts_mask_poly)
# Create the mask
mask = SZ0_mask_poly.mask(ds.isel(time = 0), method = None)
# mask = SZ1_mask_poly.mask(ds.isel(time = 0), method = None)
# mask # To check the contents of the mask defined
#%%
# =============================================================================
# Now selecting a region to select the data
# =============================================================================
# Prepare a dataset for filling with regional mean data
ds_solarCF = xr.Dataset()
ds_windCF_off = xr.Dataset()
ds_windCF_on = xr.Dataset()
SPV=[]
WON=[]
WOF=[]
wSPV=[]
wWON=[]
wWOF=[]
REGION_NAME=[]
# Select a region (the Netherlands is 12/54 in NUTS0)
for ID_REGION in np.arange(0,len(nuts0)):
# for ID_REGION in np.arange(0,len(nuts1)):
# for ID_REGION in [7, 36, 48, 49, 50, 92, 95, 97, 99, 100]: # the interesting countries
# Determine the region name
region_name = nuts0.iloc[ID_REGION]['Study Zone']
print(' : ('+str(ID_REGION+1)+'/112) '+region_name+'!')
# Select the lat/lon combo's as vector to use later
lat = mask.lat.values
lon = mask.lon.values
# We select the region under consideration
sel_mask = mask.where(mask == ID_REGION).values
# Select the specific lat/lon combo that is the minimum bounding box
id_lon = lon[np.where(~np.all(np.isnan(sel_mask), axis=0))]
id_lat = lat[np.where(~np.all(np.isnan(sel_mask), axis=1))]
# Following Matteo's approach: slice to the minimum bounding box, load it into memory with compute(), then mask out everything outside the region
out_sel = ds.sel(lat = slice(id_lat[0], id_lat[-1]), lon = slice(id_lon[0], id_lon[-1])).compute().where(mask == ID_REGION)
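# The three selection steps above (pick the region, find its minimum bounding
# box, slice and mask the data) could be wrapped in a helper; the sketch below
# is only illustrative (the function name is hypothetical, not from the original
# script) and assumes lat/lon are ordered the same way in the mask and dataset.
# def select_region(ds, mask, region_id):
#     """Return ds restricted to the bounding box and cells of one mask region."""
#     sel = mask.where(mask == region_id).values
#     lons = mask.lon.values[np.where(~np.all(np.isnan(sel), axis=0))]
#     lats = mask.lat.values[np.where(~np.all(np.isnan(sel), axis=1))]
#     boxed = ds.sel(lat=slice(lats[0], lats[-1]), lon=slice(lons[0], lons[-1]))
#     return boxed.compute().where(mask == region_id)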
#%%
# =============================================================================
# A quick figure for the sanity checks
# =============================================================================
# plt.figure(figsize=(12,8))
# ax = plt.axes()
# out_sel.windCF_on.isel(time = 708).plot(ax = ax)
# nuts0.plot(ax = ax, alpha = 0.8, facecolor = 'none')
#%%
# =============================================================================
# Regional mean for saving data
# =============================================================================
# Region-wide mean
WOF.append(out_sel.windCF_off.groupby('time').mean(...).values)
WON.append(out_sel.windCF_on.groupby('time').mean(...).values)
SPV.append(out_sel.solarCF.groupby('time').mean(...).values)
#
# TODO: redo this bit; the weighted mean is currently built three times (once per weight set)
# Weighted mean
out_sel_of = out_sel.weighted(weights_of)
wWOF.append(out_sel_of.mean(("lat","lon")).windCF_off.values)
out_sel_on = out_sel.weighted(weights_on)
wWON.append(out_sel_on.mean(("lat","lon")).windCF_on.values)
out_sel_pv = out_sel.weighted(weights_pv)
wSPV.append(out_sel_pv.mean(("lat","lon")).solarCF.values)
# weights = np.cos(np.deg2rad(air.lat))
# weights.name = "weights"
# weights
# air_weighted = air.weighted(weights)
# air_weighted
# weighted_mean = air_weighted.mean(("lon", "lat"))
# weighted_mean
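# A leaner alternative for the TODO above (an illustrative sketch, not the
# method used here): weighting each variable directly avoids building the
# weighted mean of the full dataset three times, once per weight set.
# wWOF_alt = out_sel.windCF_off.weighted(weights_of).mean(("lat", "lon")).values
# wWON_alt = out_sel.windCF_on.weighted(weights_on).mean(("lat", "lon")).values
# wSPV_alt = out_sel.solarCF.weighted(weights_pv).mean(("lat", "lon")).values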
# Just a list of names
REGION_NAME.append(region_name)
# out of the region loop we create new arrays with the info
ds_save = xr.Dataset()
ds_save['WOF'] = xr.DataArray(WOF, coords=[REGION_NAME, ds.time], dims=["region", "time"])
ds_save['WON'] = xr.DataArray(WON, coords=[REGION_NAME, ds.time], dims=["region", "time"])
ds_save['SPV'] = xr.DataArray(SPV, coords=[REGION_NAME, ds.time], dims=["region", "time"])
# out of the region loop we create new arrays with the info
dsw_save = xr.Dataset()
dsw_save['WOF'] = xr.DataArray(wWOF, coords=[REGION_NAME, ds.time], dims=["region", "time"])
dsw_save['WON'] = xr.DataArray(wWON, coords=[REGION_NAME, ds.time], dims=["region", "time"])
dsw_save['SPV'] = xr.DataArray(wSPV, coords=[REGION_NAME, ds.time], dims=["region", "time"])
# # Offshore region only have wind
# if region_name == 'MT00_OFF':
# # fixing the too-small region for Malta, where we do need data
# ds_windCF_off[region_name] = out_sel.windCF_off.groupby('time').mean(...)
# ds_windCF_on['MT00'] = out_sel.windCF_on.groupby('time').mean(...)
# ds_solarCF['MT00'] = out_sel.solarCF.groupby('time').mean(...)
# elif region_name[-4:] == '_OFF':
# # Save the regional mean into the main dataset under the region's name
# ds_windCF_off[region_name] = out_sel.windCF_off.groupby('time').mean(...)
# # Non-offshore regions have wind and solar installed
# else:
# # Save the regional mean of the onshore wind and solar CF's
# ds_windCF_on[region_name] = out_sel.windCF_on.groupby('time').mean(...)
# ds_solarCF[region_name] = out_sel.solarCF.groupby('time').mean(...)
#%%
# =============================================================================
# Setting units & saving
# =============================================================================
# Setting the general dataset attributes
ds_save.attrs.update(
author = 'Laurens Stoop UU/KNMI/TenneT',
units = '[0-1]',
created = datetime.datetime.today().strftime('%d-%m-%Y'),
map_area = 'Europe',
region_definition = 'ENTSO-E StudyZones at national level aggregated',
data_source = 'Capacity factors based on ERA5 reanalysis data, contains modified Copernicus Climate Change Service information [28-02-2022]'
)
dsw_save.attrs.update(
author = 'Laurens Stoop UU/KNMI/TenneT',
units = '[0-1]',
created = datetime.datetime.today().strftime('%d-%m-%Y'),
map_area = 'Europe',
region_definition = 'ENTSO-E StudyZones at national level aggregated',
data_source = 'Capacity factors based on ERA5 reanalysis data, contains modified Copernicus Climate Change Service information [28-02-2022]'
)
# Saving the file
ds_save.to_netcdf(file_save, encoding={'time':{'units':'days since 1900-01-01'}})
dsw_save.to_netcdf(FOLDER_STORE+'ERA5_CF_MZ_WM_'+year+'.nc', encoding={'time':{'units':'days since 1900-01-01'}})
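# Quick read-back sketch (an assumption, not part of the original script): re-open
# the file that was just written to confirm the region/time dimensions survived.
# with xr.open_dataset(file_save) as ds_check:
#     print(ds_check.dims)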
# # Setting the general dataset attributes
# ds_solarCF.attrs.update(
# author = 'Laurens Stoop UU/KNMI/TenneT',
# variables = 'Solar PV capacity factor for a specific region',
# units = '[0-1]',
# created = datetime.datetime.today().strftime('%d-%m-%Y'),
# map_area = 'Europe',
# region_definition = 'ENTSO-E StudyZones at national level aggregated',
# data_source = 'Capacity factors based on ERA5 reanalysis data, contains modified Copernicus Climate Change Service information [31-05-2021]'
# )
# # Setting the general dataset attributes
# ds_windCF_off.attrs.update(
# author = 'Laurens Stoop UU/KNMI/TenneT',
# variables = 'Wind Offshore capacity factor for a specific region',
# units = '[0-1]',
# created = datetime.datetime.today().strftime('%d-%m-%Y'),
# map_area = 'Europe',
# region_definition = 'ENTSO-E StudyZones at national level aggregated',
# data_source = 'Capacity factors based on ERA5 reanalysis data, contains modified Copernicus Climate Change Service information [31-05-2021]'
# )
# # Setting the general dataset attributes
# ds_windCF_on.attrs.update(
# author = 'Laurens Stoop UU/KNMI/TenneT',
# variables = 'Wind onshore capacity factor for a specific region',
# units = '[0-1]',
# created = datetime.datetime.today().strftime('%d-%m-%Y'),
# map_area = 'Europe',
# region_definition = 'ENTSO-E StudyZones at national level aggregated',
# data_source = 'Capacity factors based on ERA5 reanalysis data, contains modified Copernicus Climate Change Service information [31-05-2021]'
# )
# # Saving the file
# ds_solarCF.to_netcdf(file_save_solar, encoding={'time':{'units':'days since 1900-01-01'}})
# ds_windCF_off.to_netcdf(file_save_windoff, encoding={'time':{'units':'days since 1900-01-01'}})
# ds_windCF_on.to_netcdf(file_save_windon, encoding={'time':{'units':'days since 1900-01-01'}})
| 16,160 | 40.122137 | 288 |
py
|
EnergyVariables
|
EnergyVariables-main/src/archive/1_ERA5_ENTSO-E_StudyZones.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Restructured on Thu 19 May 2021 21:40
@author: Laurens Stoop - [email protected]
Following example by Matteo de Felice: http://www.matteodefelice.name/post/aggregating-gridded-data/
"""
#%%
# =============================================================================
# Dependencies
# =============================================================================
## Importing modules
import xarray as xr
import numpy as np
import regionmask
import geopandas as gpd
# import pandas as pd
import matplotlib.pyplot as plt
# Set the path for the data
PATH_TO_NUTS0 = '/media/DataDrive/Other/RegionDefinitions/ENTSO-E_StudyZones/DTU-PECD22-Polygons_SZ_VF2021.shp'
PATH_TO_NUTS1 = '/media/DataDrive/Other/RegionDefinitions/ENTSO-E_StudyZones/DTU-PECD22-Polygons_VF2021.shp'
# Read NetCDF
FOLDER_WITH_NETCDF = '/media/DataGate3/ERA5-EU_CF/'
print('NOTIFY: Initialization is complete, Skynet active')
#%%
# =============================================================================
# Load in the base shapefile
# =============================================================================
# Load the shapefile
nuts0 = gpd.read_file(PATH_TO_NUTS0)
nuts1 = gpd.read_file(PATH_TO_NUTS1)
# There is an abandoned LY00 zone in there
# nuts1.iloc[246]
nuts1 = nuts1.drop(index=246)
# To check some info you could read the headers of the shapefiles
# nuts0.head() # to check the contents --> 121 on-/offshore definitions
# nuts1.head() # to check the contents --> 262 on-/offshore definitions
#%%
# =============================================================================
# Load in the data files themselves
# =============================================================================
# Load in the NetCDF
ds = xr.open_mfdataset(FOLDER_WITH_NETCDF+'ERA5-EU_CF_2011.nc') #, chunks = {'time': 8760})
#%%
# =============================================================================
# Now we define the regionmask and apply it
# =============================================================================
# CALCULATE MASK
# SZ0_mask_poly = regionmask.Regions(name = 'ENTSO-E_StudyZone0_Mask', numbers = list(range(0,121)), abbrevs = list(nuts0['Study Zone']), outlines = list(nuts0.geometry.values[i] for i in range(0,121))) # names = list(nuts0['Study Zone']),
SZ1_mask_poly = regionmask.Regions(name = 'ENTSO-E_StudyZone1_Mask', numbers = list(range(0,262)), abbrevs = list(nuts1['Code']), outlines = list(nuts1.geometry.values[i] for i in range(0,262))) # names = list(nuts1['Study Zone']),
# print(nuts_mask_poly)
# Create the mask
mask = SZ1_mask_poly.mask(ds.isel(time = 0), method = None)
# mask # To check the contents of the mask defined
# =============================================================================
# A quick figure for sanity checks
# =============================================================================
plt.figure(figsize=(12,8))
ax = plt.axes()
mask.plot(ax = ax)
nuts1.plot(ax = ax, alpha = 0.8, facecolor = 'none', lw = 1)
#%%
# =============================================================================
# Now selecting a region to select the data
# =============================================================================
# Select a region (the Netherlands is 38/148)
ID_REGION = 150
# Select the lat/lon combo's as vector to use later
lat = mask.lat.values
lon = mask.lon.values
# We select the region under consideration
sel_mask = mask.where(mask == ID_REGION).values
# Select the specific lat/lon combo that is the minimum bounding box
id_lon = lon[np.where(~np.all(np.isnan(sel_mask), axis=0))]
id_lat = lat[np.where(~np.all(np.isnan(sel_mask), axis=1))]
# Following Matteo's approach: slice to the minimum bounding box, load it into memory with compute(), then mask out everything outside the region
out_sel = ds.sel(lat = slice(id_lat[0], id_lat[-1]), lon = slice(id_lon[0], id_lon[-1])).compute().where(mask == ID_REGION)
# =============================================================================
# A quick figure for the sanity checks
# =============================================================================
plt.figure(figsize=(12,8))
ax = plt.axes()
out_sel.solarCF.isel(time = 4140).plot(ax = ax)
nuts1.plot(ax = ax, alpha = 0.8, facecolor = 'none')
#%%
# =============================================================================
# Regional mean for saving data
# =============================================================================
# Now calculate the regional mean
x = out_sel.solarCF.groupby('time').mean(...)
x.plot()
# # Saving function
# x.t2m.to_pandas().to_csv('average-temperature.csv', header = ['t2m'])
| 4,650 | 34.776923 | 239 |
py
|
EnergyVariables
|
EnergyVariables-main/src/archive/2_NUTS0_TYNDP.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Remade on Sun 30 May 2021 22:32
@author: Laurens Stoop - [email protected]
"""
#%%
# =============================================================================
# Dependencies
# =============================================================================
## Importing modules
import xarray as xr
import numpy as np
# import regionmask
# import geopandas as gpd
import datetime
import pandas as pd
# import matplotlib.pyplot as plt
import os.path
# Select the years to run
years = np.array([
# '1950', '1951', '1952',
# '1953', '1954', '1955',
# '1956', '1957', '1958',
# '1959', '1960', '1961',
# '1962', '1963', '1964',
# '1965', '1966', '1967',
# '1968', '1969', '1970',
# '1971', '1972', '1973',
# '1974', '1975', '1976',
# '1977', '1978',
# '1979', '1980', '1981',
# '1982', '1983', '1984',
# '1985', '1986', '1987',
# '1988', '1989', '1990',
# '1991', '1992', '1993',
# '1994', '1995', '1996',
# '1997', '1998', '1999',
# '2000', '2001', '2002',
# '2003', '2004', '2005',
# '2006', '2007', '2008',
# '2009', '2010', '2011',
# '2012', '2013',
'2014',
'2015', '2016', '2017',
'2018', '2019',
#'2020'
])
# Versions of the TYNDP
CD_TYNDP_input = np.array([
'DE_2030',
'DE_2040',
'GA_2030',
'GA_2040',
'NT_2025',
'NT_2030',
'NT_2040'
])
# Set the path for the data
PATH_TO_TYNDP = '/media/DataStager1/Other/CapacityDistribution/TYNDP/Origin/'
# Read NetCDF
FOLDER_ERA5_CF_NUTS0 = '/media/DataStager2/ERA5-EU_CF/NUTS0/'
FOLDER_EV_NUTS0 = '/media/DataStager2/ERA5-EU_EV/NUTS0/'
print('NOTIFY: Initialization is complete, Skynet active')
#%%
# =============================================================================
# Load in the datafiles with the capacity distributions
# =============================================================================
# Select the distribution to run over
# capacity_distribution = CD_TYNDP_input[0]
for capacity_distribution in CD_TYNDP_input:
print('NOTIFY: Working on Distribution '+capacity_distribution+'!')
# Read in the Capacity Distribution from the TYNDP
df_cd_tyndp = pd.read_csv(PATH_TO_TYNDP+'TYNDP-'+capacity_distribution+'.csv', sep=';', names=['Code', 'Type', 'Capacity'])
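# The capacity lookup below is repeated for every country/technology pair; this
# helper sketch shows the pattern once. It is purely illustrative (the function
# name is hypothetical) and assumes the columns 'Code', 'Type' and 'Capacity'
# exactly as read above.
# def get_capacity(df, code, kind):
#     """Return the installed capacity for one zone/technology, or None."""
#     values = df.where(df['Code'] == code).where(df['Type'] == kind).dropna()['Capacity'].values
#     return values[0] if values.size > 0 else None
# Example: get_capacity(df_cd_tyndp, 'NL', 'Solar PV')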
#%%
# =============================================================================
# Load in the NUTS0 data
# =============================================================================
# Set the year to run over
for year in years:
print('NOTIFY: Working on year '+year+'!')
# Load in the NetCDF
ds_cf_solar = xr.open_dataset(FOLDER_ERA5_CF_NUTS0+'ERA5-EU_CF-NUTS0_solar_'+str(year)+'.nc') #, chunks = {'time': 8760})
ds_cf_windoff = xr.open_dataset(FOLDER_ERA5_CF_NUTS0+'ERA5-EU_CF-NUTS0_windoff_'+str(year)+'.nc') #, chunks = {'time': 8760})
ds_cf_windon = xr.open_dataset(FOLDER_ERA5_CF_NUTS0+'ERA5-EU_CF-NUTS0_windon_'+str(year)+'.nc') #, chunks = {'time': 8760})
#%%
# =============================================================================
# Multiply the capacity distribution with the capacity factor
# =============================================================================
# Set a new dataset for energy variables
ds_ev_solar = xr.Dataset()
ds_ev_windoff = xr.Dataset()
ds_ev_windon = xr.Dataset()
# The easy countries
countries = [
'BA', 'BE', 'DE',
'EE', 'ES', 'FI',
'HR', 'IE',
'LT', 'LU',
'LV', 'NL', 'SI',
'AT', 'CH', 'CZ',
'HU', 'PL',
'PT', 'SK', 'TR',
'AL', 'BG', 'CY',
'ME', 'MK',
'RO', 'RS',
'MT', # to small but used offshore data for onshore figures (small capacity)
'FR', # without the island of corsica
'UK', # without the region of northern ireland
]
# The easy countries
for country in countries:
#%% working on solar
# Define the capacity installed
country_cap_distr_solar = df_cd_tyndp.where(df_cd_tyndp['Code'] == country).where(df_cd_tyndp['Type'] == 'Solar PV').dropna()['Capacity'].values
# If this capacity is not defined, do not calculate
if country_cap_distr_solar.size == 0:
print('There is no solar capacity for '+country)
# Apply the solar capacity distribution
else:
# apply the cap distribution
ds_ev_solar[country] = country_cap_distr_solar * ds_cf_solar[country+'00']
#%% working on onshore wind
# Define the capacity installed
country_cap_distr_windon = df_cd_tyndp.where(df_cd_tyndp['Code'] == country).where(df_cd_tyndp['Type'] == 'Onshore Wind').dropna()['Capacity'].values
# If this capacity is not defined, do not calculate
if country_cap_distr_windon.size == 0:
print('There is no onshore wind capacity for '+country)
# Apply the onshore wind capacity distribution
else:
# apply the cap distribution
ds_ev_windon[country] = country_cap_distr_windon * ds_cf_windon[country+'00']
#%% working on offshore wind
# Define the capacity installed
country_cap_distr_windoff = df_cd_tyndp.where(df_cd_tyndp['Code'] == country).where(df_cd_tyndp['Type'] == 'Offshore Wind').dropna()['Capacity'].values
# If this capacity is not defined, do not calculate
if country_cap_distr_windoff.size == 0:
print('There is no offshore capacity for '+country)
# Apply the wind offshore capacity distribution
else:
ds_ev_windoff[country] = country_cap_distr_windoff * ds_cf_windoff[country+'00_OFF']
#%%
# =============================================================================
# Working on Greece (just a naming issue)
# =============================================================================
country_cap_distr_windoff = df_cd_tyndp.where(df_cd_tyndp['Code'] == 'EL').where(df_cd_tyndp['Type'] == 'Offshore Wind').dropna()['Capacity'].values
# If this capacity is not defined, do not calculate
if country_cap_distr_windoff.size == 0:
print('There is no offshore capacity for Greece')
else:
ds_ev_windoff['EL'] = df_cd_tyndp.where(df_cd_tyndp['Code'] == 'EL').where(df_cd_tyndp['Type'] == 'Offshore Wind').dropna()['Capacity'].values * ds_cf_windoff['GR00_OFF']
ds_ev_windon['EL'] = df_cd_tyndp.where(df_cd_tyndp['Code'] == 'EL').where(df_cd_tyndp['Type'] == 'Onshore Wind').dropna()['Capacity'].values * ds_cf_windon['GR00']
ds_ev_solar['EL'] = df_cd_tyndp.where(df_cd_tyndp['Code'] == 'EL').where(df_cd_tyndp['Type'] == 'Solar PV').dropna()['Capacity'].values * ds_cf_solar['GR00']
# =============================================================================
# Working on Ukraine, without the oblast Moekatsjevo
# =============================================================================
ds_ev_windon['UA'] = df_cd_tyndp.where(df_cd_tyndp['Code'] == 'UA').where(df_cd_tyndp['Type'] == 'Onshore Wind').dropna()['Capacity'].values * ds_cf_windon['UA02']
# =============================================================================
# Working on Sweden, capacity divided based on the CFs of its regions
# =============================================================================
# Determine the divisors
SE_devisor_windoff = ds_cf_windoff['SE01_OFF'].mean(...).values + ds_cf_windoff['SE02_OFF'].mean(...).values + ds_cf_windoff['SE03_OFF'].mean(...).values + ds_cf_windoff['SE04_OFF'].mean(...).values
SE_devisor_windon = ds_cf_windon['SE01'].mean(...).values + ds_cf_windon['SE02'].mean(...).values + ds_cf_windon['SE03'].mean(...).values + ds_cf_windon['SE04'].mean(...).values
SE_devisor_solar = ds_cf_solar['SE01'].mean(...).values + ds_cf_solar['SE02'].mean(...).values + ds_cf_solar['SE03'].mean(...).values + ds_cf_solar['SE04'].mean(...).values
# Find the capacities
SE_cap_off = df_cd_tyndp.where(df_cd_tyndp['Code'] == 'SE').where(df_cd_tyndp['Type'] == 'Offshore Wind').dropna()['Capacity'].values[0]
SE_cap_on = df_cd_tyndp.where(df_cd_tyndp['Code'] == 'SE').where(df_cd_tyndp['Type'] == 'Onshore Wind').dropna()['Capacity'].values[0]
SE_cap_sol = df_cd_tyndp.where(df_cd_tyndp['Code'] == 'SE').where(df_cd_tyndp['Type'] == 'Solar PV').dropna()['Capacity'].values[0]
# Calculate the regional energy generation for offshore wind
SE01_off = SE_cap_off * ds_cf_windoff['SE01_OFF'] * ds_cf_windoff['SE01_OFF'].mean(...).values * SE_devisor_windoff**-1
SE02_off = SE_cap_off * ds_cf_windoff['SE02_OFF'] * ds_cf_windoff['SE02_OFF'].mean(...).values * SE_devisor_windoff**-1
SE03_off = SE_cap_off * ds_cf_windoff['SE03_OFF'] * ds_cf_windoff['SE03_OFF'].mean(...).values * SE_devisor_windoff**-1
SE04_off = SE_cap_off * ds_cf_windoff['SE04_OFF'] * ds_cf_windoff['SE04_OFF'].mean(...).values * SE_devisor_windoff**-1
# Sum over all regions to obtain national figures
ds_ev_windoff['SE'] = SE01_off + SE02_off + SE03_off + SE04_off
# Calculate the regional energy generation for onshore wind
SE01_on = SE_cap_on * ds_cf_windon['SE01'] * ds_cf_windon['SE01'].mean(...).values * SE_devisor_windon**-1
SE02_on = SE_cap_on * ds_cf_windon['SE02'] * ds_cf_windon['SE02'].mean(...).values * SE_devisor_windon**-1
SE03_on = SE_cap_on * ds_cf_windon['SE03'] * ds_cf_windon['SE03'].mean(...).values * SE_devisor_windon**-1
SE04_on = SE_cap_on * ds_cf_windon['SE04'] * ds_cf_windon['SE04'].mean(...).values * SE_devisor_windon**-1
# Sum over all regions to obtain national figures
ds_ev_windon['SE'] = SE01_on + SE02_on + SE03_on + SE04_on
# Calculate the regional energy generation for solar PV
SE01_sol = SE_cap_sol * ds_cf_solar['SE01'] * ds_cf_solar['SE01'].mean(...).values * SE_devisor_solar**-1
SE02_sol = SE_cap_sol * ds_cf_solar['SE02'] * ds_cf_solar['SE02'].mean(...).values * SE_devisor_solar**-1
SE03_sol = SE_cap_sol * ds_cf_solar['SE03'] * ds_cf_solar['SE03'].mean(...).values * SE_devisor_solar**-1
SE04_sol = SE_cap_sol * ds_cf_solar['SE04'] * ds_cf_solar['SE04'].mean(...).values * SE_devisor_solar**-1
# Sum over all regions to obtain national figures
ds_ev_solar['SE'] = SE01_sol + SE02_sol + SE03_sol + SE04_sol
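# The split above distributes the national capacity over the bidding zones in
# proportion to each zone's long-term mean capacity factor, i.e. roughly
#   generation_r(t) = Cap_national * CF_r(t) * mean(CF_r) / sum_s mean(CF_s)
# so zones with better resources receive a larger share of the installed
# capacity. A generic sketch of the same idea (hypothetical helper, not part of
# the original script):
# def split_national_capacity(cap, cf_by_zone):
#     """cf_by_zone: dict mapping zone name -> capacity-factor DataArray."""
#     total = sum(cf.mean(...).values for cf in cf_by_zone.values())
#     return sum(cap * cf * cf.mean(...).values / total for cf in cf_by_zone.values())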
# =============================================================================
# Working on Norway
# =============================================================================
# Determine the divisors
NO_dev_on = ds_cf_windon['NOM1'].mean(...).values + ds_cf_windon['NON1'].mean(...).values + ds_cf_windon['NOS0'].mean(...).values
NO_dev_sol = ds_cf_solar['NOM1'].mean(...).values + ds_cf_solar['NON1'].mean(...).values + ds_cf_solar['NOS0'].mean(...).values
# Find the capacities
NO_cap_on = df_cd_tyndp.where(df_cd_tyndp['Code'] == 'NO').where(df_cd_tyndp['Type'] == 'Onshore Wind').dropna()['Capacity'].values[0]
NO_cap_sol = df_cd_tyndp.where(df_cd_tyndp['Code'] == 'NO').where(df_cd_tyndp['Type'] == 'Solar PV').dropna()['Capacity'].values[0]
# Calculate the regional energy generation for offshore wind
# If this capacity is not defined, do not calculate
if df_cd_tyndp.where(df_cd_tyndp['Code'] == 'NO').where(df_cd_tyndp['Type'] == 'Offshore Wind').dropna()['Capacity'].values.size == 0:
print('There is no offshore capacity for Norway')
else:
NO_cap_off = df_cd_tyndp.where(df_cd_tyndp['Code'] == 'NO').where(df_cd_tyndp['Type'] == 'Offshore Wind').dropna()['Capacity'].values[0]
NO_dev_off = ds_cf_windoff['NOM1_OFF'].mean(...).values + ds_cf_windoff['NON1_OFF'].mean(...).values
NOM1_off = NO_cap_off * ds_cf_windoff['NOM1_OFF'] * ds_cf_windoff['NOM1_OFF'].mean(...).values * NO_dev_off**-1
NON1_off = NO_cap_off * ds_cf_windoff['NON1_OFF'] * ds_cf_windoff['NON1_OFF'].mean(...).values * NO_dev_off**-1
# Sum over all regions to obtain national figures
ds_ev_windoff['NO'] = NOM1_off + NON1_off
# Calculate the regional energy generation for onshore wind
NOM1_on = NO_cap_on * ds_cf_windon['NOM1'] * ds_cf_windon['NOM1'].mean(...).values * NO_dev_on**-1
NON1_on = NO_cap_on * ds_cf_windon['NON1'] * ds_cf_windon['NON1'].mean(...).values * NO_dev_on**-1
NOS0_on = NO_cap_on * ds_cf_windon['NOS0'] * ds_cf_windon['NOS0'].mean(...).values * NO_dev_on**-1
# Sum over all regions to obtain national figures
ds_ev_windon['NO'] = NOM1_on + NON1_on + NOS0_on
# Calculate the regional energy generation for solar PV
NOM1_sol = NO_cap_sol * ds_cf_solar['NOM1'] * ds_cf_solar['NOM1'].mean(...).values * NO_dev_sol**-1
NON1_sol = NO_cap_sol * ds_cf_solar['NON1'] * ds_cf_solar['NON1'].mean(...).values * NO_dev_sol**-1
NOS0_sol = NO_cap_sol * ds_cf_solar['NOS0'] * ds_cf_solar['NOS0'].mean(...).values * NO_dev_sol**-1
# Sum over all regions to obtain national figures
ds_ev_solar['NO'] = NOM1_sol + NON1_sol + NOS0_sol
# =============================================================================
# Working on Denmark
# =============================================================================
# Determine the divisors
DK_dev_off = ds_cf_windoff['DKE1_OFF'].mean(...).values + ds_cf_windoff['DKKF_OFF'].mean(...).values + ds_cf_windoff['DKW1_OFF'].mean(...).values
DK_dev_on = ds_cf_windon['DKE1'].mean(...).values + ds_cf_windon['DKW1'].mean(...).values
DK_dev_sol = ds_cf_solar['DKE1'].mean(...).values + ds_cf_solar['DKW1'].mean(...).values
# Find the capacities
DK_cap_off = df_cd_tyndp.where(df_cd_tyndp['Code'] == 'DK').where(df_cd_tyndp['Type'] == 'Offshore Wind').dropna()['Capacity'].values[0]
DK_cap_on = df_cd_tyndp.where(df_cd_tyndp['Code'] == 'DK').where(df_cd_tyndp['Type'] == 'Onshore Wind').dropna()['Capacity'].values[0]
DK_cap_sol = df_cd_tyndp.where(df_cd_tyndp['Code'] == 'DK').where(df_cd_tyndp['Type'] == 'Solar PV').dropna()['Capacity'].values[0]
# Calculate the regional energy generation for offshore wind
DKE1_off = DK_cap_off * ds_cf_windoff['DKE1_OFF'] * ds_cf_windoff['DKE1_OFF'].mean(...).values * DK_dev_off**-1
DKKF_off = DK_cap_off * ds_cf_windoff['DKKF_OFF'] * ds_cf_windoff['DKKF_OFF'].mean(...).values * DK_dev_off**-1
DKW1_off = DK_cap_off * ds_cf_windoff['DKW1_OFF'] * ds_cf_windoff['DKW1_OFF'].mean(...).values * DK_dev_off**-1
# Sum over all regions to obtain national figures
ds_ev_windoff['DK'] = DKE1_off + DKKF_off + DKW1_off
# Calculate the regional energy generation for onshore wind
DKE1_on = DK_cap_on * ds_cf_windon['DKE1'] * ds_cf_windon['DKE1'].mean(...).values * DK_dev_on**-1
DKW1_on = DK_cap_on * ds_cf_windon['DKW1'] * ds_cf_windon['DKW1'].mean(...).values * DK_dev_on**-1
# Sum over all regions to obtain national figures
ds_ev_windon['DK'] = DKE1_on + DKW1_on
# Calculate the regional energy generation for solar PV
DKE1_sol = DK_cap_sol * ds_cf_solar['DKE1'] * ds_cf_solar['DKE1'].mean(...).values * DK_dev_sol**-1
DKW1_sol = DK_cap_sol * ds_cf_solar['DKW1'] * ds_cf_solar['DKW1'].mean(...).values * DK_dev_sol**-1
# Sum over all regions to obtain national figures
ds_ev_solar['DK'] = DKE1_sol + DKW1_sol
# =============================================================================
# Working on Italy
# =============================================================================
# Determine the divisors
IT_dev_on = ds_cf_windon['ITCA'].mean(...).values + ds_cf_windon['ITCN'].mean(...).values + ds_cf_windon['ITCS'].mean(...).values + ds_cf_windon['ITN1'].mean(...).values + ds_cf_windon['ITS1'].mean(...).values + ds_cf_windon['ITSA'].mean(...).values + ds_cf_windon['ITSI'].mean(...).values
IT_dev_sol = ds_cf_solar['ITCA'].mean(...).values + ds_cf_solar['ITCN'].mean(...).values + ds_cf_solar['ITCS'].mean(...).values + ds_cf_solar['ITN1'].mean(...).values + ds_cf_solar['ITS1'].mean(...).values + ds_cf_solar['ITSA'].mean(...).values + ds_cf_solar['ITSI'].mean(...).values
# Find the capacities
IT_cap_on = df_cd_tyndp.where(df_cd_tyndp['Code'] == 'IT').where(df_cd_tyndp['Type'] == 'Onshore Wind').dropna()['Capacity'].values[0]
IT_cap_sol = df_cd_tyndp.where(df_cd_tyndp['Code'] == 'IT').where(df_cd_tyndp['Type'] == 'Solar PV').dropna()['Capacity'].values[0]
# Calculate the regional energy generation for offshore wind
# If this capacity is not defined, do not calculate
if df_cd_tyndp.where(df_cd_tyndp['Code'] == 'IT').where(df_cd_tyndp['Type'] == 'Offshore Wind').dropna()['Capacity'].values.size == 0:
print('There is no offshore capacity for Italy')
else:
IT_dev_off = ds_cf_windoff['ITCA_OFF'].mean(...).values + ds_cf_windoff['ITCN_OFF'].mean(...).values + ds_cf_windoff['ITCS_OFF'].mean(...).values + ds_cf_windoff['ITN1_OFF'].mean(...).values + ds_cf_windoff['ITS1_OFF'].mean(...).values + ds_cf_windoff['ITSA_OFF'].mean(...).values + ds_cf_windoff['ITSI_OFF'].mean(...).values
IT_cap_off = df_cd_tyndp.where(df_cd_tyndp['Code'] == 'IT').where(df_cd_tyndp['Type'] == 'Offshore Wind').dropna()['Capacity'].values[0]
ITCA_off = IT_cap_off * ds_cf_windoff['ITCA_OFF'] * ds_cf_windoff['ITCA_OFF'].mean(...).values * IT_dev_off**-1
ITCN_off = IT_cap_off * ds_cf_windoff['ITCN_OFF'] * ds_cf_windoff['ITCN_OFF'].mean(...).values * IT_dev_off**-1
ITCS_off = IT_cap_off * ds_cf_windoff['ITCS_OFF'] * ds_cf_windoff['ITCS_OFF'].mean(...).values * IT_dev_off**-1
ITN1_off = IT_cap_off * ds_cf_windoff['ITN1_OFF'] * ds_cf_windoff['ITN1_OFF'].mean(...).values * IT_dev_off**-1
ITS1_off = IT_cap_off * ds_cf_windoff['ITS1_OFF'] * ds_cf_windoff['ITS1_OFF'].mean(...).values * IT_dev_off**-1
ITSA_off = IT_cap_off * ds_cf_windoff['ITSA_OFF'] * ds_cf_windoff['ITSA_OFF'].mean(...).values * IT_dev_off**-1
ITSI_off = IT_cap_off * ds_cf_windoff['ITSI_OFF'] * ds_cf_windoff['ITSI_OFF'].mean(...).values * IT_dev_off**-1
# Sum over all regions to obtain national figures
ds_ev_windoff['IT'] = ITCA_off + ITCN_off + ITCS_off + ITN1_off + ITS1_off + ITSA_off + ITSI_off
# Calculate the regional energy generation for onshore wind
ITCA_on = IT_cap_on * ds_cf_windon['ITCA'] * ds_cf_windon['ITCA'].mean(...).values * IT_dev_on**-1
ITCN_on = IT_cap_on * ds_cf_windon['ITCN'] * ds_cf_windon['ITCN'].mean(...).values * IT_dev_on**-1
ITCS_on = IT_cap_on * ds_cf_windon['ITCS'] * ds_cf_windon['ITCS'].mean(...).values * IT_dev_on**-1
ITN1_on = IT_cap_on * ds_cf_windon['ITN1'] * ds_cf_windon['ITN1'].mean(...).values * IT_dev_on**-1
ITS1_on = IT_cap_on * ds_cf_windon['ITS1'] * ds_cf_windon['ITS1'].mean(...).values * IT_dev_on**-1
ITSA_on = IT_cap_on * ds_cf_windon['ITSA'] * ds_cf_windon['ITSA'].mean(...).values * IT_dev_on**-1
ITSI_on = IT_cap_on * ds_cf_windon['ITSI'] * ds_cf_windon['ITSI'].mean(...).values * IT_dev_on**-1
# Sum over all regions to obtain national figures
ds_ev_windon['IT'] = ITCA_on + ITCN_on + ITCS_on + ITN1_on + ITS1_on + ITSA_on + ITSI_on
# Calculate the regional energy generation for solar PV
ITCA_sol = IT_cap_sol * ds_cf_solar['ITCA'] * ds_cf_solar['ITCA'].mean(...).values * IT_dev_sol**-1
ITCN_sol = IT_cap_sol * ds_cf_solar['ITCN'] * ds_cf_solar['ITCN'].mean(...).values * IT_dev_sol**-1
ITCS_sol = IT_cap_sol * ds_cf_solar['ITCS'] * ds_cf_solar['ITCS'].mean(...).values * IT_dev_sol**-1
ITN1_sol = IT_cap_sol * ds_cf_solar['ITN1'] * ds_cf_solar['ITN1'].mean(...).values * IT_dev_sol**-1
ITS1_sol = IT_cap_sol * ds_cf_solar['ITS1'] * ds_cf_solar['ITS1'].mean(...).values * IT_dev_sol**-1
ITSA_sol = IT_cap_sol * ds_cf_solar['ITSA'] * ds_cf_solar['ITSA'].mean(...).values * IT_dev_sol**-1
ITSI_sol = IT_cap_sol * ds_cf_solar['ITSI'] * ds_cf_solar['ITSI'].mean(...).values * IT_dev_sol**-1
# Sum over all regions to obtain national figures
ds_ev_solar['IT'] = ITCA_sol + ITCN_sol + ITCS_sol + ITN1_sol + ITS1_sol + ITSA_sol + ITSI_sol
#%%
# =============================================================================
# Time to save the data
# =============================================================================
# Setting the general dataset attributes
ds_ev_windoff.attrs.update(
author = 'Laurens Stoop UU/KNMI/TenneT',
variables = 'Wind offshore electricity generation for a certain region',
units = 'MWh',
created = datetime.datetime.today().strftime('%d-%m-%Y'),
region_definition = 'ENTSO-E StudyZones at national level aggregated',
CapacityDistribution = 'TYNDP-'+capacity_distribution,
data_source = 'Capacity factors based on ERA5 reanalysis data, contains modified Copernicus Climate Change Service information [31-05-2021]'
)
# copy most and update partially
ds_ev_windon.attrs = ds_ev_windoff.attrs
ds_ev_windon.attrs.update(
variables = 'Wind onshore electricity generation for a certain region',
)
ds_ev_solar.attrs = ds_ev_windoff.attrs
ds_ev_solar.attrs.update(
variables = 'Solar PV electricity generation for a certain region',
)
# Saving the files as NetCDF
ds_ev_windoff.to_netcdf(FOLDER_EV_NUTS0+capacity_distribution+'/ERA5-EU_EV_TYNDP-'+capacity_distribution+'_WOF_'+str(year)+'.nc', encoding={'time':{'units':'days since 1900-01-01'}})
ds_ev_windon.to_netcdf(FOLDER_EV_NUTS0+capacity_distribution+'/ERA5-EU_EV_TYNDP-'+capacity_distribution+'_WON_'+str(year)+'.nc', encoding={'time':{'units':'days since 1900-01-01'}})
ds_ev_solar.to_netcdf(FOLDER_EV_NUTS0+capacity_distribution+'/ERA5-EU_EV_TYNDP-'+capacity_distribution+'_SPV_'+str(year)+'.nc', encoding={'time':{'units':'days since 1900-01-01'}})
# Converting to Pandas
df_windoff = ds_ev_windoff.to_pandas()
df_windon = ds_ev_windon.to_pandas()
df_solar = ds_ev_solar.to_pandas()
# Saving as CSV
df_windoff.to_csv(FOLDER_EV_NUTS0+'csv/ERA5-EU_EV_TYNDP-'+capacity_distribution+'_WOF_'+str(year)+'.csv')
df_windon.to_csv(FOLDER_EV_NUTS0+'csv/ERA5-EU_EV_TYNDP-'+capacity_distribution+'_WON_'+str(year)+'.csv')
df_solar.to_csv(FOLDER_EV_NUTS0+'csv/ERA5-EU_EV_TYNDP-'+capacity_distribution+'_SPV_'+str(year)+'.csv')
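# Layout note (an assumption consistent with how the datasets are built above):
# to_pandas() returns a DataFrame with the hourly time stamps as index and one
# column per country, so the CSVs can be read back with, for example,
# pd.read_csv(csv_file, index_col=0, parse_dates=True)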
| 24,490 | 57.590909 | 337 |
py
|
EnergyVariables
|
EnergyVariables-main/src/archive/2_TYNDP_MarketZones.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Remade on Sun 30 May 2021 22:32
@author: Laurens Stoop - [email protected]
"""
#%%
# =============================================================================
# Dependencies
# =============================================================================
## Importing modules
import xarray as xr
import numpy as np
# import regionmask
# import geopandas as gpd
import datetime
import pandas as pd
# import matplotlib.pyplot as plt
# import os.path
# Select the years to run
years = np.array([
# '1950', '1951', '1952',
# '1953', '1954', '1955',
# '1956', '1957', '1958',
# '1959', '1960', '1961',
# '1962', '1963', '1964',
# '1965', '1966', '1967',
# '1968', '1969', '1970',
# '1971', '1972', '1973',
# '1974', '1975', '1976',
# '1977', '1978',
'1979', '1980', '1981',
'1982', '1983', '1984',
'1985', '1986', '1987',
'1988', '1989', '1990',
'1991', '1992', '1993',
'1994', '1995', '1996',
'1997', '1998', '1999',
'2000', '2001', '2002',
'2003', '2004', '2005',
'2006', '2007', '2008',
'2009', '2010', '2011',
'2012', '2013',
'2014',
'2015', '2016', '2017',
'2018', '2019',
'2020'
])
# Versions of the TYNDP
CD_TYNDP_input = np.array([
'DE_2030',
'DE_2040',
'GA_2030',
'GA_2040',
'NT_2025',
'NT_2030',
'NT_2040'
])
# Set the path for the data
PATH_TO_TYNDP = '/media/DataStager1/Other/CapacityDistribution/TYNDP/Originv3/'
# Read NetCDF
FOLDER_ERA5_CF_NUTS0 = '/media/DataStager2/ERA5-EU_CF/MarketZones/'
FOLDER_EV_NUTS0 = '/media/DataStager2/ERA5-EU_EV/MarketZones/'
print('NOTIFY: Initialization is complete, Skynet active')
#%%
# =============================================================================
# Load in the MarketZone data
# =============================================================================
# Set the year to run over
for year in years:
print('NOTIFY: Working on year '+year+'!')
# Load in the NetCDF
ds_cf_solar = xr.open_dataset(FOLDER_ERA5_CF_NUTS0+'ERA5-EU_CF-MarketZones_solar_'+str(year)+'.nc') #, chunks = {'time': 8760})
ds_cf_windoff = xr.open_dataset(FOLDER_ERA5_CF_NUTS0+'ERA5-EU_CF-MarketZones_windoff_'+str(year)+'.nc') #, chunks = {'time': 8760})
ds_cf_windon = xr.open_dataset(FOLDER_ERA5_CF_NUTS0+'ERA5-EU_CF-MarketZones_windon_'+str(year)+'.nc') #, chunks = {'time': 8760})
#%%
# =============================================================================
# Load in the datafiles with the capacity distributions
# =============================================================================
# Select the distribution to run over
# capacity_distribution = CD_TYNDP_input[0]
for capacity_distribution in CD_TYNDP_input:
print('NOTIFY: Working on Distribution '+capacity_distribution+'!')
# Read in the Capacity Distribution from the TYNDP
df_cd_tyndp = pd.read_csv(PATH_TO_TYNDP+'TYNDP-'+capacity_distribution+'.csv' )
# Set the index nicely
df_cd_tyndp = df_cd_tyndp.set_index('Country')
# now transpose the data
df_cd_tyndp = df_cd_tyndp.transpose()
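# Layout assumption (based on the transpose above): df_cd_tyndp now has one row
# per zone code and one column per technology, so a single installed capacity is
# read as, for example (zone code shown only for illustration),
# df_cd_tyndp.loc['NL00'].loc['Solar PV']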
#%%
# =============================================================================
# Multiply the capacity distribution with the capacity factor
# =============================================================================
# Set a new dataset for energy variables
ds_ev_solar = xr.Dataset()
ds_ev_windoff = xr.Dataset()
ds_ev_windon = xr.Dataset()
# The countries we now loop
for country in df_cd_tyndp.index:
#%% working on solar
# Define the capacity installed
country_cap_distr_solar = df_cd_tyndp.loc[country].loc['Solar PV']
# If this capacity is not defined, do not calculate
if country_cap_distr_solar.size == 0 or country_cap_distr_solar == 0:
print('There is no solar capacity for '+country)
# Fix the Greek names to international standard
elif country == 'EL00':
ds_ev_solar[country] = country_cap_distr_solar * ds_cf_solar['GR00']
elif country == 'EL03':
ds_ev_solar[country] = country_cap_distr_solar * ds_cf_solar['GR03']
# Fix Luxembourg (it appears as LUG/LUF/LUB in the TYNDP table)
elif country == 'LUG1':
ds_ev_solar['LU00'] = df_cd_tyndp.loc['LUG1'].loc['Solar PV'] * ds_cf_solar['LU00']
# Apply the solar capacity distribution
else:
# apply the cap distribution
ds_ev_solar[country] = country_cap_distr_solar * ds_cf_solar[country]
#%% working on onshore wind
# Define the capacity installed
country_cap_distr_windon = df_cd_tyndp.loc[country].loc['Onshore Wind']
# If this capacity is not defined, do not calculate
if country_cap_distr_windon.size == 0 or country_cap_distr_windon == 0:
print('There is no onshore wind capacity for '+country)
# Fix the Greek names to international standard
elif country == 'EL00':
ds_ev_windon[country] = country_cap_distr_windon * ds_cf_windon['GR00']
elif country == 'EL03':
ds_ev_windon[country] = country_cap_distr_windon * ds_cf_windon['GR03']
# Fix Luxembourg (it appears as LUG/LUF/LUB in the TYNDP table)
elif country == 'LUG1':
ds_ev_windon['LU00'] = df_cd_tyndp.loc['LUG1'].loc['Onshore Wind'] * ds_cf_windon['LU00']
# Apply the onshore wind capacity distribution
else:
# apply the cap distribution
ds_ev_windon[country] = country_cap_distr_windon * ds_cf_windon[country]
#%% working on offshore wind
# Define the capacity installed
country_cap_distr_windoff = df_cd_tyndp.loc[country].loc['Offshore Wind']
# If this capacity is not defined, do not calculate
if country_cap_distr_windoff.size == 0 or country_cap_distr_windoff == 0:
print('There is no offshore capacity for '+country)
# Fix the small eastern German region (mapped to the national offshore CF)
elif country == 'DEKF':
ds_ev_windoff[country] = country_cap_distr_windoff * ds_cf_windoff['DE00_OFF']
# Fix the Greek names to international standard
elif country == 'EL00':
ds_ev_windoff[country] = country_cap_distr_windoff * ds_cf_windoff['GR00_OFF']
elif country == 'EL03':
ds_ev_windoff[country] = country_cap_distr_windoff * ds_cf_windoff['GR00_OFF']
# Apply the wind offshore capacity distribution
else:
ds_ev_windoff[country] = country_cap_distr_windoff * ds_cf_windoff[country+'_OFF']
#%%
# =============================================================================
# Time to save the data
# =============================================================================
# Setting the general dataset attributes
ds_ev_windoff.attrs.update(
author = 'Laurens Stoop UU/KNMI/TenneT',
variables = 'Wind offshore electricity generation',
units = 'MWh',
created = datetime.datetime.today().strftime('%d-%m-%Y'),
region_definition = 'ENTSO-E MarketZones',
CapacityDistribution = 'TYNDP-'+capacity_distribution,
data_source = 'Energy production variables based on TYNDP scenarios and ERA5 reanalysis data, contains modified Copernicus Climate Change Service information [31-05-2021]'
)
# copy most and update partially
ds_ev_windon.attrs = ds_ev_windoff.attrs
ds_ev_windon.attrs.update(
variables = 'Wind onshore electricity generation',
)
ds_ev_solar.attrs = ds_ev_windoff.attrs
ds_ev_solar.attrs.update(
variables = 'Solar PV electricity generation',
)
# Saving the files as NetCDF
# ds_ev_windoff.to_netcdf(FOLDER_EV_NUTS0+capacity_distribution+'/ERA5-EU_EV_TYNDP-'+capacity_distribution+'_WOF_'+str(year)+'.nc', encoding={'time':{'units':'days since 1900-01-01'}})
# ds_ev_windon.to_netcdf(FOLDER_EV_NUTS0+capacity_distribution+'/ERA5-EU_EV_TYNDP-'+capacity_distribution+'_WON_'+str(year)+'.nc', encoding={'time':{'units':'days since 1900-01-01'}})
# ds_ev_solar.to_netcdf(FOLDER_EV_NUTS0+capacity_distribution+'/ERA5-EU_EV_TYNDP-'+capacity_distribution+'_SPV_'+str(year)+'.nc', encoding={'time':{'units':'days since 1900-01-01'}})
# Converting to Pandas
df_windoff = ds_ev_windoff.to_pandas()
df_windon = ds_ev_windon.to_pandas()
df_solar = ds_ev_solar.to_pandas()
# Saving as CSV
df_windoff.to_csv(FOLDER_EV_NUTS0+capacity_distribution+'/ERA5-EU_EV_TYNDP-'+capacity_distribution+'_WOF_'+str(year)+'.csv')
df_windon.to_csv(FOLDER_EV_NUTS0+capacity_distribution+'/ERA5-EU_EV_TYNDP-'+capacity_distribution+'_WON_'+str(year)+'.csv')
df_solar.to_csv(FOLDER_EV_NUTS0+capacity_distribution+'/ERA5-EU_EV_TYNDP-'+capacity_distribution+'_SPV_'+str(year)+'.csv')
| 10,284 | 40.471774 | 192 |
py
|
EnergyVariables
|
EnergyVariables-main/src/archive/1_ERA5_ENTSO-E_MarketZones.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Restructured on Sun 26 Jan 2022 17:04
@author: Laurens Stoop - [email protected]
Following example by Matteo de Felice: http://www.matteodefelice.name/post/aggregating-gridded-data/
"""
#%%
# =============================================================================
# Dependencies
# =============================================================================
## Importing modules
import xarray as xr
import numpy as np
import regionmask
import geopandas as gpd
import datetime
# import pandas as pd
# import matplotlib.pyplot as plt
import os.path
# Select the years to run
years = np.array([
# '1950', '1951', '1952',
# '1953', '1954', '1955',
# '1956', '1957', '1958',
# '1959', '1960', '1961',
# '1962', '1963', '1964',
# '1965', '1966', '1967',
# '1968', '1969', '1970',
# '1971', '1972', '1973',
# '1974', '1975', '1976',
# '1977', '1978',
# '1979', '1980', '1981',
# '1982', '1983', '1984',
# '1985', '1986', '1987',
# '1988', '1989',
'1990',
'1991', '1992', '1993',
'1994', '1995', '1996',
'1997', '1998', '1999',
# '2000', '2001', '2002',
# '2003', '2004', '2005',
# '2006', '2007', '2008',
# '2009', '2010', '2011',
# '2012', '2013', '2014',
# '2015', '2016', '2017',
# '2018', '2019', '2020'
])
# Set the path for the data
PATH_TO_NUTS0 = '/media/DataStager1/Other/RegionDefinitions/ENTSO-E_StudyZones/DTU-PECD22-Polygons_SZ_VF2021.shp'
# PATH_TO_NUTS1 = '/media/DataStager1/Other/RegionDefinitions/ENTSO-E_StudyZones/DTU-PECD22-Polygons_VF2021.shp'
# Read NetCDF
FOLDER_WITH_NETCDF = '/media/DataStager2/ERA5_CF/'
FOLDER_STORE = '/media/DataStager2/ERA5_EV/'
print('NOTIFY: Initialization is complete, Skynet active')
#%%
# =============================================================================
# Load in the base shapefile
# =============================================================================
# Load the shapefile
nuts0 = gpd.read_file(PATH_TO_NUTS0)
# nuts1 = gpd.read_file(PATH_TO_NUTS1)
# There are regions we do not consider
not_considered_nuts0 = [
'JO00', 'JO00_OFF', # Jordan
'MA00', 'MA00_OFF', # Morocco
'SY00', 'SY00_OFF', # Syria
'TN00', 'TN00_OFF', # Tunisia
'IS00', 'IS00_OFF', # Iceland
'IL00', 'IL00_OFF', # Israel
'PS00', 'PS00_OFF', # Palestine & Gaza
'EG00', 'EG00_OFF', # Egypt
'DZ00', 'DZ00_OFF', # Algeria
'LY00', 'LY00_OFF', # Libya
# Regions not considered due to resolution or model constraints
'SI00_OFF', # Slovenia's offshore zone is too small for ERA5 data
'BA00_OFF', # Bosnia and Herzegovina's offshore zone is too small for ERA5 data
'MT00', # Malta is too small for data on the island
]
# Now set all nuts0 regions we do not consider to NaN's
for NC in not_considered_nuts0:
nuts0 = nuts0.where(nuts0['Study Zone'] != NC)
# Removal of all NaN's from the table
nuts0 = nuts0.dropna()
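# Small consistency sketch (an assumption, not part of the original script):
# after the filtering none of the excluded zones should remain in the table.
# assert not set(not_considered_nuts0) & set(nuts0['Study Zone'])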
# There is an empty LY00 zone in there
# nuts1.iloc[246]
# nuts1 = nuts1.drop(index=246)
# To check some info you could read the headers of the shapefiles
# nuts0.head() # to check the contents --> 121 on-/offshore definitions
# nuts1.head() # to check the contents --> 262 on-/offshore definitions
#%%
# =============================================================================
# Load in the data files themselves
# =============================================================================
# The mega loop
for year in years:
# Define the file name
file_save_solar = FOLDER_STORE+'ERA5-EU_CF-MarketZones_solar_'+str(year)+'.nc'
file_save_windoff = FOLDER_STORE+'ERA5-EU_CF-MarketZones_windoff_'+str(year)+'.nc'
file_save_windon = FOLDER_STORE+'ERA5-EU_CF-MarketZones_windon_'+str(year)+'.nc'
# Check if the file already exists; if so, skip it
if os.path.isfile(file_save_windon) == True:
# Tell us the file exists
print('NOTIFY: Already applied for year '+year+'!')
# If the file doesn't exist, apply the distribution
elif os.path.isfile(file_save_windon) == False:
print('NOTIFY: Working on year '+year+'!')
# Load in the NetCDF
ds = xr.open_mfdataset(FOLDER_WITH_NETCDF+'ERA5-EU_CF_'+str(year)+'.nc') #, chunks = {'time': 8760})
#%%
# =============================================================================
# Now we define the regionmask, to be applied later
# =============================================================================
# CALCULATE MASK
SZ0_mask_poly = regionmask.Regions(name = 'ENTSO-E_StudyZone0_Mask', numbers = np.arange(0,len(nuts0)), abbrevs = list(nuts0['Study Zone']), outlines = list(nuts0.geometry.values[i] for i in np.arange(0,len(nuts0)))) # names = list(nuts0['Study Zone']),
# SZ1_mask_poly = regionmask.Regions(name = 'ENTSO-E_StudyZone1_Mask', numbers = list(range(0,262)), abbrevs = list(nuts1['Code']), outlines = list(nuts1.geometry.values[i] for i in range(0,262))) # names = list(nuts1['Study Zone']),
# print(nuts_mask_poly)
# Create the mask
mask = SZ0_mask_poly.mask(ds.isel(time = 0), method = None)
# mask = SZ0_mask_poly.mask(ds.isel(time = 0), method = None)
# mask # To check the contents of the mask defined
#%%
# =============================================================================
# Now selecting a region to select the data
# =============================================================================
# Prepare a dataset for filling with regional mean data
ds_solarCF = xr.Dataset()
ds_windCF_off = xr.Dataset()
ds_windCF_on = xr.Dataset()
# Select a region (the Netherlands is 12/54 in NUTS0)
for ID_REGION in np.arange(0,len(nuts0)):
# Determine the region name
region_name = nuts0.iloc[ID_REGION]['Study Zone']
print('######: working on region '+region_name+' ('+str(ID_REGION)+'/'+str(len(nuts0))+') !')
# Select the lat/lon combo's as vector to use later
lat = mask.lat.values
lon = mask.lon.values
# We select the region under consideration
sel_mask = mask.where(mask == ID_REGION).values
# Select the specific lat/lon combo that is the minimum bounding box
id_lon = lon[np.where(~np.all(np.isnan(sel_mask), axis=0))]
id_lat = lat[np.where(~np.all(np.isnan(sel_mask), axis=1))]
# Following Matteo's approach: slice to the minimum bounding box, load it into memory with compute(), then mask out everything outside the region
out_sel = ds.sel(lat = slice(id_lat[0], id_lat[-1]), lon = slice(id_lon[0], id_lon[-1])).compute().where(mask == ID_REGION)
# # =============================================================================
# # A quick figure for the sanity checks
# # =============================================================================
# plt.figure(figsize=(12,8))
# ax = plt.axes()
# out_sel.solarCF.isel(time = 4140).plot(ax = ax)
# nuts0.plot(ax = ax, alpha = 0.8, facecolor = 'none')
#%%
# =============================================================================
# Regional mean for saving data
# =============================================================================
# Offshore regions only have wind
if region_name == 'MT00_OFF':
# fixing the too-small region for Malta, where we do need data
ds_windCF_off[region_name] = out_sel.windCF_off.groupby('time').mean(...)
ds_windCF_on['MT00'] = out_sel.windCF_on.groupby('time').mean(...)
ds_solarCF['MT00'] = out_sel.solarCF.groupby('time').mean(...)
elif region_name[-4:] == '_OFF':
# Save the regional mean into the main dataset under the region's name
ds_windCF_off[region_name] = out_sel.windCF_off.groupby('time').mean(...)
# Non-offshore regions have wind and solar installed
else:
# Save the regional mean of the onshore wind and solar CF's
ds_windCF_on[region_name] = out_sel.windCF_on.groupby('time').mean(...)
ds_solarCF[region_name] = out_sel.solarCF.groupby('time').mean(...)
#%%
# =============================================================================
# Setting units & saving
# =============================================================================
# Setting the general dataset attributes
ds_solarCF.attrs.update(
author = 'Laurens Stoop UU/KNMI/TenneT',
variables = 'Solar PV capacity factor for a specific region',
units = '[0-1]',
created = datetime.datetime.today().strftime('%d-%m-%Y'),
map_area = 'Europe',
region_definition = 'ENTSO-E StudyZones at national level aggregated',
data_source = 'Capacity factors based on ERA5 reanalysis data, contains modified Copernicus Climate Change Service information [31-05-2021]'
)
# Setting the general dataset attributes
ds_windCF_off.attrs.update(
author = 'Laurens Stoop UU/KNMI/TenneT',
variables = 'Wind Offshore capacity factor for a specific region',
units = '[0-1]',
created = datetime.datetime.today().strftime('%d-%m-%Y'),
map_area = 'Europe',
region_definition = 'ENTSO-E StudyZones at national level aggregated',
data_source = 'Capacity factors based on ERA5 reanalysis data, contains modified Copernicus Climate Change Service information [31-05-2021]'
)
# Setting the general dataset attributes
ds_windCF_on.attrs.update(
author = 'Laurens Stoop UU/KNMI/TenneT',
variables = 'Wind onshore capacity factor for a specific region',
units = '[0-1]',
created = datetime.datetime.today().strftime('%d-%m-%Y'),
map_area = 'Europe',
region_definition = 'ENTSO-E StudyZones at national level aggregated',
data_source = 'Capacity factors based on ERA5 reanalysis data, contains modified Copernicus Climate Change Service information [31-05-2021]'
)
# Saving the file
ds_solarCF.to_netcdf(file_save_solar, encoding={'time':{'units':'days since 1900-01-01'}})
ds_windCF_off.to_netcdf(file_save_windoff, encoding={'time':{'units':'days since 1900-01-01'}})
ds_windCF_on.to_netcdf(file_save_windon, encoding={'time':{'units':'days since 1900-01-01'}})
| 11,419 | 41.932331 | 261 |
py
|
EnergyVariables
|
EnergyVariables-main/src/archive/2_CountryDefinitions.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Restructured on Wed 11 Nov 2020 14:15
@author: Laurens Stoop - [email protected]
"""
#%%
# =============================================================================
# Dependencies
# =============================================================================
## Importing modules
import numpy as np
import xarray as xr
import salem
# Set the path for the data
path_TYNDP = '/media/DataDrive/Other/CapacityDistribution/TYNDP/'
path_RegionDefinition = '/media/DataDrive/Other/RegionDefinitions/'
path_CFdata = '/media/DataGate3/ERA5-EU_CF/'
print('NOTIFY: Initialization is complete, Skynet active')
#%%
# =============================================================================
# Load in the base grid file, select the regions, and save the country masks
# =============================================================================
# Select the data for the whole region
ds = salem.open_xr_dataset(path_TYNDP+'constant.nc')
# Select the shapefile with the Exclusive Economic Zones (EEZ) for all countries in the world
shdf_eez = salem.read_shapefile(path_RegionDefinition+'EEZ/EEZ_land_v2_201410.shp')
shdf_nuts = salem.read_shapefile(path_RegionDefinition+'nuts-2016/NUTS_0/NUTS_RG_01M_2016_4326_LEVL_0.shp')
# Extra data for outliers
shdf_BA = salem.read_shapefile(path_RegionDefinition+'BA_Bosnia_Herzegovina_adm0/BIH_adm0.shp')
shdf_UA = salem.read_shapefile(path_RegionDefinition+'UA_Ukraine_adm0/ukr_admbnda_adm0_q2_sspe_20171221.shp')
# The nations for which we want info
countrylist = np.array([
# ['Albania','AL'],
# ['Austria','AT'],
# ['Bosnia & Herzegovina','BA'],
# ['Belgium','BE'],
# ['Bulgaria','BG'],
# ['Switzerland','CH'],
# ['Cyprus','CY'],
# ['Czech Republic','CZ'],
# ['Denmark','DK'],
# ['Germany','DE'],
# ['Estonia','EE'],
# ['Greece','EL'],
# ['Spain','ES'],
# ['Finland','FI'],
# ['France','FR'],
# ['Croatia','HR'],
# ['Hungary','HU'],
# ['Ireland','IE'],
# # # ['Iceland','IS'],
# ['Italy', 'IT'],
# ['Lithuania','LT'],
# # # ['Liechtenstein','LI'],
# ['Luxembourg','LU'],
# ['Latvia','LV'],
# ['Montenegro', 'ME'],
# ['Macedonia', 'MK'],
# ['Malta','MT'],
['Netherlands','NL'], #
['Norway','NO'], #
['Poland','PL'], #
['Portugal','PT'],
['Romania','RO'],
['Serbia', 'RS'],
['Sweden','SE'],
['Slovenia','SI'],
['Slovakia','SK'],
['Turkey','TR'],
['Ukraine', 'UA'],
['United Kingdom','UK']
])
#%%
# =============================================================================
# Looping over countries
# =============================================================================
# For loop over all countries
for country_name, country_code in countrylist:
print('NOTIFY: Now we select the data from '+ country_name)
# filter out the outliers Bosnia & Herzegovina
if country_code =='BA':
# Select a country by name and only use this country from the shape file (shdf)
shape_eez = shdf_eez.loc[shdf_eez['Country'] == country_name]
shape_nuts = shdf_BA.loc[shdf_BA['ISO2'] == country_code]
# Filter out the outlier Ukraine
elif country_code == 'UA':
# Select a country by name and only use this country from the shape file (shdf)
shape_eez = shdf_eez.loc[shdf_eez['Country'] == country_name]
shape_nuts = shdf_UA.loc[shdf_UA['ADM0_EN'] == country_name]
# if the country isn't an outlier, we go ahead
else:
# Select a country by name and only use this country from the shape file (shdf)
shape_eez = shdf_eez.loc[shdf_eez['Country'] == country_name]
shape_nuts = shdf_nuts.loc[shdf_nuts['NUTS_ID'] == country_code]
# Set a subset (dsr) of the DataSet (ds) based on the selected shape (shdf)
ds_eez = ds.salem.subset(shape=shape_eez, margin = 25)
ds_nuts = ds.salem.subset(shape=shape_nuts, margin = 50)
# Select only the region within the subset (dsr) [I am not sure what this does and doesn't do]
ds_eez = ds_eez.salem.roi(shape=shape_eez)
ds_nuts = ds_nuts.salem.roi(shape=shape_nuts)
# Make a quick map to check the selected data, if only one country is selected!
# if np.size(countrylist) == 2:
# # ds_eez.random.salem.quick_map();
# ds_nuts.random.salem.quick_map();
# Fill all non country values with 0
ds_eez = ds_eez.fillna(0.) #1E-20)
ds_nuts = ds_nuts.fillna(0.) #1E-20)
# Define the offshore region as the EEZ minus the onshore part
ds_offshore = ds_eez - ds_nuts
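# Note: because both fields were filled with 0 outside their shapes, the
# subtraction above leaves non-zero values only in EEZ grid cells that are not
# covered by the onshore polygon, i.e. the offshore part of the zone.
# Illustrative check (an assumption, not part of the original workflow):
# print(country_code, float(abs(ds_offshore.to_array()).max()))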
# Save the country mask to a file
ds_offshore.to_netcdf(path_TYNDP+'CountryDefinitions_ERA5-EU/CountryDefinitions_ERA5-EU_offshore_'+country_code+'.nc')
ds_nuts.to_netcdf(path_TYNDP+'CountryDefinitions_ERA5-EU/CountryDefinitions_ERA5-EU_onshore_'+country_code+'.nc')
| 5,181 | 33.778523 | 122 |
py
|
EnergyVariables
|
EnergyVariables-main/src/archive/4_EnergyVariables_ERA5-EU_Zuijlen.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Spyder Editor
Restructured on Wed 10 Nov 2020 14:15
@author: Laurens Stoop - [email protected]
"""
#%%
# =============================================================================
# Dependencies
# =============================================================================
# Importing modules
import xarray as xr
import numpy as np
import datetime
import os.path
# File locations
path_source = '/media/DataStager1/temp/'
path_save = '/media/DataStager1/DataECMLPKDD21/'
# Select the years to run
years = np.array([
'1950', '1951', '1952',
'1953', '1954', '1955',
'1956', '1957', '1958',
'1959', '1960', '1961',
'1962', '1963', '1964',
'1965', '1966', '1967',
'1968', '1969', '1970',
'1971', '1972', '1973',
'1974', '1975', '1976',
'1977', '1978',
'1979', '1980', '1981',
'1982', '1983', '1984',
'1985', '1986', '1987',
'1988', '1989', '1990',
'1991', '1992', '1993',
'1994', '1995', '1996',
'1997', '1998', '1999',
'2000', '2001', '2002',
'2003', '2004', '2005',
'2006', '2007', '2008',
'2009', '2010', '2011',
'2012', '2013', '2014',
'2015', '2016', '2017',
'2018', '2019'
])
print('NOTIFY: Basic setup done, defining functions')
#%%
# =============================================================================
# Function definitions
# =============================================================================
# Run over the years
for year in years:
# Define the file name
file_source = path_source+'ERA5-EU_'+year+'.nc'
file_save = path_save+'ERA5-EU_'+year+'.nc'
# If the file doesn't exist, apply the distribution
if os.path.isfile(file_save) == False:
# Tell us the file exist
print('NOTIFY: Now starting work on applying distribution for year '+year+'!')
# open the source file
with xr.open_dataset(file_source) as ds:
# Open a new dataset
with xr.Dataset() as ds2:
print('------ action (0/2): Apply the distributions')
# Now apply the distributions
ds2['SPV'] = ds.SPV
ds2['WON'] = ds.WON
ds2['WOF'] = ds.WOF
print('------ action (1/2): Force load the new file to do the calculations')
# Force load the file
ds2.load()
# Set the demand attributes
ds2.SPV.attrs.update(
units = 'MWh',
short_name = 'SPV',
long_name = 'SolarPhotoVoltaic',
distribution = 'The distribution of SPV comes from Zuijlen et al.',
method = 'Based on Bett and Thornton, 2016',
description = 'Hourly generation of solar panels')
# Set the demand attributes
ds2.WON.attrs.update(
units = 'MWh',
short_name = 'WON',
description = 'Hourly power generated by onshore wind turbines with a hub height of 98 meter',
method = 'Power curve adapted based on expert feedback',
distribution = 'The distribution of WON comes from van Zuijlen et al. 2019',
long_name = 'WindONshore')
# Set the demand attributes
ds2.WOF.attrs.update(
units = 'MWh',
short_name = 'WOF',
                    description = 'Hourly power generated by offshore wind turbines with hub height 122 meter',
                    method = 'Power curve adapted based on expert feedback',
                    distribution = 'The distribution of WOF comes from van Zuijlen et al. 2019',
long_name = 'WindOFfshore')
            # Store as float32 to reduce file size; no precision is lost because the source data was float32
ds2['SPV'] = ds2.SPV.astype('float32')
ds2['WON'] = ds2.WON.astype('float32')
ds2['WOF'] = ds2.WOF.astype('float32')
print('------ action (2/2): Save the file')
# Now save the file
            # compression = 'zlib'  # zlib keeps the output files small
ds2.to_netcdf(file_save, format='NETCDF4', engine='netcdf4') # ,encoding={'SPV':{compression:True},'WON':{compression:True},'WOF':{compression:True}})
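# A minimal sketch of how the commented-out zlib encoding above could be switched on,
# assuming the netCDF4 engine; variable names follow the dataset built in this script:
#     enc = {v: {'zlib': True, 'complevel': 4} for v in ['SPV', 'WON', 'WOF']}
#     ds2.to_netcdf(file_save, format='NETCDF4', engine='netcdf4', encoding=enc)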
| 5,131 | 37.014815 | 170 |
py
|
EnergyVariables
|
EnergyVariables-main/src/archive/2NEW_CountryDefinitions.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Restructured on Wed 11 Nov 2020 14:15
@author: Laurens Stoop - [email protected]
"""
#%%
# =============================================================================
# Dependencies
# =============================================================================
## Importing modules
import numpy as np
import xarray as xr
import salem
# Set the path for the data
path_TYNDP = '/media/DataDrive/Other/CapacityDistribution/TYNDP/'
path_RegionDefinition = '/media/DataDrive/Other/RegionDefinitions/'
path_CFdata = '/media/DataGate3/ERA5-EU_CF/'
print('NOTIFY: Initialization is complete, Skynet active')
#%%
# =============================================================================
# Load in the base file where stuff can be, select the regions, save the files
# =============================================================================
# Select the data for the whole region
ds = salem.open_xr_dataset(path_TYNDP+'constant.nc')
# Select the shapefile with the Economic Exclusive Zones for all countries in the world
shdf_eez = salem.read_shapefile(path_RegionDefinition+'EEZ/EEZ_land_v2_201410.shp')
shdf_nuts = salem.read_shapefile(path_RegionDefinition+'nuts-2016/NUTS_0/NUTS_RG_01M_2016_4326_LEVL_0.shp')
# Extra data for outliers
shdf_BA = salem.read_shapefile(path_RegionDefinition+'BA_Bosnia_Herzegovina_adm0/BIH_adm0.shp')
shdf_UA = salem.read_shapefile(path_RegionDefinition+'UA_Ukraine_adm0/ukr_admbnda_adm0_q2_sspe_20171221.shp')
# The nations for which we want info
countrylist = np.array([
# ['Albania','AL'],
# ['Austria','AT'],
# ['Bosnia & Herzegovina','BA'],
# ['Belgium','BE'],
# ['Bulgaria','BG'],
# ['Switzerland','CH'],
# ['Cyprus','CY'],
# ['Czech Republic','CZ'],
# ['Denmark','DK'],
# ['Germany','DE'],
# ['Estonia','EE'],
# ['Greece','EL'],
# ['Spain','ES'],
# ['Finland','FI'],
# ['France','FR'],
# ['Croatia','HR'],
# ['Hungary','HU'],
# ['Ireland','IE'],
# # # ['Iceland','IS'],
# ['Italy', 'IT'],
# ['Lithuania','LT'],
# # # ['Liechtenstein','LI'],
# ['Luxembourg','LU'],
# ['Latvia','LV'],
# ['Montenegro', 'ME'],
# ['Macedonia', 'MK'],
# ['Malta','MT'],
['Netherlands','NL'], #
['Norway','NO'], #
['Poland','PL'], #
['Portugal','PT'],
['Romania','RO'],
['Serbia', 'RS'],
['Sweden','SE'],
['Slovenia','SI'],
['Slovakia','SK'],
['Turkey','TR'],
['Ukraine', 'UA'],
['United Kingdom','UK']
])
#%%
# =============================================================================
# Looping over countries
# =============================================================================
# For loop over all countries
for country_name, country_code in countrylist:
print('NOTIFY: Now we select the dish from '+ country_name)
# filter out the outliers Bosnia & Herzegovina
if country_code =='BA':
# Select a country by name and only use this country from the shape file (shdf)
shape_eez = shdf_eez.loc[shdf_eez['Country'] == country_name]
shape_nuts = shdf_BA.loc[shdf_BA['ISO2'] == country_code]
# Filter out the outlier Ukraine
elif country_code == 'UA':
# Select a country by name and only use this country from the shape file (shdf)
shape_eez = shdf_eez.loc[shdf_eez['Country'] == country_name]
shape_nuts = shdf_UA.loc[shdf_UA['ADM0_EN'] == country_name]
# if the country isn't an outlier, we go ahead
else:
# Select a country by name and only use this country from the shape file (shdf)
shape_eez = shdf_eez.loc[shdf_eez['Country'] == country_name]
shape_nuts = shdf_nuts.loc[shdf_nuts['NUTS_ID'] == country_code]
# Set a subset (dsr) of the DataSet (ds) based on the selected shape (shdf)
ds_eez = ds.salem.subset(shape=shape_eez, margin = 25)
ds_nuts = ds.salem.subset(shape=shape_nuts, margin = 50)
    # Mask the subset (dsr) to the region of interest: salem's roi() sets grid cells outside the shape to NaN
ds_eez = ds_eez.salem.roi(shape=shape_eez)
ds_nuts = ds_nuts.salem.roi(shape=shape_nuts)
# Make a quick map to check the selected data, if only one country is selected!
# if np.size(countrylist) == 2:
# # ds_eez.random.salem.quick_map();
# ds_nuts.random.salem.quick_map();
# Fill all non country values with 0
ds_eez = ds_eez.fillna(0.) #1E-20)
ds_nuts = ds_nuts.fillna(0.) #1E-20)
#Define the offshore region
ds_offshore = ds_eez - ds_nuts
# Save the country mask to a file
ds_offshore.to_netcdf(path_TYNDP+'CountryDefinitions_ERA5-EU/CountryDefinitions_ERA5-EU_offshore_'+country_code+'.nc')
ds_nuts.to_netcdf(path_TYNDP+'CountryDefinitions_ERA5-EU/CountryDefinitions_ERA5-EU_onshore_'+country_code+'.nc')
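# A small worked example of why fillna(0.) precedes the subtraction above (values made up):
#     eez  = [1, 1, 1, 0]          EEZ mask of the country (land + sea)
#     nuts = [1, 1, 0, 0]          onshore (NUTS) mask
#     eez - nuts = [0, 0, 1, 0]    -> 1 only over the offshore cells
# With NaNs left in place the subtraction would propagate NaN instead of 0 outside the shapes.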
| 5,181 | 33.778523 | 122 |
py
|
EnergyVariables
|
EnergyVariables-main/src/archive/3_DistributeCapacity-TYNDP.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Spyder Editor
Restructured on Tue 10 Nov 2020 14:15
@author: Laurens Stoop - [email protected]
"""
#%%
# =============================================================================
# Dependencies
# =============================================================================
# Importing modules
import xarray as xr
import numpy as np
import datetime
import pandas as pd
# File locations
file_path = '/media/DataGate3/ERA5-EU_CF/'
file_path = '/media/DataDrive/Other/CacpacityDistribution/TYNDP/CountryTotal/'
#file_path = '/home/stoop/Documents/Data/ERA5/'
print('NOTIFY: Basic setup done, defining functions')
| 676 | 18.342857 | 79 |
py
|
BL-TESSsearch
|
BL-TESSsearch-main/recreate-Traas/FindPlotEvents_pipeline.py
|
# Combined pipeline to simplify analysis of turboSETI files
# Uses both find_event_pipeline and plot_event_pipeline turboSETI methods
# to create waterfall plots of the events found in a full cadence
def FindPlotEvents(dataDir, threshold=3):
    '''
    dataDir   : string with directory housing both the .dat and .h5 files of one cadence
    threshold : turboSETI filter threshold passed to find_event_pipeline
    returns   : None; saves waterfall plots of any events found in the cadence
    '''
import os, glob
from turbo_seti.find_event.find_event_pipeline import find_event_pipeline
from turbo_seti.find_event.plot_event_pipeline import plot_event_pipeline
#%matplotlib inline
# create .lst file for .h5 files
h5list = sorted(glob.glob(dataDir + '/*.h5'))
h5listPath = os.path.join(dataDir, 'h5-list.lst')
with open(h5listPath, 'w') as L:
for h5 in h5list:
L.write(h5 + '\n')
# create .lst file for .dat files
datlist = sorted(glob.glob(dataDir + '/*.dat'))
datlistPath = os.path.join(dataDir, 'dat-list.lst')
with open(datlistPath, 'w') as L:
for dat in datlist:
L.write(dat+'\n')
# run find_event_pipeline
print('####################### Beginning Find Event Pipeline #######################')
csvPath = os.path.join(dataDir, 'events-list.csv')
find_event_pipeline(datlistPath, filter_threshold=threshold, number_in_cadence=len(datlist), csv_name=csvPath, saving=True);
# run plot_event_pipeline
print()
print('####################### Beginning Plot Event Pipeline #######################')
plot_event_pipeline(csvPath, h5listPath, filter_spec=f'{threshold}', user_validation=False)
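# A minimal usage sketch, assuming the six .dat/.h5 files of one ON-OFF cadence share a
# single directory; the path below is a placeholder, not a real data location.
if __name__ == '__main__':
    import os
    example_dir = '/datax/scratch/example_cadence'  # placeholder
    if os.path.isdir(example_dir):
        FindPlotEvents(example_dir, threshold=3)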
| 1,589 | 36.857143 | 128 |
py
|
BL-TESSsearch
|
BL-TESSsearch-main/Target-Selection/gbt-inTransit.py
|
# Imports
import os
import pandas as pd
import numpy as np
import pymysql
import urllib
from barycorrpy import utc_tdb
from astropy.time import Time
from astropy.coordinates import EarthLocation, SkyCoord
from astropy import units as u
# tunable variables for later
numobs = 3 # Set minimum number of observations for a target to be recorded
# Get TESS Targets in GBT go_scans database
BLtargets = pymysql.connect(host=os.environ['BLIP'],user=os.environ['BLUSR'],
password=os.environ['BLPASS'],database="BLtargets")
BLquery = """
SELECT *
FROM go_scans
WHERE target_name LIKE 'TIC%'
"""
go_scans = pd.read_sql(BLquery, BLtargets)
# Modify go_scans database for further calculations
go_scans['TIC ID'] = go_scans.target_name.apply(lambda v : int(v[3:]))
gbt = go_scans[['TIC ID', 'target_name', 'utc_observed', 'session']]
# Get full dataframe of TESS candidates
url = 'https://exofop.ipac.caltech.edu/tess/download_toi.php?sort=toi&output=csv'
toiPath = os.path.join(os.getcwd(), 'TESS-toi.csv')
urllib.request.urlretrieve(url, toiPath)
TESStoi = pd.read_csv(toiPath)
# Find TESS targets that transit during GBT observations
inTransitID = []
inTransitSession = []
gbtloc = EarthLocation.of_site('Green Bank Telescope')
for ticid in gbt['TIC ID'].unique():
gbtInfo = gbt.loc[gbt['TIC ID'] == ticid]
TESSinfo = TESStoi.loc[TESStoi['TIC ID'] == ticid]
epoch = TESSinfo['Epoch (BJD)'].to_numpy()[0] # BJD
period = TESSinfo['Period (days)'].to_numpy()[0]
transitLength = TESSinfo['Duration (hours)'].to_numpy()[0]
dist = TESSinfo['Stellar Distance (pc)'].to_numpy()[0]
PMRA = float(TESSinfo['PM RA (mas/yr)'].to_numpy()[0])
PMdec = float(TESSinfo['PM Dec (mas/yr)'].to_numpy()[0])
ra = TESSinfo['RA'].to_numpy()[0]
dec = TESSinfo['Dec'].to_numpy()[0]
coords = SkyCoord(ra, dec, unit=(u.hourangle, u.deg), frame='icrs')
obsTime = gbtInfo['utc_observed'].to_numpy()
session = gbtInfo['session'].to_numpy()
    parallax = (1/dist) * 1e3 # 1/distance[pc] is the parallax in arcsec; convert to mas for barycorrpy
# Convert Observed time to BJD
tUTC = Time(obsTime, location=gbtloc)
tbjd = utc_tdb.JDUTC_to_BJDTDB(tUTC, ra=float(coords.to_string().split()[0]),
dec=float(coords.to_string().split()[1]),
pmra=PMRA, pmdec=PMdec,
px= parallax,
obsname='Green Bank Telescope')[0]
tbjd = tbjd[np.isfinite(tbjd)]
# Does the GBT observation occur during a transit?
if len(obsTime) >= numobs and period != 0: # Check for at least 3 obs
tt = transitLength/24/2 # half transit time and convert to days
# Create list of center transit times for each TIC ID
for ii, obst in enumerate(tbjd):
diff = np.abs(obst-epoch)
numRot = int(np.ceil(diff/period))
centerTransitTimes = []
t = epoch
for i in range(numRot):
centerTransitTimes.append(t)
if obst < epoch: # check if gbt obs happened before or after epoch
t-=period
else:
t+=period
            # The last value in the list is the transit centre reached by stepping from the epoch toward the observing time:
epochf = centerTransitTimes[-1]
startTransit = epochf - tt
endTransit = epochf + tt
if obst > startTransit and obst < endTransit:
inTransitID.append(ticid)
inTransitSession.append(session[ii])
# Extract go_scans info for transiting TESS Targets
outFrame = []
for tic, sess in zip(inTransitID, inTransitSession):
mask = (go_scans['TIC ID'] == tic) & (go_scans['session'] == sess)
if len(mask[mask==True]) >= numobs: # make sure target has 3 obs.
outFrame.append(go_scans[mask])
targets = pd.concat(outFrame).drop_duplicates()
outFilePath = os.path.join(os.getcwd(), 'TESStargets.csv')
# Return csv file and number of unique TESS targets found transiting
targets.to_csv(outFilePath)
print(f'{len(targets.target_name.unique())} TESS targets found in transit')
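# A minimal sketch of the transit-centre search above in closed form; `last_transit_center`
# is a hypothetical helper, shown only to clarify the arithmetic of the period-stepping loop
# (it reproduces the loop's final value except when the observation lands exactly on a centre).
def last_transit_center(obst, epoch, period):
    n = np.trunc((obst - epoch) / period)  # signed whole number of periods from epoch toward obst
    return epoch + n * period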
| 4,140 | 32.395161 | 82 |
py
|
BL-TESSsearch
|
BL-TESSsearch-main/Target-Selection/locate-TIC-files/read-targets-location.py
|
import numpy as np
import csv
targetLocs = np.loadtxt('targets-location.csv', dtype=str)
targets = np.loadtxt('target-list.csv', dtype=str)
whereTIC = {}
for target in targets:
ticid = target[1:-1]
listPaths = []
print('starting on {}'.format(ticid))
for path in targetLocs:
if path.find(ticid) != -1:
listPaths.append(path)
whereTIC[ticid] = listPaths
print()
print(whereTIC)
outFile = 'known-TIC-paths.csv'
with open(outFile, 'w') as f:
writer = csv.writer(f)
for key, val in whereTIC.items():
writer.writerow([key, val])
| 584 | 19.172414 | 58 |
py
|
BL-TESSsearch
|
BL-TESSsearch-main/analysis/plot_dat_mod.py
|
import os, glob, sys
from turbo_seti.find_event.plot_dat import plot_dat
from turbo_seti import find_event as find
import numpy as np
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--dir', default=os.getcwd())
parser.add_argument('--minHit', type=float, default=None)
parser.add_argument('--maxHit', type=float, default=None)
args = parser.parse_args()
    path = args.dir
    # NOTE: paths below are built by plain string concatenation, so --dir should end with a trailing '/'
    dat_files = glob.glob(path + "*.dat")
min_hit = 1e9
max_hit = 0
    if args.minHit is None or args.maxHit is None:
for file in dat_files:
tbl = find.read_dat(file)
min_freq, max_freq = min(tbl["Freq"]), max(tbl["Freq"])
if min_freq < min_hit:
min_hit = min_freq
if max_freq > max_hit:
max_hit = max_freq
else:
min_hit = args.minHit
max_hit = args.maxHit # set min and max hits by hand just to get this image
print("Lowest frequency hit: ", min_hit)
print("Highext frequency hit: ", max_hit)
plot_range = 2000*1e-6 # a 2000Hz width, adjusted to be in units of MHz
freq_range = np.arange(np.round(min_hit, 2), np.round(max_hit), plot_range)
outDir = path + "bautista-analysis/"
if not os.path.exists(outDir):
os.mkdir(outDir)
for center in freq_range:
plot_dat(path + "dat-list.lst",
path + "h5-list.lst",
path + "events-list.csv",
outdir=outDir,
check_zero_drift=False,
alpha=0.65,
color="black",
window=(center-0.001, center+0.001))
if __name__ == '__main__':
sys.exit(main())
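# A minimal usage sketch (placeholder directory; note the trailing '/' expected by the
# string concatenation above):
#     python3 plot_dat_mod.py --dir /datax/scratch/example_cadence/ --minHit 1100.0 --maxHit 1900.0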
| 1,710 | 29.553571 | 83 |
py
|
BL-TESSsearch
|
BL-TESSsearch-main/analysis/noahf_plot_event_pipeline.py
|
#!/usr/bin/env python3
r"""
Front-facing script to plot drifting, narrowband events in a set of generalized
cadences of ON-OFF radio SETI observations.
"""
import os
from operator import attrgetter
import pandas
from blimpy import Waterfall
import noahf_plot_event as plot_event
class PathRecord:
r''' Definition of an H5 path record '''
def __init__(self, path_h5, tstart, source_name):
self.path_h5 = path_h5
self.tstart = tstart
self.source_name = source_name
def __repr__(self):
return repr((self.path_h5, self.tstart, self.source_name))
def plot_event_pipeline(event_csv_string, fils_list_string, user_validation=False,
offset=0, filter_spec=None, sortby_tstart=True, plot_dir=None,
transit_times=None):
r"""
This function calls :func:`~turbo_seti.find_event.plot_event.plot_candidate_events` to
plot the events in an output .csv file generated by find_event_pipeline.py
Parameters
----------
event_csv_string : str
The string name of a .csv file that contains the
list of events at a given filter level, created as
output from find_event_pipeline.py. The
.csv should have a filename containing information
about its parameters, for example
"kepler1093b_0015_f2_snr10.csv"
Remember that the file was created with some cadence
(ex. ABACAD) and ensure that the cadence matches the
order of the files in fils_list_string
fils_list_string : str
The string name of a plaintext file ending in .lst
that contains the filenames of .fil files, each on a
new line, that corresponds to the cadence used to
create the .csv file used for event_csv_string.
user_validation : bool, optional
A True/False flag that, when set to True, asks if the
user wishes to continue with their input parameters
(and requires a 'y' or 'n' typed as confirmation)
before beginning to run the program. Recommended when
first learning the program, not recommended for
automated scripts.
offset : int, optional
The amount that the overdrawn "best guess" line from
the event parameters in the csv should be shifted from
its original position to enhance readability. Can be
set to 0 (default; draws line on top of estimated
event) or 'auto' (shifts line to the left by an auto-
        calculated amount, with additional lines showing original
position).
    filter_spec : str, optional
        Filter-level string used in the output file names; if None it is parsed from
        the third underscore-separated token of the .csv filename.
    sortby_tstart : bool
        If True, the input file list is sorted by header.tstart.
    plot_dir : str, optional
        Directory in which the PNG files are written (defaults to the directory of the
        first data file).
    transit_times : list, optional
        Per-panel transit start/end times forwarded to the waterfall plotter so that
        ingress and egress can be overdrawn.
Examples
--------
>>> import plot_event_pipeline;
... plot_event_pipeline.plot_event_pipeline(event_csv_string, fils_list_string,
... user_validation=False, offset=0)
"""
#reading in the .csv containing the events
try:
candidate_event_dataframe = pandas.read_csv(event_csv_string, comment='#')
print("plot_event_pipeline: Opened file {}".format(event_csv_string))
    except Exception:
print("*** plot_event_pipeline: Oops, cannot access file {}".format(event_csv_string))
return
fil_file_list = []
for file in pandas.read_csv(fils_list_string, encoding='utf-8', header=None, chunksize=1):
fil_file_list.append(file.iloc[0,0])
#obtaining source names
source_name_list = []
path_record = []
for fil in fil_file_list:
wf = Waterfall(fil, load_data=False)
source_name = wf.container.header["source_name"]
source_name_list.append(source_name)
tstart = wf.container.header["tstart"]
path_record.append(PathRecord(fil, tstart, source_name))
# If sorting by header.tstart, then rewrite the dat_file_list in header.tstart order.
if sortby_tstart:
path_record = sorted(path_record, key=attrgetter('tstart'))
fil_file_list = []
for obj in path_record:
fil_file_list.append(obj.path_h5)
print("plot_event_pipeline: file = {}, tstart = {}, source_name = {}"
.format(os.path.basename(obj.path_h5), obj.tstart, obj.source_name))
else:
for obj in path_record:
print("plot_event_pipeline: file = {}, tstart = {}, source_name = {}"
.format(os.path.basename(obj.path_h5), obj.tstart, obj.source_name))
#get rid of bytestring "B'"s if they're there (early versions of
#seti_event.py added "B'"s to all of the source names)
on_source_name_original = candidate_event_dataframe.Source[0]
if on_source_name_original[0] == 'B' and on_source_name_original[-1] == '\'':
on_source_name = on_source_name_original[2:-2]
else:
on_source_name = on_source_name_original
candidate_event_dataframe = candidate_event_dataframe.replace(to_replace=on_source_name_original,
value=on_source_name)
# Establish filter-level from filter_spec (preferred)
# or 3rd token of the .csv path (don't break an existing caller)
if filter_spec is None:
filter_level = event_csv_string.split('_')[2]
else:
filter_level = filter_spec
#begin user validation
print("Plotting some events for: ", on_source_name)
print("There are " + str(len(candidate_event_dataframe.Source)) + " total events in the csv file " + event_csv_string)
print("therefore, you are about to make " + str(len(candidate_event_dataframe.Source)) + " .png files.")
if user_validation:
question = "Do you wish to proceed with these settings?"
while "the answer is invalid":
reply = str(input(question+' (y/n): ')).lower().strip()
if reply == '':
return
if reply[0] == 'y':
break
if reply[0] == 'n':
return
#move to plot_event.py for the actual plotting
plot_event.plot_candidate_events(candidate_event_dataframe,
fil_file_list,
filter_level,
source_name_list,
offset=offset,
plot_dir=plot_dir,
transit=transit_times)
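# A minimal usage sketch, assuming an events CSV and .h5 list produced by
# find_event_pipeline; paths are placeholders and transit_times is optional:
#     plot_event_pipeline('events-list.csv', 'h5-list.lst',
#                         filter_spec='3', user_validation=False)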
| 6,336 | 40.418301 | 122 |
py
|
BL-TESSsearch
|
BL-TESSsearch-main/analysis/assign-band.py
|
import os, sys, glob
import subprocess as sp
import numpy as np
from analysis_pipeline import getPaths, multiCommand
from blimpy.io.hdf_reader import H5Reader
def band(dir, tol=0.7):
    # Receiver band edges in GHz; a directory is assigned to the first band whose
    # edges both lie within `tol` GHz of the directory's observed frequency range
    L = [1.10, 1.90]
    S = [1.80, 2.80]
    C = [4.00, 7.80]
    X = [7.80, 11.20]
h5List = glob.glob(dir+'/*.h5')
minFreqs = []
maxFreqs = []
for file in h5List:
h5 = H5Reader(file, load_data=False)
        hdr = h5.read_header()
        # header frequencies are in MHz; convert the file's frequency range to GHz
        maxf = hdr['fch1'] * 10**-3
        minf = maxf - np.abs(hdr['foff']*hdr['nchans'])*10**-3
minFreqs.append(minf)
maxFreqs.append(maxf)
dirMinf = min(minFreqs)
dirMaxf = max(maxFreqs)
#print(f'min frequency: {dirMinf}\nmax frequency: {dirMaxf}')
if abs(dirMinf-L[0]) < tol and abs(dirMaxf-L[1]) < tol:
return 'L'
elif abs(dirMinf-S[0]) < tol and abs(dirMaxf-S[1]) < tol:
return 'S'
elif abs(dirMinf-C[0]) < tol and abs(dirMaxf-C[1]) < tol:
return 'C'
elif abs(dirMinf-X[0]) < tol and abs(dirMaxf-X[1]) < tol:
return 'X'
else:
return 'NA'
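# A minimal sketch of the same tolerance test on bare frequency limits (GHz), without
# reading any .h5 files; `classify_band` is a hypothetical helper, not used elsewhere.
def classify_band(dir_min_ghz, dir_max_ghz, tol=0.7):
    edges = {'L': (1.10, 1.90), 'S': (1.80, 2.80), 'C': (4.00, 7.80), 'X': (7.80, 11.20)}
    for name, (lo, hi) in edges.items():
        if abs(dir_min_ghz - lo) < tol and abs(dir_max_ghz - hi) < tol:
            return name
    return 'NA'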
def main():
dirs = getPaths()
cmds = []
    for dir in dirs:
        dd = dir[10:]   # strip the '/mnt_blcXX' prefix to get the node-local path
        print(f'Finding band for {dir}')
        if len(dir) != 0:
            node = dir[5:10]   # compute-node name, e.g. 'blc00'
b = band(dir)
print(f'Appending band = {b} to file name')
cmd = ['ssh', node, f'mv {dd} {dd}_{b}']
cmds.append(cmd)
print('Running: ', cmd)
ps = multiCommand(cmds)
for p in ps:
p.communicate()
if __name__ == '__main__':
sys.exit(main())
| 1,602 | 20.958904 | 65 |
py
|
BL-TESSsearch
|
BL-TESSsearch-main/analysis/noahf_plot_event.py
|
#!/usr/bin/env python3
r'''
Backend script to plot drifting, narrowband events in a generalized cadence of
ON-OFF radio SETI observations. The main function contained in this file is
:func:`~.plot_candidate_events` uses the other helper functions
in this file (described below) to plot events from a turboSETI event .csv file.
'''
from os import mkdir
from os.path import dirname, abspath, isdir
import gc
import logging
logger_plot_event_name = 'plot_event'
logger_plot_event = logging.getLogger(logger_plot_event_name)
logger_plot_event.setLevel(logging.INFO)
# Plotting packages import
import matplotlib
import matplotlib.pyplot as plt
matplotlib.use('agg')
# Math/Science package imports
import numpy as np
from astropy.time import Time
# BL imports
import blimpy as bl
from blimpy.utils import rebin
# preliminary plot arguments
fontsize=16
font = {'family' : 'DejaVu Sans',
'size' : fontsize}
MAX_IMSHOW_POINTS = (4096, 1268)
def overlay_drift(f_event, f_start, f_stop, drift_rate, t_duration, offset=0, alpha=1, color='#cc0000'):
r'''
Creates a dashed red line at the recorded frequency and drift rate of
the plotted event - can overlay the signal exactly or be offset by
some amount (offset can be 0 or 'auto').
'''
# determines automatic offset and plots offset lines
if offset == 'auto':
offset = ((f_start - f_stop) / 10)
plt.plot((f_event - offset, f_event),
(10, 10),
"o-",
c=color,
lw=2,
alpha=alpha)
# plots drift overlay line, with offset if desired
plt.plot((f_event + offset, f_event + drift_rate/1e6 * t_duration + offset),
(0, t_duration),
c=color,
ls='dashed', lw=2,
alpha=alpha)
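# A short worked example of the drift-line arithmetic above (numbers made up): a drift
# rate of 2 Hz/s over a 300 s panel spans 2 * 300 = 600 Hz = 6e-4 MHz, which is why
# drift_rate is divided by 1e6 (Hz/s -> MHz/s) before being multiplied by t_duration.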
def plot_waterfall(wf, source_name, f_start=None, f_stop=None, transit_times=None, **kwargs):
r"""
Plot waterfall of data in a .fil or .h5 file.
Parameters
----------
wf : blimpy.Waterfall object
Waterfall object of an H5 or Filterbank file containing the dynamic spectrum data.
source_name : str
Name of the target.
f_start : float
Start frequency, in MHz.
f_stop : float
Stop frequency, in MHz.
transit_times : list w/ len 2
list of transit start and transit end
kwargs : dict
Keyword args to be passed to matplotlib imshow().
Notes
-----
Plot a single-panel waterfall plot (frequency vs. time vs. intensity)
for one of the on or off observations in the cadence of interest, at the
frequency of the expected event. Calls :func:`~overlay_drift`
"""
# prepare font
matplotlib.rc('font', **font)
# Load in the data from fil
plot_f, plot_data = wf.grab_data(f_start=f_start, f_stop=f_stop)
# Make sure waterfall plot is under 4k*4k
dec_fac_x, dec_fac_y = 1, 1
# rebinning data to plot correctly with fewer points
try:
if plot_data.shape[0] > MAX_IMSHOW_POINTS[0]:
dec_fac_x = plot_data.shape[0] / MAX_IMSHOW_POINTS[0]
if plot_data.shape[1] > MAX_IMSHOW_POINTS[1]:
dec_fac_y = int(np.ceil(plot_data.shape[1] / MAX_IMSHOW_POINTS[1]))
plot_data = rebin(plot_data, dec_fac_x, dec_fac_y)
except Exception as ex:
print('\n*** Oops, grab_data returned plot_data.shape={}, plot_f.shape={}'
.format(plot_data.shape, plot_f.shape))
print('Waterfall info for {}:'.format(wf.filename))
wf.info()
raise ValueError('*** Something is wrong with the grab_data output!') from ex
# Rolled back PR #82
# determine extent of the plotting panel for imshow
extent=(plot_f[0], plot_f[-1], (wf.timestamps[-1]-wf.timestamps[0])*24.*60.*60, 0.0)
# plot and scale intensity (log vs. linear)
kwargs['cmap'] = kwargs.get('cmap', 'viridis')
plot_data = 10.0 * np.log10(plot_data)
# get normalization parameters
vmin = plot_data.min()
vmax = plot_data.max()
normalized_plot_data = (plot_data - vmin) / (vmax - vmin)
# display the waterfall plot
this_plot = plt.imshow(normalized_plot_data,
aspect='auto',
rasterized=True,
interpolation='nearest',
extent=extent,
**kwargs
)
if transit_times:
start_transit, stop_transit = min(transit_times), max(transit_times)
        if start_transit > min(wf.timestamps):
            plt.axhline(start_transit, color='r', linestyle='--', label='Transit Start')
        if stop_transit < max(wf.timestamps):
            plt.axhline(stop_transit, color='r', linestyle='--', label='Transit End')
# add plot labels
plt.xlabel("Frequency [Hz]",fontdict=font)
plt.ylabel("Time [s]",fontdict=font)
# add source name
ax = plt.gca()
plt.text(0.03, 0.8, source_name, transform=ax.transAxes, bbox=dict(facecolor='white'))
# if plot_snr != False:
# plt.text(0.03, 0.6, plot_snr, transform=ax.transAxes, bbox=dict(facecolor='white'))
# return plot
del plot_f, plot_data
gc.collect()
return this_plot
def make_waterfall_plots(fil_file_list, on_source_name, f_start, f_stop, drift_rate, f_mid,
filter_level, source_name_list, offset=0, plot_dir=None, plotTransit=None,
**kwargs):
r'''
Makes waterfall plots of an event for an entire on-off cadence.
Parameters
----------
fil_file_list : str
List of filterbank files in the cadence.
on_source_name : str
Name of the on_source target.
f_start : float
Start frequency, in MHz.
f_stop : float
Stop frequency, in MHz.
drift_rate : float
Drift rate in Hz/s.
f_mid : float
        Middle frequency of the event, in MHz.
filter_level : int
Filter level (1, 2, or 3) that produced the event.
source_name_list : list
List of source names in the cadence, in order.
    offset : int or str, optional
        Shift applied to the overdrawn drift line (0 or 'auto').
    plot_dir : str, optional
        Directory in which the PNG files are written.
    plotTransit : list, optional
        Per-panel transit start/end times to overlay on the waterfall panels.
kwargs : dict
Keyword args to be passed to matplotlib imshow().
Notes
-----
Makes a series of waterfall plots, to be read from top to bottom, displaying a full cadence
at the frequency of a recorded event from find_event. Calls :func:`~plot_waterfall`
'''
global logger_plot_event
# prepare for plotting
matplotlib.rc('font', **font)
# set up the sub-plots
n_plots = len(fil_file_list)
fig = plt.subplots(n_plots, sharex=True, sharey=True,figsize=(10, 2*n_plots))
# get directory path for storing PNG files
if plot_dir is None:
dirpath = dirname(abspath(fil_file_list[0])) + '/'
else:
if not isdir(plot_dir):
mkdir(plot_dir)
dirpath = plot_dir
# read in data for the first panel
max_load = bl.calcload.calc_max_load(fil_file_list[0])
#print('plot_event make_waterfall_plots: max_load={} is required for {}'.format(max_load, fil_file_list[0]))
wf1 = bl.Waterfall(fil_file_list[0], f_start=f_start, f_stop=f_stop, max_load=max_load)
t0 = wf1.header['tstart']
plot_f1, plot_data1 = wf1.grab_data()
# rebin data to plot correctly with fewer points
dec_fac_x, dec_fac_y = 1, 1
if plot_data1.shape[0] > MAX_IMSHOW_POINTS[0]:
dec_fac_x = plot_data1.shape[0] / MAX_IMSHOW_POINTS[0]
if plot_data1.shape[1] > MAX_IMSHOW_POINTS[1]:
dec_fac_y = int(np.ceil(plot_data1.shape[1] / MAX_IMSHOW_POINTS[1]))
plot_data1 = rebin(plot_data1, dec_fac_x, dec_fac_y)
# define more plot parameters
# never used: delta_f = 0.000250
mid_f = np.abs(f_start+f_stop)/2.
subplots = []
del wf1, plot_f1, plot_data1
gc.collect()
# Fill in each subplot for the full plot
for ii, filename in enumerate(fil_file_list):
logger_plot_event.debug('make_waterfall_plots: file {} in list: {}'.format(ii, filename))
# identify panel
subplot = plt.subplot(n_plots, 1, ii + 1)
subplots.append(subplot)
# read in data
max_load = bl.calcload.calc_max_load(filename)
#print('plot_event make_waterfall_plots: max_load={} is required for {}'.format(max_load, filename))
wf = bl.Waterfall(filename, f_start=f_start, f_stop=f_stop, max_load=max_load)
# make plot with plot_waterfall
source_name = source_name_list[ii]
if plotTransit:
this_plot = plot_waterfall(wf,
source_name,
f_start=f_start,
f_stop=f_stop,
                                       transit_times=plotTransit[ii],
**kwargs)
else:
this_plot = plot_waterfall(wf,
source_name,
f_start=f_start,
f_stop=f_stop,
**kwargs)
# calculate parameters for estimated drift line
t_elapsed = Time(wf.header['tstart'], format='mjd').unix - Time(t0, format='mjd').unix
t_duration = (wf.n_ints_in_file - 1) * wf.header['tsamp']
f_event = f_mid + drift_rate / 1e6 * t_elapsed
# plot estimated drift line
overlay_drift(f_event, f_start, f_stop, drift_rate, t_duration, offset)
# Title the full plot
if ii == 0:
plot_title = "%s \n $\\dot{\\nu}$ = %2.3f Hz/s, MJD:%5.5f" % (on_source_name, drift_rate, t0)
plt.title(plot_title)
# Format full plot
if ii < len(fil_file_list)-1:
plt.xticks(np.linspace(f_start, f_stop, num=4), ['','','',''])
del wf
gc.collect()
# More overall plot formatting, axis labelling
factor = 1e6
units = 'Hz'
#ax = plt.gca()
#ax.get_xaxis().get_major_formatter().set_useOffset(False)
xloc = np.linspace(f_start, f_stop, 5)
xticks = [round(loc_freq) for loc_freq in (xloc - mid_f)*factor]
if np.max(xticks) > 1000:
xticks = [xt/1000 for xt in xticks]
units = 'kHz'
plt.xticks(xloc, xticks)
plt.xlabel("Relative Frequency [%s] from %f MHz"%(units,mid_f),fontdict=font)
# Add colorbar
cax = fig[0].add_axes([0.94, 0.11, 0.03, 0.77])
fig[0].colorbar(this_plot,cax=cax,label='Normalized Power (Arbitrary Units)')
# Adjust plots
plt.subplots_adjust(hspace=0,wspace=0)
# save the figures
path_png = dirpath + str(filter_level) + '_' + on_source_name + '_dr_' + "{:0.2f}".format(drift_rate) + '_freq_' "{:0.6f}".format(f_start) + ".png"
plt.savefig(path_png, bbox_inches='tight')
logger_plot_event.debug('make_waterfall_plots: Saved file {}'.format(path_png))
# show figure before closing if this is an interactive context
mplbe = matplotlib.get_backend()
logger_plot_event.debug('make_waterfall_plots: backend = {}'.format(mplbe))
if mplbe != 'agg':
plt.show()
# close all figure windows
plt.close('all')
return subplots
def plot_candidate_events(candidate_event_dataframe, fil_file_list, filter_level, source_name_list,
offset=0, plot_dir=None, transit=None, **kwargs):
r'''
Calls :func:`~make_waterfall_plots` on each event in the input .csv file.
Arguments
---------
candidate_event_dataframe : dict
A pandas dataframe containing information
about a candidate event. The necessary data
includes the start and stop frequencies, the
drift rate, and the source name. To determine
the required variable names and formatting
conventions, see the output of
find_event_pipeline.
fil_file_list : list
A Python list that contains a series of
strings corresponding to the filenames of .fil
files, each on a new line, that corresponds to
the cadence used to create the .csv file used
for event_csv_string.
filter_level : int
A string indicating the filter level of the
cadence used to generate the
candidate_event_dataframe. Used only for
output file naming, convention is "f1", "f2",
or "f3". Descriptions for the three levels of
filtering can be found in the documentation
for find_event.py
source_name_list : list
A Python list that contains a series of strings
corresponding to the source names of the
cadence in chronological (descending through
the plot panels) cadence.
offset : int, optional
The amount that the overdrawn "best guess"
line from the event parameters in the csv
should be shifted from its original position
to enhance readability. Can be set to 0
(default; draws line on top of estimated
event) or 'auto' (shifts line to the left by
        an auto-calculated amount, with additional lines
showing original position).
kwargs : dict
Examples
--------
It is highly recommended that users interact with this program via the
front-facing plot_event_pipeline.py script. See the usage of that file in
its own documentation.
If you would like to run plot_candidate_events without calling
plot_event_pipeline.py, the usage is as follows:
>>> plot_event.plot_candidate_events(candidate_event_dataframe, fil_file_list,
... filter_level, source_name_list, offset=0)
'''
global logger_plot_event
# load in the data for each individual hit
if candidate_event_dataframe is None:
print('*** plot_candidate_events: candidate_event_dataframe is None, nothing to do.')
return
len_df = len(candidate_event_dataframe)
if len_df < 1:
print('*** plot_candidate_events: len(candidate_event_dataframe) = 0, nothing to do.')
return
for i in range(0, len_df):
candidate = candidate_event_dataframe.iloc[i]
on_source_name = candidate['Source']
f_mid = candidate['Freq']
drift_rate = candidate['DriftRate']
# calculate the length of the total cadence from the fil files' headers
first_fil = bl.Waterfall(fil_file_list[0], load_data=False)
tfirst = first_fil.header['tstart']
last_fil = bl.Waterfall(fil_file_list[-1], load_data=False)
tlast = last_fil.header['tstart']
t_elapsed = Time(tlast, format='mjd').unix - Time(tfirst, format='mjd').unix + (last_fil.n_ints_in_file -1) * last_fil.header['tsamp']
# calculate the width of the plot based on making sure the full drift is visible
bandwidth = 2.4 * abs(drift_rate)/1e6 * t_elapsed
bandwidth = np.max((bandwidth, 500./1e6))
# Get start and stop frequencies based on midpoint and bandwidth
f_start, f_stop = np.sort((f_mid - (bandwidth/2), f_mid + (bandwidth/2)))
# logger_plot_event.debug useful values
logger_plot_event.debug('*************************************************')
logger_plot_event.debug('*** The Parameters for This Plot Are: ****')
logger_plot_event.debug('Target = {}'.format(on_source_name))
logger_plot_event.debug('Bandwidth = {} MHz'.format(round(bandwidth, 5)))
logger_plot_event.debug('Time Elapsed (inc. Slew) = {} s'.format(round(t_elapsed)))
logger_plot_event.debug('Middle Frequency = {} MHz'.format(round(f_mid, 4)))
logger_plot_event.debug('Expected Drift = {} Hz/s'.format(round(drift_rate, 4)))
logger_plot_event.debug('*************************************************')
# Pass info to make_waterfall_plots() function
make_waterfall_plots(fil_file_list,
on_source_name,
f_start,
f_stop,
drift_rate,
f_mid,
filter_level,
source_name_list,
offset=offset,
plot_dir=plot_dir,
plotTransit=transit,
**kwargs)
| 16,187 | 36.472222 | 151 |
py
|
BL-TESSsearch
|
BL-TESSsearch-main/analysis/analysis_pipeline.py
|
import os, glob, sys
import subprocess as sp
import numpy as np
def getPaths():
cmd = 'find /mnt_blc*/datax2/scratch/noahf/ -type d -name *TOI*'
find = sp.Popen(cmd, stdout=sp.PIPE, shell=True)
dirs = find.communicate()[0].split(b'\n')
dirsToReturn = []
for dir in dirs:
dd = dir.decode()
if dd[-7:] != '-copied':
dirsToReturn.append(dd)
return dirsToReturn
def multiCommand(commands, slowdebug=False):
'''
Run n commands on n compute nodes
    commands [list] : list of commands to run, one per compute node; each command is an
                      argument list for subprocess.Popen (typically ['ssh', node, remote_cmd]),
                      and the first command is launched on the first node, etc.
slowdebug [bool] : if True, prints subprocess output as it goes
returns list of subprocess Popen objects, one for each compute node
'''
# Run on separate compute nodes
ps = []
for cmd in commands:
ssh = sp.Popen(cmd, universal_newlines=True, stdout=sp.PIPE, stderr=sp.PIPE, stdin=sp.PIPE)
ps.append(ssh)
if slowdebug:
print(ssh.stdout.readlines(), ssh.stderr.readlines())
return ps
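# A minimal sketch of the intended call pattern; `run_on_nodes` is a hypothetical helper
# (not used by main()) that builds one ssh command per compute node and fans them out.
def run_on_nodes(nodes, remote_cmd):
    return multiCommand([['ssh', node, remote_cmd] for node in nodes])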
def main():
allDirs = getPaths()
condaenv = '/home/noahf/miniconda3/bin/activate'
cmds = []
for dd in allDirs:
if len(dd) > 1:
node = dd[5:10]
cmd = ['ssh', node, f"source {condaenv} runTurbo ; python3 ~/BL-TESSsearch/analysis/FindPlot.py --dir {dd[10:]}"]
cmds.append(cmd)
print(f'Running {cmd}')
ps = multiCommand(cmds)
for p in ps:
p.communicate()
if __name__ == '__main__':
sys.exit(main())
| 1,669 | 24.692308 | 125 |
py
|
BL-TESSsearch
|
BL-TESSsearch-main/analysis/find_target_transitTimes.py
|
# Imports
import os
import pandas as pd
import numpy as np
import pymysql
import urllib
from barycorrpy import utc_tdb
from astropy.time import Time
from astropy.coordinates import EarthLocation, SkyCoord
from astropy import units as u
# tunable variables for later
numobs = 3 # Set minimum number of observations for a target to be recorded
# Get TESS Targets in GBT go_scans database
BLtargets = pymysql.connect(host=os.environ['BLIP'],user=os.environ['BLUSR'],
password=os.environ['BLPASS'],database="BLtargets")
BLquery = """
SELECT *
FROM go_scans
WHERE target_name LIKE 'TIC%'
"""
go_scans = pd.read_sql(BLquery, BLtargets)
# Modify go_scans database for further calculations
go_scans['TIC ID'] = go_scans.target_name.apply(lambda v : int(v[3:]))
gbt = go_scans[['TIC ID', 'target_name', 'utc_observed', 'session']]
TESStoi = pd.read_csv('/home/noahf/BL-TESSsearch/Target-Selection/TESS-toi.csv')
# Find TESS targets that transit during GBT observations
inTransitID = []
inTransitSession = []
transitTimes = {'ticid' : [], 'session' : [], 'ingress' : [], 'egress' : []}
gbtloc = EarthLocation.of_site('Green Bank Telescope')
go_scans['StartObs'] = np.ones(len(go_scans))
for ticid in gbt['TIC ID'].unique():
gbtInfo = gbt.loc[gbt['TIC ID'] == ticid]
TESSinfo = TESStoi.loc[TESStoi['TIC ID'] == ticid]
epoch = TESSinfo['Epoch (BJD)'].to_numpy()[0] # BJD
period = TESSinfo['Period (days)'].to_numpy()[0]
transitLength = TESSinfo['Duration (hours)'].to_numpy()[0]
dist = TESSinfo['Stellar Distance (pc)'].to_numpy()[0]
PMRA = float(TESSinfo['PM RA (mas/yr)'].to_numpy()[0])
PMdec = float(TESSinfo['PM Dec (mas/yr)'].to_numpy()[0])
ra = TESSinfo['RA'].to_numpy()[0]
dec = TESSinfo['Dec'].to_numpy()[0]
coords = SkyCoord(ra, dec, unit=(u.hourangle, u.deg), frame='icrs')
obsTime = gbtInfo['utc_observed'].to_numpy()
session = gbtInfo['session'].to_numpy()
    parallax = (1/dist) * 1e3 # 1/distance[pc] is the parallax in arcsec; convert to mas for barycorrpy
# Convert Observed time to BJD
tUTC = Time(obsTime, location=gbtloc)
tbjd = utc_tdb.JDUTC_to_BJDTDB(tUTC, ra=float(coords.to_string().split()[0]),
dec=float(coords.to_string().split()[1]),
pmra=PMRA, pmdec=PMdec,
px= parallax,
obsname='Green Bank Telescope')[0]
tbjd = tbjd[np.isfinite(tbjd)]
whereTID = np.where(go_scans['TIC ID'] == ticid)[0]
if len(tbjd) > 0:
go_scans.iloc[whereTID, -1] = tbjd
# Does the GBT observation occur during a transit?
if len(obsTime) >= numobs and period != 0: # Check for at least 3 obs
tt = transitLength/24/2 # half transit time and convert to days
# Create list of center transit times for each TIC ID
for ii, obst in enumerate(tbjd):
diff = np.abs(obst-epoch)
numRot = int(np.ceil(diff/period))
centerTransitTimes = []
t = epoch
for i in range(numRot):
centerTransitTimes.append(t)
if obst < epoch: # check if gbt obs happened before or after epoch
t-=period
else:
t+=period
# Since last value in transit time list is closest to observing time:
epochf = centerTransitTimes[-1]
startTransit = epochf - tt
endTransit = epochf + tt
if obst > startTransit and obst < endTransit:
transitTimes['ticid'].append(ticid)
transitTimes['session'].append(session[ii])
transitTimes['ingress'].append(startTransit)
transitTimes['egress'].append(endTransit)
#transitTimes['StartObs'].append(obst)
# Extract go_scans info for transiting TESS Targets
outFrame = []
go_scans['ingress'] = np.ones(len(go_scans['TIC ID']))
go_scans['egress'] = np.ones(len(go_scans['TIC ID']))
# go_scans['StartObs'] = np.ones(len(go_scans['TIC ID']))
for ii in range(len(transitTimes['ticid'])):
tic = transitTimes['ticid'][ii]
sess = transitTimes['session'][ii]
mask = np.where((go_scans['TIC ID'].to_numpy() == tic) & (go_scans['session'].to_numpy() == sess))[0]
if len(mask) >= numobs: # make sure target has 3 obs
go_scans.iloc[mask, -2] = transitTimes['ingress'][ii]
go_scans.iloc[mask, -1] = transitTimes['egress'][ii]
# go_scans.iloc[mask, -1] = transitTimes['StartObs'][ii]
outFrame.append(go_scans.iloc[mask])
times = pd.concat(outFrame)
pd.options.display.float_format = '{:.10f}'.format
#print(times[times.target_name == 'TIC121338379'])
# Get observation end times
totObsTime = 10/60/24 # days
times['StartObs_max'] = times['StartObs']
groupedTime = times.groupby(['target_name', 'receiver']).agg({
'StartObs': 'min',
'StartObs_max' : 'max',
'TIC ID' : 'min',
'session' : 'min',
'ingress' : 'min',
'egress' : 'min'
}).reset_index()
groupedTime['EndObs'] = groupedTime['StartObs_max'] + totObsTime
print(groupedTime.EndObs - groupedTime.StartObs)
# totObsTime = 30/60/24 # days
# groupedTime = times.drop_duplicates(['target_name', 'receiver'])
# withEnd = groupedTime.assign(EndObs = groupedTime['StartObs'] + totObsTime)
# Return csv file and number of unique TESS targets found transiting
groupedTime.to_csv('TransitTimes.csv')
print(groupedTime)
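# A short worked example of the parallax conversion used above (values made up):
# a target at 50 pc has parallax 1/50 = 0.02 arcsec = 20 mas, the unit barycorrpy expects.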
| 5,498 | 32.944444 | 105 |
py
|
BL-TESSsearch
|
BL-TESSsearch-main/analysis/FindPlot.py
|
# Combined pipeline to simplify analysis of turboSETI files
# Uses both find_event_pipeline and plot_event_pipeline turboSETI methods
# to create waterfall plots of the events found in a full cadence
import os, glob, sys
import subprocess as sp
import urllib
import pandas as pd
import pymysql
import numpy as np
from barycorrpy import utc_tdb
from astropy.coordinates import SkyCoord, EarthLocation
from astropy.time import Time
from astropy import units as u
def getLen(dir):
files = glob.glob(dir+'/*.dat')
#print(files)
return len(files)
def FindTransitTimes(dataDir):
'''
Queries the TESS TOI webpage and go_scans database to get information
on the transits of the ON TESS target to plot the start and end
    returns : list with one [transit start, transit end] pair per observation,
              in seconds relative to that observation's start time
'''
# Get full dataframe of TESS candidates
toiPath = os.path.join(os.getcwd(), 'TESS-toi.csv')
if not os.path.exists(toiPath):
url = 'https://exofop.ipac.caltech.edu/tess/download_toi.php?sort=toi&output=csv'
urllib.request.urlretrieve(url, toiPath)
TESStoi = pd.read_csv(toiPath)
# Get TESS Targets in GBT go_scans database
BLclient = pymysql.connect(host=os.environ['GCP_IP'],user=os.environ['GCP_USR'],
password=os.environ['GCP_PASS'],database="FileTracking")
BLquery = """
SELECT *
FROM `infiles`
WHERE turboSETI='TRUE'
"""
go_scans = pd.read_sql(BLquery, BLclient)
# Get timing info on TESS target
onTarget = sorted(glob.glob(dataDir + '/*.dat'))[0].split('.')[0].split('_')[-2]
on_toi = np.where(TESStoi['TIC ID'].to_numpy() == int(onTarget[3:]))[0]
on_scans = np.where(go_scans['toi'].to_numpy() == onTarget)[0]
epoch = TESStoi['Epoch (BJD)'].to_numpy()[on_toi]
period = TESStoi['Period (days)'].to_numpy()[on_toi]
tt = TESStoi['Duration (hours)'].to_numpy()[on_toi]/24/2
obsTime = go_scans['obs_time'].to_numpy()[on_scans]
    # index with on_toi so the values belong to the ON target rather than the first TOI row
    dist = TESStoi['Stellar Distance (pc)'].to_numpy()[on_toi][0]
    PMRA = float(TESStoi['PM RA (mas/yr)'].to_numpy()[on_toi][0])
    PMdec = float(TESStoi['PM Dec (mas/yr)'].to_numpy()[on_toi][0])
    ra = TESStoi['RA'].to_numpy()[on_toi][0]
    dec = TESStoi['Dec'].to_numpy()[on_toi][0]
coords = SkyCoord(ra, dec, unit=(u.hourangle, u.deg), frame='icrs')
    parallax = (1/dist) * 1e3 # 1/distance[pc] is the parallax in arcsec; convert to mas for barycorrpy
# Convert
gbtloc = EarthLocation.of_site('Green Bank Telescope')
tUTC = Time(obsTime, format='mjd', scale='utc', location=gbtloc)
tbjd = utc_tdb.JDUTC_to_BJDTDB(tUTC, ra=float(coords.to_string().split()[0]),
dec=float(coords.to_string().split()[1]),
pmra=PMRA, pmdec=PMdec,
px=parallax,
obsname='Green Bank Telescope')[0]
transitTimes = []
for obst in tbjd:
diff = np.abs(obst-epoch)
numRot = int(np.ceil(diff/period))
centerTransitTimes = []
t = epoch
for i in range(numRot):
centerTransitTimes.append(t)
if obst < epoch: # check if gbt obs happened before or after epoch
t-=period
else:
t+=period
# Since last value in transit time list is closest to observing time:
epochf = centerTransitTimes[-1] # Units of days in BJD
startTransit = epochf - tt
endTransit = epochf + tt
start_end = np.array([startTransit[0], endTransit[0]])
normTimes = (start_end - obst) * 24 * 3600
transitTimes.append(normTimes)
return transitTimes
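# A minimal sketch of the return shape, with made-up numbers: one [ingress, egress] pair
# per observation, in seconds relative to that observation's start, e.g.
#     [array([-1800.,  5400.]), array([-9000., -1800.]), ...]
# so a transit already underway when a scan starts shows up as a negative ingress.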
def FindPlotEvents_1cad(dataDir, threshold=3, transitTimes=True):
'''
dataDir : string with directory housing both the .dat and .h5 files
returns : waterfall plots of data
'''
from turbo_seti.find_event.find_event_pipeline import find_event_pipeline
if transitTimes:
transitTimes = FindTransitTimes(dataDir)
print(transitTimes)
else:
transitTimes = None
# create .lst file for .h5 files
h5list = sorted(glob.glob(dataDir + '/*.h5'))
h5listPath = os.path.join(dataDir, 'h5-list.lst')
if len(h5list) != 0:
with open(h5listPath, 'w') as L:
for h5 in h5list:
L.write(h5 + '\n')
else:
print(f'Using existing h5 list under h5-list.lst')
# create .lst file for .dat files
datlist = sorted(glob.glob(dataDir + '/*.dat'))
datlistPath = os.path.join(dataDir, 'dat-list.lst')
with open(datlistPath, 'w') as L:
for dat in datlist:
L.write(dat+'\n')
if len(datlist) == 6:
# run find_event_pipeline
print('####################### Beginning Find Event Pipeline #######################')
csvPath = os.path.join(dataDir, 'events-list.csv')
find_event_pipeline(datlistPath, filter_threshold=threshold, number_in_cadence=len(datlist), csv_name=csvPath, saving=True);
# run plot_event_pipeline
print()
print('####################### Beginning Plot Event Pipeline #######################')
if os.path.exists(csvPath):
if transitTimes:
# Import local functions
from noahf_plot_event_pipeline import plot_event_pipeline
plot_event_pipeline(csvPath, h5listPath, filter_spec=f'{threshold}', user_validation=False, transit_times=transitTimes)
else:
from turbo_seti.find_event.plot_event_pipeline import plot_event_pipeline
plot_event_pipeline(csvPath, h5listPath, filter_spec=f'{threshold}', user_validation=False)
else:
raise Exception(f'length of input cadence to find_event_pipeline is {len(datlist)} not 6')
def FindPlotEvents_ncad(dataDir, threshold=3, transitTimes=True):
from turbo_seti.find_event.find_event_pipeline import find_event_pipeline
#from blimpy import Waterfall
from blimpy.io.hdf_reader import H5Reader
if transitTimes:
transitTimes = FindTransitTimes(dataDir)
print(transitTimes)
else:
transitTimes = None
# create .lst file for .h5 files
h5list = np.array(sorted(glob.glob(dataDir + '/*.h5')))
datlist = np.array(sorted(glob.glob(dataDir + '/*.dat')))
cns = np.array([file.split('/')[-1][:5] for file in h5list])
h5cadences = []
datcadences = []
for cn in np.unique(cns):
if cn[0] == 'b':
wherecn = np.where(cns == cn)[0]
h5cad = h5list[wherecn]
datcad = datlist[wherecn]
fch1s = []
nchans = []
for file in h5cad:
h5 = H5Reader(file, load_data=False)
hdr = h5.read_header()
fch1s.append(hdr['fch1'])
nchans.append(hdr['nchans'])
print(fch1s, nchans)
if len(np.unique(fch1s)) == 1 and len(np.unique(nchans)) == 1:
h5cadences.append(h5cad)
datcadences.append(datcad)
else:
raise Exception('More than one cadence in directory')
for ii in range(len(h5cadences)):
h5listPath = os.path.join(dataDir, f'h5-list-{ii}.lst')
with open(h5listPath, 'w') as L:
for h5 in h5cadences[ii]:
L.write(h5 + '\n')
datlistPath = os.path.join(dataDir, f'dat-list-{ii}.lst')
with open(datlistPath, 'w') as L:
for dat in datcadences[ii]:
L.write(dat+'\n')
if len(datcadences[ii]) == 6:
# run find_event_pipeline
print('####################### Beginning Find Event Pipeline #######################')
csvPath = os.path.join(dataDir, f'events-list-{ii}.csv')
find_event_pipeline(datlistPath, filter_threshold=threshold, number_in_cadence=len(datcadences[ii]), csv_name=csvPath, saving=True);
# run plot_event_pipeline
print()
print('####################### Beginning Plot Event Pipeline #######################')
if os.path.exists(csvPath):
if transitTimes:
# Import local functions
from noahf_plot_event_pipeline import plot_event_pipeline
plot_event_pipeline(csvPath, h5listPath, filter_spec=f'{threshold}', user_validation=False, transit_times=transitTimes)
else:
from turbo_seti.find_event.plot_event_pipeline import plot_event_pipeline
plot_event_pipeline(csvPath, h5listPath, filter_spec=f'{threshold}', user_validation=False)
else:
print('No events to plot :(')
else:
#raise Exception(f'length of input cadence to find_event_pipeline is {len(datcadences[ii])} not 6')
print('WARNING length of cadence does not equal 6, skipping this cadence')
continue
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--dir', default=os.getcwd())
    parser.add_argument('--threshold', type=int, default=3)
parser.add_argument('--transitTimes', default=False)
args = parser.parse_args()
dirl = getLen(args.dir)
print(f'Running on {dirl} files')
    if dirl == 6: # exactly one cadence worth of spliced .dat files
print('Running on one cadence')
FindPlotEvents_1cad(args.dir, threshold=args.threshold, transitTimes=args.transitTimes)
elif dirl > 6 and dirl%6 == 0:
print(f'Running on {dirl/6} cadences')
FindPlotEvents_ncad(args.dir, threshold=args.threshold, transitTimes=args.transitTimes)
else:
print('This directory has an odd number of files')
with open(os.path.join(args.dir, 'analysis.log'), 'w') as log:
log.write('This directory had an odd number of files and can be removed')
if __name__ == '__main__':
sys.exit(main())
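# A minimal usage sketch (placeholder path). Note that argparse passes --transitTimes
# through as a string, so any non-empty value (even 'False') is treated as truthy here:
#     python3 FindPlot.py --dir /datax2/scratch/example_cadence --threshold 3 --transitTimes True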
| 9,930 | 36.617424 | 144 |
py
|
BL-TESSsearch
|
BL-TESSsearch-main/analysis/.ipynb_checkpoints/FindPlot-checkpoint.py
|
# Combined pipeline to simplify analysis of turboSETI files
# Uses both find_event_pipeline and plot_event_pipeline turboSETI methods
# to create waterfall plots of the events found in a full cadence
import os, glob, sys
import urllib
import pandas as pd
import pymysql
import numpy as np
from barycorrpy import utc_tdb
from astropy.coordinates import SkyCoord, EarthLocation
from astropy.time import Time
from astropy import units as u
def FindTransitTimes(dataDir):
'''
Queries the TESS TOI webpage and go_scans database to get information
on the transits of the ON TESS target to plot the start and end
returns : list of transit times, first is the start and second is the end
'''
# Get full dataframe of TESS candidates
toiPath = os.path.join(os.getcwd(), 'TESS-toi.csv')
if not os.path.exists(toiPath):
url = 'https://exofop.ipac.caltech.edu/tess/download_toi.php?sort=toi&output=csv'
urllib.request.urlretrieve(url, toiPath)
TESStoi = pd.read_csv(toiPath)
# Get TESS Targets in GBT go_scans database
BLclient = pymysql.connect(host=os.environ['GCP_IP'],user=os.environ['GCP_USR'],
password=os.environ['GCP_PASS'],database="FileTracking")
BLquery = """
SELECT *
FROM `infiles`
WHERE turboSETI='TRUE'
"""
go_scans = pd.read_sql(BLquery, BLclient)
# Get timing info on TESS target
onTarget = sorted(glob.glob(dataDir + '/*.dat'))[0].split('.')[0].split('_')[-2]
on_toi = np.where(TESStoi['TIC ID'].to_numpy() == int(onTarget[3:]))[0]
on_scans = np.where(go_scans['toi'].to_numpy() == onTarget)[0]
epoch = TESStoi['Epoch (BJD)'].to_numpy()[on_toi]
period = TESStoi['Period (days)'].to_numpy()[on_toi]
tt = TESStoi['Duration (hours)'].to_numpy()[on_toi]/24/2
obsTime = go_scans['obs_time'].to_numpy()[on_scans]
dist = TESStoi['Stellar Distance (pc)'].to_numpy()[0]
PMRA = float(TESStoi['PM RA (mas/yr)'].to_numpy()[0])
PMdec = float(TESStoi['PM Dec (mas/yr)'].to_numpy()[0])
ra = TESStoi['RA'].to_numpy()[0]
dec = TESStoi['Dec'].to_numpy()[0]
coords = SkyCoord(ra, dec, unit=(u.hourangle, u.deg), frame='icrs')
parallax = (1/dist) * 10**(-3) # units of mas
# Convert
gbtloc = EarthLocation.of_site('Green Bank Telescope')
tUTC = Time(obsTime, format='mjd', scale='utc', location=gbtloc)
tbjd = utc_tdb.JDUTC_to_BJDTDB(tUTC, ra=float(coords.to_string().split()[0]),
dec=float(coords.to_string().split()[1]),
pmra=PMRA, pmdec=PMdec,
px=parallax,
obsname='Green Bank Telescope')[0]
transitTimes = []
for obst in tbjd:
diff = np.abs(obst-epoch)
numRot = int(np.ceil(diff/period))
centerTransitTimes = []
t = epoch
for i in range(numRot):
centerTransitTimes.append(t)
if obst < epoch: # check if gbt obs happened before or after epoch
t-=period
else:
t+=period
# Since last value in transit time list is closest to observing time:
epochf = centerTransitTimes[-1] # Units of days in BJD
startTransit = epochf - tt
endTransit = epochf + tt
start_end = np.array([startTransit[0], endTransit[0]])
normTimes = (start_end - obst) * 24 * 3600
transitTimes.append(normTimes)
return transitTimes
def FindPlotEvents(dataDir, threshold=3, transitTimes=True):
'''
    dataDir : string, directory housing both the .dat and .h5 files of a cadence
    threshold : find_event filter threshold passed to find_event_pipeline
    transitTimes : if True, overlay the ON target's transit start/end on the plots
    returns : None; waterfall plot PNGs are saved for any events found
'''
from turbo_seti.find_event.find_event_pipeline import find_event_pipeline
if transitTimes:
transitTimes = FindTransitTimes(dataDir)
print(transitTimes)
else:
transitTimes = None
# create .lst file for .h5 files
h5list = sorted(glob.glob(dataDir + '/*.h5'))
h5listPath = os.path.join(dataDir, 'h5-list.lst')
with open(h5listPath, 'w') as L:
for h5 in h5list:
L.write(h5 + '\n')
# create .lst file for .dat files
datlist = sorted(glob.glob(dataDir + '/*.dat'))
datlistPath = os.path.join(dataDir, 'dat-list.lst')
with open(datlistPath, 'w') as L:
for dat in datlist:
L.write(dat+'\n')
# run find_event_pipeline
print('####################### Beginning Find Event Pipeline #######################')
csvPath = os.path.join(dataDir, 'events-list.csv')
find_event_pipeline(datlistPath, filter_threshold=threshold, number_in_cadence=len(datlist), csv_name=csvPath, saving=True);
# run plot_event_pipeline
print()
print('####################### Beginning Plot Event Pipeline #######################')
if transitTimes:
# Import local functions
from noahf_plot_event_pipeline import plot_event_pipeline
plot_event_pipeline(csvPath, h5listPath, filter_spec=f'{threshold}', user_validation=False, transit_times=transitTimes)
else:
from turbo_seti.find_event.plot_event_pipeline import plot_event_pipeline
plot_event_pipeline(csvPath, h5listPath, filter_spec=f'{threshold}', user_validation=False)
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--dir', default=os.getcwd())
    parser.add_argument('--threshold', type=int, default=3)
    # parse the flag as a real boolean so '--transitTimes False' is not truthy
    parser.add_argument('--transitTimes', default=False,
                        type=lambda v: str(v).lower() in ('true', '1', 'yes'))
args = parser.parse_args()
FindPlotEvents(args.dir, threshold=args.threshold, transitTimes=args.transitTimes)
if __name__ == '__main__':
sys.exit(main())
| 5,695 | 34.6 | 128 |
py
|
BL-TESSsearch
|
BL-TESSsearch-main/analysis/.ipynb_checkpoints/find_target_transitTimes-checkpoint.py
|
# Imports
import os
import pandas as pd
import numpy as np
import pymysql
import urllib
from barycorrpy import utc_tdb
from astropy.time import Time
from astropy.coordinates import EarthLocation, SkyCoord
from astropy import units as u
# tunable variables for later
numobs = 3 # Set minimum number of observations for a target to be recorded
# Get TESS Targets in GBT go_scans database
BLtargets = pymysql.connect(host=os.environ['BLIP'],user=os.environ['BLUSR'],
password=os.environ['BLPASS'],database="BLtargets")
BLquery = """
SELECT *
FROM go_scans
WHERE target_name LIKE 'TIC%'
"""
go_scans = pd.read_sql(BLquery, BLtargets)
# Modify go_scans database for further calculations
go_scans['TIC ID'] = go_scans.target_name.apply(lambda v : int(v[3:]))
gbt = go_scans[['TIC ID', 'target_name', 'utc_observed', 'session']]
TESStoi = pd.read_csv('/home/noahf/BL-TESSsearch/Target-Selection/TESS-toi.csv')
# Find TESS targets that transit during GBT observations
inTransitID = []
inTransitSession = []
transitTimes = {'ticid' : [], 'session' : [], 'ingress' : [], 'egress' : []}
gbtloc = EarthLocation.of_site('Green Bank Telescope')
go_scans['StartObs'] = np.ones(len(go_scans))
for ticid in gbt['TIC ID'].unique():
gbtInfo = gbt.loc[gbt['TIC ID'] == ticid]
TESSinfo = TESStoi.loc[TESStoi['TIC ID'] == ticid]
epoch = TESSinfo['Epoch (BJD)'].to_numpy()[0] # BJD
period = TESSinfo['Period (days)'].to_numpy()[0]
transitLength = TESSinfo['Duration (hours)'].to_numpy()[0]
dist = TESSinfo['Stellar Distance (pc)'].to_numpy()[0]
PMRA = float(TESSinfo['PM RA (mas/yr)'].to_numpy()[0])
PMdec = float(TESSinfo['PM Dec (mas/yr)'].to_numpy()[0])
ra = TESSinfo['RA'].to_numpy()[0]
dec = TESSinfo['Dec'].to_numpy()[0]
coords = SkyCoord(ra, dec, unit=(u.hourangle, u.deg), frame='icrs')
obsTime = gbtInfo['utc_observed'].to_numpy()
session = gbtInfo['session'].to_numpy()
parallax = (1/dist) * 10**(-3) # units of mas
# Convert Observed time to BJD
tUTC = Time(obsTime, location=gbtloc)
tbjd = utc_tdb.JDUTC_to_BJDTDB(tUTC, ra=float(coords.to_string().split()[0]),
dec=float(coords.to_string().split()[1]),
pmra=PMRA, pmdec=PMdec,
px= parallax,
obsname='Green Bank Telescope')[0]
tbjd = tbjd[np.isfinite(tbjd)]
whereTID = np.where(go_scans['TIC ID'] == ticid)[0]
if len(tbjd) > 0:
go_scans.iloc[whereTID, -1] = tbjd
# Does the GBT observation occur during a transit?
if len(obsTime) >= numobs and period != 0: # Check for at least 3 obs
tt = transitLength/24/2 # half transit time and convert to days
# Create list of center transit times for each TIC ID
for ii, obst in enumerate(tbjd):
diff = np.abs(obst-epoch)
numRot = int(np.ceil(diff/period))
centerTransitTimes = []
t = epoch
for i in range(numRot):
centerTransitTimes.append(t)
if obst < epoch: # check if gbt obs happened before or after epoch
t-=period
else:
t+=period
# Since last value in transit time list is closest to observing time:
epochf = centerTransitTimes[-1]
startTransit = epochf - tt
endTransit = epochf + tt
if obst > startTransit and obst < endTransit:
transitTimes['ticid'].append(ticid)
transitTimes['session'].append(session[ii])
transitTimes['ingress'].append(startTransit)
transitTimes['egress'].append(endTransit)
#transitTimes['StartObs'].append(obst)
# Extract go_scans info for transiting TESS Targets
outFrame = []
go_scans['ingress'] = np.ones(len(go_scans['TIC ID']))
go_scans['egress'] = np.ones(len(go_scans['TIC ID']))
# go_scans['StartObs'] = np.ones(len(go_scans['TIC ID']))
for ii in range(len(transitTimes['ticid'])):
tic = transitTimes['ticid'][ii]
sess = transitTimes['session'][ii]
mask = np.where((go_scans['TIC ID'].to_numpy() == tic) & (go_scans['session'].to_numpy() == sess))[0]
if len(mask) >= numobs: # make sure target has 3 obs
go_scans.iloc[mask, -2] = transitTimes['ingress'][ii]
go_scans.iloc[mask, -1] = transitTimes['egress'][ii]
# go_scans.iloc[mask, -1] = transitTimes['StartObs'][ii]
outFrame.append(go_scans.iloc[mask])
times = pd.concat(outFrame)
pd.options.display.float_format = '{:.10f}'.format
#print(times[times.target_name == 'TIC121338379'])
# Get observation end times
totObsTime = 10/60/24 # days
times['StartObs_max'] = times['StartObs']
groupedTime = times.groupby(['target_name', 'receiver']).agg({
'StartObs': 'min',
'StartObs_max' : 'max',
'TIC ID' : 'min',
'session' : 'min',
'ingress' : 'min',
'egress' : 'min'
}).reset_index()
groupedTime['EndObs'] = groupedTime['StartObs_max'] + totObsTime
print(groupedTime.EndObs - groupedTime.StartObs)
# totObsTime = 30/60/24 # days
# groupedTime = times.drop_duplicates(['target_name', 'receiver'])
# withEnd = groupedTime.assign(EndObs = groupedTime['StartObs'] + totObsTime)
# Return csv file and number of unique TESS targets found transiting
groupedTime.to_csv('TransitTimes.csv')
print(groupedTime)
| 5,498 | 32.944444 | 105 |
py
|
BL-TESSsearch
|
BL-TESSsearch-main/analysis/.ipynb_checkpoints/noahf_plot_event_pipeline-checkpoint.py
|
#!/usr/bin/env python3
r"""
Front-facing script to plot drifting, narrowband events in a set of generalized
cadences of ON-OFF radio SETI observations.
"""
import os
from operator import attrgetter
import pandas
from blimpy import Waterfall
import noahf_plot_event as plot_event
class PathRecord:
r''' Definition of an H5 path record '''
def __init__(self, path_h5, tstart, source_name):
self.path_h5 = path_h5
self.tstart = tstart
self.source_name = source_name
def __repr__(self):
return repr((self.path_h5, self.tstart, self.source_name))
def plot_event_pipeline(event_csv_string, fils_list_string, user_validation=False,
offset=0, filter_spec=None, sortby_tstart=True, plot_dir=None,
transit_times=None):
r"""
This function calls :func:`~turbo_seti.find_event.plot_event.plot_candidate_events` to
plot the events in an output .csv file generated by find_event_pipeline.py
Parameters
----------
event_csv_string : str
The string name of a .csv file that contains the
list of events at a given filter level, created as
output from find_event_pipeline.py. The
.csv should have a filename containing information
about its parameters, for example
"kepler1093b_0015_f2_snr10.csv"
Remember that the file was created with some cadence
(ex. ABACAD) and ensure that the cadence matches the
order of the files in fils_list_string
fils_list_string : str
The string name of a plaintext file ending in .lst
that contains the filenames of .fil files, each on a
new line, that corresponds to the cadence used to
create the .csv file used for event_csv_string.
user_validation : bool, optional
A True/False flag that, when set to True, asks if the
user wishes to continue with their input parameters
(and requires a 'y' or 'n' typed as confirmation)
before beginning to run the program. Recommended when
first learning the program, not recommended for
automated scripts.
offset : int, optional
The amount that the overdrawn "best guess" line from
the event parameters in the csv should be shifted from
its original position to enhance readability. Can be
set to 0 (default; draws line on top of estimated
event) or 'auto' (shifts line to the left by an auto-
        calculated amount, with additional lines showing original
position).
    filter_spec : str, optional
        Filter-level string used in output naming; if None, it is parsed
        from the third '_'-separated token of the csv path.
    sortby_tstart : bool
        If True, the input file list is sorted by header.tstart.
    plot_dir : str, optional
        Directory in which the output PNG files are saved; defaults to the
        directory of the first file in fils_list_string.
    transit_times : list, optional
        Per-observation [start, end] transit times (seconds relative to each
        scan) passed through to the plotting routines.
Examples
--------
>>> import plot_event_pipeline;
... plot_event_pipeline.plot_event_pipeline(event_csv_string, fils_list_string,
... user_validation=False, offset=0)
"""
#reading in the .csv containing the events
try:
candidate_event_dataframe = pandas.read_csv(event_csv_string, comment='#')
print("plot_event_pipeline: Opened file {}".format(event_csv_string))
except:
print("*** plot_event_pipeline: Oops, cannot access file {}".format(event_csv_string))
return
fil_file_list = []
for file in pandas.read_csv(fils_list_string, encoding='utf-8', header=None, chunksize=1):
fil_file_list.append(file.iloc[0,0])
#obtaining source names
source_name_list = []
path_record = []
for fil in fil_file_list:
wf = Waterfall(fil, load_data=False)
source_name = wf.container.header["source_name"]
source_name_list.append(source_name)
tstart = wf.container.header["tstart"]
path_record.append(PathRecord(fil, tstart, source_name))
# If sorting by header.tstart, then rewrite the dat_file_list in header.tstart order.
if sortby_tstart:
path_record = sorted(path_record, key=attrgetter('tstart'))
fil_file_list = []
for obj in path_record:
fil_file_list.append(obj.path_h5)
print("plot_event_pipeline: file = {}, tstart = {}, source_name = {}"
.format(os.path.basename(obj.path_h5), obj.tstart, obj.source_name))
else:
for obj in path_record:
print("plot_event_pipeline: file = {}, tstart = {}, source_name = {}"
.format(os.path.basename(obj.path_h5), obj.tstart, obj.source_name))
#get rid of bytestring "B'"s if they're there (early versions of
#seti_event.py added "B'"s to all of the source names)
on_source_name_original = candidate_event_dataframe.Source[0]
if on_source_name_original[0] == 'B' and on_source_name_original[-1] == '\'':
on_source_name = on_source_name_original[2:-2]
else:
on_source_name = on_source_name_original
candidate_event_dataframe = candidate_event_dataframe.replace(to_replace=on_source_name_original,
value=on_source_name)
# Establish filter-level from filter_spec (preferred)
# or 3rd token of the .csv path (don't break an existing caller)
if filter_spec is None:
filter_level = event_csv_string.split('_')[2]
else:
filter_level = filter_spec
#begin user validation
print("Plotting some events for: ", on_source_name)
print("There are " + str(len(candidate_event_dataframe.Source)) + " total events in the csv file " + event_csv_string)
print("therefore, you are about to make " + str(len(candidate_event_dataframe.Source)) + " .png files.")
if user_validation:
question = "Do you wish to proceed with these settings?"
while "the answer is invalid":
reply = str(input(question+' (y/n): ')).lower().strip()
if reply == '':
return
if reply[0] == 'y':
break
if reply[0] == 'n':
return
#move to plot_event.py for the actual plotting
plot_event.plot_candidate_events(candidate_event_dataframe,
fil_file_list,
filter_level,
source_name_list,
offset=offset,
plot_dir=plot_dir,
transit=transit_times)
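# Example call (hypothetical paths), mirroring how FindPlotEvents drives this
# fork of the turboSETI pipeline:
#
#   plot_event_pipeline('events-list.csv', 'h5-list.lst',
#                       filter_spec='3', user_validation=False,
#                       transit_times=transit_times)
#
# where transit_times is the per-observation [start, end] list (in seconds,
# relative to each scan) returned by FindTransitTimes.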
| 6,336 | 40.418301 | 122 |
py
|
BL-TESSsearch
|
BL-TESSsearch-main/analysis/.ipynb_checkpoints/plot_dat_mod-checkpoint.py
|
import os, glob, sys
from turbo_seti.find_event.plot_dat import plot_dat
from turbo_seti import find_event as find
import numpy as np
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--dir', default=os.getcwd())
parser.add_argument('--minHit', type=float, default=None)
parser.add_argument('--maxHit', type=float, default=None)
args = parser.parse_args()
path = args.dir
dat_files = glob.glob(path + "*.dat")
min_hit = 1e9
max_hit = 0
    if args.minHit is None or args.maxHit is None:
for file in dat_files:
tbl = find.read_dat(file)
min_freq, max_freq = min(tbl["Freq"]), max(tbl["Freq"])
if min_freq < min_hit:
min_hit = min_freq
if max_freq > max_hit:
max_hit = max_freq
else:
min_hit = args.minHit
max_hit = args.maxHit # set min and max hits by hand just to get this image
print("Lowest frequency hit: ", min_hit)
print("Highext frequency hit: ", max_hit)
plot_range = 2000*1e-6 # a 2000Hz width, adjusted to be in units of MHz
freq_range = np.arange(np.round(min_hit, 2), np.round(max_hit), plot_range)
outDir = path + "bautista-analysis/"
if not os.path.exists(outDir):
os.mkdir(outDir)
for center in freq_range:
plot_dat(path + "dat-list.lst",
path + "h5-list.lst",
path + "events-list.csv",
outdir=outDir,
check_zero_drift=False,
alpha=0.65,
color="black",
window=(center-0.001, center+0.001))
if __name__ == '__main__':
sys.exit(main())
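# Example invocation (hypothetical paths). Note that --dir is concatenated
# directly with "*.dat" and the .lst/.csv names, so a trailing slash is expected:
#
#   python plot_dat_mod.py --dir /datax/scratch/TIC1234/ --minHit 1100.0 --maxHit 1900.0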
| 1,710 | 29.553571 | 83 |
py
|
BL-TESSsearch
|
BL-TESSsearch-main/analysis/.ipynb_checkpoints/FindPlotEvents_pipeline-checkpoint.py
|
# Combined pipeline to simplify analysis of turboSETI files
# Uses both find_event_pipeline and plot_event_pipeline turboSETI methods
# to create waterfall plots of the events found in a full cadence
import os, glob
import urllib
import pandas as pd
import pymysql
import numpy as np
from barycorrpy import utc_tdb
from astropy.coordinates import SkyCoord, EarthLocation
from astropy.time import Time
from astropy import units as u
def FindTransitTimes(dataDir):
'''
    Queries the TESS TOI table and the observation database to work out when
    the ON TESS target's transit starts and ends relative to each observation.
    returns : list with one [start, end] pair per observation, in seconds
              relative to that observation's start time
'''
# Get full dataframe of TESS candidates
toiPath = os.path.join(os.getcwd(), 'TESS-toi.csv')
if not os.path.exists(toiPath):
url = 'https://exofop.ipac.caltech.edu/tess/download_toi.php?sort=toi&output=csv'
urllib.request.urlretrieve(url, toiPath)
TESStoi = pd.read_csv(toiPath)
# Get TESS Targets in GBT go_scans database
BLclient = pymysql.connect(host=os.environ['GCP_IP'],user=os.environ['GCP_USR'],
password=os.environ['GCP_PASS'],database="FileTracking")
BLquery = """
SELECT *
FROM `infiles`
WHERE turboSETI='TRUE'
"""
go_scans = pd.read_sql(BLquery, BLclient)
# Get timing info on TESS target
onTarget = sorted(glob.glob(dataDir + '/*.dat'))[0].split('.')[0].split('_')[-2]
on_toi = np.where(TESStoi['TIC ID'].to_numpy() == int(onTarget[3:]))[0]
on_scans = np.where(go_scans['toi'].to_numpy() == onTarget)[0]
epoch = TESStoi['Epoch (BJD)'].to_numpy()[on_toi]
period = TESStoi['Period (days)'].to_numpy()[on_toi]
tt = TESStoi['Duration (hours)'].to_numpy()[on_toi]/24/2
obsTime = go_scans['obs_time'].to_numpy()[on_scans]
    # index with on_toi so the values correspond to the matched target,
    # not the first row of the TOI table
    dist = TESStoi['Stellar Distance (pc)'].to_numpy()[on_toi][0]
    PMRA = float(TESStoi['PM RA (mas/yr)'].to_numpy()[on_toi][0])
    PMdec = float(TESStoi['PM Dec (mas/yr)'].to_numpy()[on_toi][0])
    ra = TESStoi['RA'].to_numpy()[on_toi][0]
    dec = TESStoi['Dec'].to_numpy()[on_toi][0]
coords = SkyCoord(ra, dec, unit=(u.hourangle, u.deg), frame='icrs')
parallax = (1/dist) * 10**(-3) # units of mas
# Convert
gbtloc = EarthLocation.of_site('Green Bank Telescope')
tUTC = Time(obsTime, format='mjd', scale='utc', location=gbtloc)
tbjd = utc_tdb.JDUTC_to_BJDTDB(tUTC, ra=float(coords.to_string().split()[0]),
dec=float(coords.to_string().split()[1]),
pmra=PMRA, pmdec=PMdec,
px=parallax,
obsname='Green Bank Telescope')[0]
transitTimes = []
for obst in tbjd:
diff = np.abs(obst-epoch)
numRot = int(np.ceil(diff/period))
centerTransitTimes = []
t = epoch
for i in range(numRot):
centerTransitTimes.append(t)
if obst < epoch: # check if gbt obs happened before or after epoch
t-=period
else:
t+=period
# Since last value in transit time list is closest to observing time:
epochf = centerTransitTimes[-1] # Units of days in BJD
startTransit = epochf - tt
endTransit = epochf + tt
start_end = np.array([startTransit[0], endTransit[0]])
normTimes = (start_end - obst) * 24 * 3600
transitTimes.append(normTimes)
return transitTimes
def FindPlotEvents(dataDir, threshold=3, transitTimes=True):
'''
    dataDir : string, directory housing both the .dat and .h5 files of a cadence
    threshold : find_event filter threshold passed to find_event_pipeline
    transitTimes : if True, overlay the ON target's transit start/end on the plots
    returns : None; waterfall plot PNGs are saved for any events found
'''
from turbo_seti.find_event.find_event_pipeline import find_event_pipeline
if transitTimes:
transitTimes = FindTransitTimes(dataDir)
print(transitTimes)
else:
transitTimes = None
# create .lst file for .h5 files
h5list = sorted(glob.glob(dataDir + '/*.h5'))
h5listPath = os.path.join(dataDir, 'h5-list.lst')
with open(h5listPath, 'w') as L:
for h5 in h5list:
L.write(h5 + '\n')
# create .lst file for .dat files
datlist = sorted(glob.glob(dataDir + '/*.dat'))
datlistPath = os.path.join(dataDir, 'dat-list.lst')
with open(datlistPath, 'w') as L:
for dat in datlist:
L.write(dat+'\n')
# run find_event_pipeline
print('####################### Beginning Find Event Pipeline #######################')
csvPath = os.path.join(dataDir, 'events-list.csv')
find_event_pipeline(datlistPath, filter_threshold=threshold, number_in_cadence=len(datlist), csv_name=csvPath, saving=True);
# run plot_event_pipeline
print()
print('####################### Beginning Plot Event Pipeline #######################')
if transitTimes:
# Import local functions
from noahf_plot_event_pipeline import plot_event_pipeline
plot_event_pipeline(csvPath, h5listPath, filter_spec=f'{threshold}', user_validation=False, transit_times=transitTimes)
else:
from turbo_seti.find_event.plot_event_pipeline import plot_event_pipeline
plot_event_pipeline(csvPath, h5listPath, filter_spec=f'{threshold}', user_validation=False)
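# Example (hypothetical directory). Run from the analysis directory so the
# local noahf_plot_event_pipeline module is importable when transitTimes=True:
#
#   FindPlotEvents('/datax/scratch/TIC1234', threshold=3, transitTimes=True)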
| 5,325 | 35.231293 | 128 |
py
|
BL-TESSsearch
|
BL-TESSsearch-main/analysis/.ipynb_checkpoints/noahf_plot_event-checkpoint.py
|
#!/usr/bin/env python3
r'''
Backend script to plot drifting, narrowband events in a generalized cadence of
ON-OFF radio SETI observations. The main function contained in this file,
:func:`~.plot_candidate_events`, uses the other helper functions
in this file (described below) to plot events from a turboSETI event .csv file.
'''
from os import mkdir
from os.path import dirname, abspath, isdir
import gc
import logging
logger_plot_event_name = 'plot_event'
logger_plot_event = logging.getLogger(logger_plot_event_name)
logger_plot_event.setLevel(logging.INFO)
# Plotting packages import
import matplotlib
import matplotlib.pyplot as plt
matplotlib.use('agg')
# Math/Science package imports
import numpy as np
from astropy.time import Time
# BL imports
import blimpy as bl
from blimpy.utils import rebin
# preliminary plot arguments
fontsize=16
font = {'family' : 'DejaVu Sans',
'size' : fontsize}
MAX_IMSHOW_POINTS = (4096, 1268)
def overlay_drift(f_event, f_start, f_stop, drift_rate, t_duration, offset=0, alpha=1, color='#cc0000'):
r'''
Creates a dashed red line at the recorded frequency and drift rate of
the plotted event - can overlay the signal exactly or be offset by
some amount (offset can be 0 or 'auto').
'''
# determines automatic offset and plots offset lines
if offset == 'auto':
offset = ((f_start - f_stop) / 10)
plt.plot((f_event - offset, f_event),
(10, 10),
"o-",
c=color,
lw=2,
alpha=alpha)
# plots drift overlay line, with offset if desired
plt.plot((f_event + offset, f_event + drift_rate/1e6 * t_duration + offset),
(0, t_duration),
c=color,
ls='dashed', lw=2,
alpha=alpha)
def plot_waterfall(wf, source_name, f_start=None, f_stop=None, transit_times=None, **kwargs):
r"""
Plot waterfall of data in a .fil or .h5 file.
Parameters
----------
wf : blimpy.Waterfall object
Waterfall object of an H5 or Filterbank file containing the dynamic spectrum data.
source_name : str
Name of the target.
f_start : float
Start frequency, in MHz.
f_stop : float
Stop frequency, in MHz.
    transit_times : list of length 2
        Transit start and end times, in seconds relative to the start of
        the observation.
kwargs : dict
Keyword args to be passed to matplotlib imshow().
Notes
-----
Plot a single-panel waterfall plot (frequency vs. time vs. intensity)
for one of the on or off observations in the cadence of interest, at the
frequency of the expected event. Calls :func:`~overlay_drift`
"""
# prepare font
matplotlib.rc('font', **font)
# Load in the data from fil
plot_f, plot_data = wf.grab_data(f_start=f_start, f_stop=f_stop)
# Make sure waterfall plot is under 4k*4k
dec_fac_x, dec_fac_y = 1, 1
# rebinning data to plot correctly with fewer points
try:
if plot_data.shape[0] > MAX_IMSHOW_POINTS[0]:
dec_fac_x = plot_data.shape[0] / MAX_IMSHOW_POINTS[0]
if plot_data.shape[1] > MAX_IMSHOW_POINTS[1]:
dec_fac_y = int(np.ceil(plot_data.shape[1] / MAX_IMSHOW_POINTS[1]))
plot_data = rebin(plot_data, dec_fac_x, dec_fac_y)
except Exception as ex:
print('\n*** Oops, grab_data returned plot_data.shape={}, plot_f.shape={}'
.format(plot_data.shape, plot_f.shape))
print('Waterfall info for {}:'.format(wf.filename))
wf.info()
raise ValueError('*** Something is wrong with the grab_data output!') from ex
# Rolled back PR #82
# determine extent of the plotting panel for imshow
extent=(plot_f[0], plot_f[-1], (wf.timestamps[-1]-wf.timestamps[0])*24.*60.*60, 0.0)
# plot and scale intensity (log vs. linear)
kwargs['cmap'] = kwargs.get('cmap', 'viridis')
plot_data = 10.0 * np.log10(plot_data)
# get normalization parameters
vmin = plot_data.min()
vmax = plot_data.max()
normalized_plot_data = (plot_data - vmin) / (vmax - vmin)
# display the waterfall plot
this_plot = plt.imshow(normalized_plot_data,
aspect='auto',
rasterized=True,
interpolation='nearest',
extent=extent,
**kwargs
)
    if transit_times:
        start_transit, stop_transit = min(transit_times), max(transit_times)
        # total plotted time span in seconds, matching the imshow extent above
        t_span = (wf.timestamps[-1] - wf.timestamps[0]) * 24. * 60. * 60
        # only draw a line if that edge of the transit falls inside the panel
        if start_transit > 0:
            plt.axhline(start_transit, color='r', linestyle='--', label='Transit Start')
        if stop_transit < t_span:
            plt.axhline(stop_transit, color='r', linestyle='--', label='Transit End')
# add plot labels
plt.xlabel("Frequency [Hz]",fontdict=font)
plt.ylabel("Time [s]",fontdict=font)
# add source name
ax = plt.gca()
plt.text(0.03, 0.8, source_name, transform=ax.transAxes, bbox=dict(facecolor='white'))
# if plot_snr != False:
# plt.text(0.03, 0.6, plot_snr, transform=ax.transAxes, bbox=dict(facecolor='white'))
# return plot
del plot_f, plot_data
gc.collect()
return this_plot
def make_waterfall_plots(fil_file_list, on_source_name, f_start, f_stop, drift_rate, f_mid,
filter_level, source_name_list, offset=0, plot_dir=None, plotTransit=None,
**kwargs):
r'''
Makes waterfall plots of an event for an entire on-off cadence.
Parameters
----------
fil_file_list : str
List of filterbank files in the cadence.
on_source_name : str
Name of the on_source target.
f_start : float
Start frequency, in MHz.
f_stop : float
Stop frequency, in MHz.
drift_rate : float
Drift rate in Hz/s.
f_mid : float
        Middle frequency of the event, in MHz.
filter_level : int
Filter level (1, 2, or 3) that produced the event.
source_name_list : list
List of source names in the cadence, in order.
    offset : int, optional
        Shift of the overlaid "best guess" drift line (0 or 'auto').
    plot_dir : str, optional
        Directory in which the PNG files are saved; defaults to the
        directory of the first file in fil_file_list.
    plotTransit : list, optional
        Per-panel [start, end] transit times, in seconds, to overlay.
    kwargs : dict
        Keyword args to be passed to matplotlib imshow().
Notes
-----
Makes a series of waterfall plots, to be read from top to bottom, displaying a full cadence
at the frequency of a recorded event from find_event. Calls :func:`~plot_waterfall`
'''
global logger_plot_event
# prepare for plotting
matplotlib.rc('font', **font)
# set up the sub-plots
n_plots = len(fil_file_list)
fig = plt.subplots(n_plots, sharex=True, sharey=True,figsize=(10, 2*n_plots))
# get directory path for storing PNG files
if plot_dir is None:
dirpath = dirname(abspath(fil_file_list[0])) + '/'
else:
if not isdir(plot_dir):
mkdir(plot_dir)
dirpath = plot_dir
# read in data for the first panel
max_load = bl.calcload.calc_max_load(fil_file_list[0])
#print('plot_event make_waterfall_plots: max_load={} is required for {}'.format(max_load, fil_file_list[0]))
wf1 = bl.Waterfall(fil_file_list[0], f_start=f_start, f_stop=f_stop, max_load=max_load)
t0 = wf1.header['tstart']
plot_f1, plot_data1 = wf1.grab_data()
# rebin data to plot correctly with fewer points
dec_fac_x, dec_fac_y = 1, 1
if plot_data1.shape[0] > MAX_IMSHOW_POINTS[0]:
dec_fac_x = plot_data1.shape[0] / MAX_IMSHOW_POINTS[0]
if plot_data1.shape[1] > MAX_IMSHOW_POINTS[1]:
dec_fac_y = int(np.ceil(plot_data1.shape[1] / MAX_IMSHOW_POINTS[1]))
plot_data1 = rebin(plot_data1, dec_fac_x, dec_fac_y)
# define more plot parameters
# never used: delta_f = 0.000250
mid_f = np.abs(f_start+f_stop)/2.
subplots = []
del wf1, plot_f1, plot_data1
gc.collect()
# Fill in each subplot for the full plot
for ii, filename in enumerate(fil_file_list):
logger_plot_event.debug('make_waterfall_plots: file {} in list: {}'.format(ii, filename))
# identify panel
subplot = plt.subplot(n_plots, 1, ii + 1)
subplots.append(subplot)
# read in data
max_load = bl.calcload.calc_max_load(filename)
#print('plot_event make_waterfall_plots: max_load={} is required for {}'.format(max_load, filename))
wf = bl.Waterfall(filename, f_start=f_start, f_stop=f_stop, max_load=max_load)
# make plot with plot_waterfall
source_name = source_name_list[ii]
if plotTransit:
this_plot = plot_waterfall(wf,
source_name,
f_start=f_start,
f_stop=f_stop,
                                       transit_times=plotTransit[ii],
                                       **kwargs)
else:
this_plot = plot_waterfall(wf,
source_name,
f_start=f_start,
f_stop=f_stop,
**kwargs)
# calculate parameters for estimated drift line
t_elapsed = Time(wf.header['tstart'], format='mjd').unix - Time(t0, format='mjd').unix
t_duration = (wf.n_ints_in_file - 1) * wf.header['tsamp']
f_event = f_mid + drift_rate / 1e6 * t_elapsed
# plot estimated drift line
overlay_drift(f_event, f_start, f_stop, drift_rate, t_duration, offset)
# Title the full plot
if ii == 0:
plot_title = "%s \n $\\dot{\\nu}$ = %2.3f Hz/s, MJD:%5.5f" % (on_source_name, drift_rate, t0)
plt.title(plot_title)
# Format full plot
if ii < len(fil_file_list)-1:
plt.xticks(np.linspace(f_start, f_stop, num=4), ['','','',''])
del wf
gc.collect()
# More overall plot formatting, axis labelling
factor = 1e6
units = 'Hz'
#ax = plt.gca()
#ax.get_xaxis().get_major_formatter().set_useOffset(False)
xloc = np.linspace(f_start, f_stop, 5)
xticks = [round(loc_freq) for loc_freq in (xloc - mid_f)*factor]
if np.max(xticks) > 1000:
xticks = [xt/1000 for xt in xticks]
units = 'kHz'
plt.xticks(xloc, xticks)
plt.xlabel("Relative Frequency [%s] from %f MHz"%(units,mid_f),fontdict=font)
# Add colorbar
cax = fig[0].add_axes([0.94, 0.11, 0.03, 0.77])
fig[0].colorbar(this_plot,cax=cax,label='Normalized Power (Arbitrary Units)')
# Adjust plots
plt.subplots_adjust(hspace=0,wspace=0)
# save the figures
path_png = dirpath + str(filter_level) + '_' + on_source_name + '_dr_' + "{:0.2f}".format(drift_rate) + '_freq_' "{:0.6f}".format(f_start) + ".png"
plt.savefig(path_png, bbox_inches='tight')
logger_plot_event.debug('make_waterfall_plots: Saved file {}'.format(path_png))
# show figure before closing if this is an interactive context
mplbe = matplotlib.get_backend()
logger_plot_event.debug('make_waterfall_plots: backend = {}'.format(mplbe))
if mplbe != 'agg':
plt.show()
# close all figure windows
plt.close('all')
return subplots
def plot_candidate_events(candidate_event_dataframe, fil_file_list, filter_level, source_name_list,
offset=0, plot_dir=None, transit=None, **kwargs):
r'''
Calls :func:`~make_waterfall_plots` on each event in the input .csv file.
Arguments
---------
candidate_event_dataframe : dict
A pandas dataframe containing information
about a candidate event. The necessary data
includes the start and stop frequencies, the
drift rate, and the source name. To determine
the required variable names and formatting
conventions, see the output of
find_event_pipeline.
fil_file_list : list
A Python list that contains a series of
strings corresponding to the filenames of .fil
files, each on a new line, that corresponds to
the cadence used to create the .csv file used
for event_csv_string.
filter_level : int
A string indicating the filter level of the
cadence used to generate the
candidate_event_dataframe. Used only for
output file naming, convention is "f1", "f2",
or "f3". Descriptions for the three levels of
filtering can be found in the documentation
for find_event.py
source_name_list : list
A Python list that contains a series of strings
        corresponding to the source names of the
        cadence, in chronological order (descending
        through the plot panels).
offset : int, optional
The amount that the overdrawn "best guess"
line from the event parameters in the csv
should be shifted from its original position
to enhance readability. Can be set to 0
(default; draws line on top of estimated
event) or 'auto' (shifts line to the left by
        an auto-calculated amount, with additional lines
showing original position).
    kwargs : dict
        Keyword args passed through to matplotlib imshow().
Examples
--------
It is highly recommended that users interact with this program via the
front-facing plot_event_pipeline.py script. See the usage of that file in
its own documentation.
If you would like to run plot_candidate_events without calling
plot_event_pipeline.py, the usage is as follows:
>>> plot_event.plot_candidate_events(candidate_event_dataframe, fil_file_list,
... filter_level, source_name_list, offset=0)
'''
global logger_plot_event
# load in the data for each individual hit
if candidate_event_dataframe is None:
print('*** plot_candidate_events: candidate_event_dataframe is None, nothing to do.')
return
len_df = len(candidate_event_dataframe)
if len_df < 1:
print('*** plot_candidate_events: len(candidate_event_dataframe) = 0, nothing to do.')
return
for i in range(0, len_df):
candidate = candidate_event_dataframe.iloc[i]
on_source_name = candidate['Source']
f_mid = candidate['Freq']
drift_rate = candidate['DriftRate']
# calculate the length of the total cadence from the fil files' headers
first_fil = bl.Waterfall(fil_file_list[0], load_data=False)
tfirst = first_fil.header['tstart']
last_fil = bl.Waterfall(fil_file_list[-1], load_data=False)
tlast = last_fil.header['tstart']
t_elapsed = Time(tlast, format='mjd').unix - Time(tfirst, format='mjd').unix + (last_fil.n_ints_in_file -1) * last_fil.header['tsamp']
# calculate the width of the plot based on making sure the full drift is visible
bandwidth = 2.4 * abs(drift_rate)/1e6 * t_elapsed
bandwidth = np.max((bandwidth, 500./1e6))
# Get start and stop frequencies based on midpoint and bandwidth
f_start, f_stop = np.sort((f_mid - (bandwidth/2), f_mid + (bandwidth/2)))
# logger_plot_event.debug useful values
logger_plot_event.debug('*************************************************')
logger_plot_event.debug('*** The Parameters for This Plot Are: ****')
logger_plot_event.debug('Target = {}'.format(on_source_name))
logger_plot_event.debug('Bandwidth = {} MHz'.format(round(bandwidth, 5)))
logger_plot_event.debug('Time Elapsed (inc. Slew) = {} s'.format(round(t_elapsed)))
logger_plot_event.debug('Middle Frequency = {} MHz'.format(round(f_mid, 4)))
logger_plot_event.debug('Expected Drift = {} Hz/s'.format(round(drift_rate, 4)))
logger_plot_event.debug('*************************************************')
# Pass info to make_waterfall_plots() function
make_waterfall_plots(fil_file_list,
on_source_name,
f_start,
f_stop,
drift_rate,
f_mid,
filter_level,
source_name_list,
offset=offset,
plot_dir=plot_dir,
plotTransit=transit,
**kwargs)
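# Worked example of the plot-width logic above (illustrative numbers): a
# 0.5 Hz/s drift over a ~1800 s cadence spans 0.5 * 1800 = 900 Hz, so
# bandwidth = 2.4 * 900 Hz ~ 0.00216 MHz (above the 500 Hz floor), and the
# panels are centred on f_mid with f_start/f_stop = f_mid -/+ bandwidth/2.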
| 16,187 | 36.472222 | 151 |
py
|
BL-TESSsearch
|
BL-TESSsearch-main/analysis/seti_limits_py3/4band_compare.py
|
from color_coded_ET_power_law import *
import matplotlib
#matplotlib.rcParams['text.usetex'] = True
#import matplotlib.pyplot as plt
from LBand_SETI_compare import lband_compare
from SBand_SETI_compare import sband_compare
from CBand_SETI_compare import cband_compare
from XBand_SETI_compare import xband_compare
import numpy as np
#params = {'text.usetex':True,
# 'font.family':'serif',
# 'font.serif':['Palatino']}
#plt.rcParams.update(params)
def seti_compare(y_label_units=True):
    ''' Compare this SETI project with previous surveys.
'''
# Get dictionaries of plot-relevant values
Lband = lband_compare(save=False)
Sband = sband_compare(save=False)
Cband = cband_compare(save=False)
Xband = xband_compare(save=False)
# Place all dictionaries in list --> Allows plotting via for-loop
dict_list = [Lband, Sband, Cband, Xband]
Lband['color'] = 'mediumorchid'
#---------------------------------------------------------------------------------
# plotting setup
plt.ion()
#plt.figure(figsize=(15, 10))
alpha = 0.7
markersize = 20
fontsize = 20
ticksize = fontsize - 2
dot_size = markersize - 12
colors = ['tab:red','tab:green','tab:orange','tab:blue']
band_handles = {'L':[],'S':[],'C':[],'X':[]}
band_letters = ['L','S','C','X']
# Plot values for all 4 bands
for i, band_dict in enumerate(dict_list):
outside, = plt.plot(np.log10(band_dict['EIRP']),np.log10(1./band_dict['rarity']),marker = '*', linestyle='None', color = colors[i], markersize = markersize-2)
#outside, = plt.plot(np.log10(band_dict['EIRP']),np.log10(1./band_dict['rarity']),marker = (4,1,30), linestyle='None', color = colors[i], markersize = markersize)
#inside, = plt.plot([np.log10(band_dict['EIRP'])],[np.log10(1./band_dict['rarity'])],marker='o', color='k', markersize = dot_size-5, linestyle='None')
band_handles[band_letters[i]].append(outside)
#band_handles[band_letters[i]].append(inside)
#plt.legend((outside, inside), band_dict['project'])
#plt.plot([np.log10(band_dict['EIRP'])],[np.log10(1./band_dict['rarity'])],marker = band_dict['shape'], color = band_dict['color'],markersize = markersize, label=band_dict['project'])
# Plot values of other surveys
#h = ET_power_law()
#plt.legend([band_handles['L'][0], band_handles['S'][0], band_handles['C'][0], band_handles['X'][0], h['p1'], h['p2'], h['e'], h['gm'], (h['h_a1'], h['h_a2']), h['s1'], h['pha'], h['hs'], h['m']], ['This Project: L-Band', 'This Project: S-Band', 'This Project: C-Band', 'This Project: X-Band', 'Price (2020 - Parkes)','Price (2020 - GBT)','Enriquez (2017)','Gray&Mooley (2017)', 'Harp (2016) All*','Siemion (2013)','Phoenix All*','Horowitz&Sagan (1993)', 'Tremblay (2020)'], labelspacing=1.75)
#plt.legend([band_handles['L'][0], band_handles['S'][0], band_handles['C'][0], band_handles['X'][0], h['p1'], h['p2'], h['e'], h['gm'], h['h_ab'], h['h_c'], h['h_d'], (h['h_a1'], h['h_a2']), (h['s1'], h['s2']), h['ph'], h['pha'], h['hs'], h['v'], h['t'], h['ver']], ['This Project: L-Band', 'This Project: S-Band', 'This Project: C-Band', 'This Project: X-Band', 'Price (2019 - Parkes)','Price (2019 - GBT)','Enriquez (2017)','Gray&Mooley (2017)', 'Harp (2016) a,b','Harp (2016) c','Harp (2016) d','Harp (2016) All*','Siemion (2013)','Phoenix','Phoenix All*','Horowitz&Sagan (1993)','Valdes (1986)','Tarter (1980)','Verschuur (1973)'])
#plt.legend(numpoints=1,scatterpoints=1,fancybox=True, shadow=True)
#plt.ylim(-10,0)
#plt.xlabel(r'EIRP$_{min}\ \left[\/\log_{10}\left(Watts\right)\/\right]$',fontsize = fontsize)
#plt.ylabel('Transmiter Galactic Rarity [log((Nstars*BW)^-1)]',fontsize=fontsize)
# if y_label_units:
# plt.ylabel(r'Transmitter Rate $\left[\/\log\left(\frac{1}{N_{stars} \cdot \nu_{rel}}\right)\/\right]$',fontsize=fontsize)
# else:
# plt.ylabel('Transmitter Rate ',fontsize=fontsize)
# plt.xticks(fontsize = ticksize)
# plt.yticks(fontsize = ticksize)
# #plt.ylim(-10,4)
# #plt.xlim(10,23)
# from datetime import datetime
# image_filename = 'images/'+'SETI_limits_comparison' + datetime.now().strftime("_%m-%d-%y_%H:%M:%S") + '.png'
# plt.savefig(image_filename, format='png',bbox_inches='tight')
# import os
# os.system("shotwell %s &"%(image_filename))
# seti_compare()
| 4,432 | 48.808989 | 635 |
py
|
BL-TESSsearch
|
BL-TESSsearch-main/analysis/seti_limits_py3/CBand_SETI_compare.py
|
from ET_power_law import *
import matplotlib.pylab as plt
def cband_compare(max_distance=935.46, save=False):  # default max_distance [pc] as noted below
#---------------------------
# Edit values below vvvvvv
#---------------------------
# Your new values ( e.g. Enriquez 2017 x 100)
project = 'This Study: C-Band' #Project name
telescope = 'GBT' # Telescope name
N_stars = 20 # Estimated number of stars
band = 3800e6 # Total bandwidth [Hz]
    central_freq = 5.9e9 # Central frequency [Hz]
    dish_diam = 100 #Telescope diameter meters (single dish in current version)
    dish_Tsys = 21.50 #Telescope Tsys [Kelvin]
    dish_app_eff = 0.72 #Telescope Aperture Efficiency
SNR_threshold = 10 #Survey threshold [sigma above the mean]
spectral_resolution = 3. #Spectral resolution [Hz]
scan_obs_time = 300 # Observation time per scan [sec]
#max_distance = 935.46 #Maximum distance [pc]
iband = 3800e6 #Instantaneous Bandwidth [Hz]
shape = '*' # Figure shape
color = 'b' # Figure color
y_label_units = True # Units in Y label
#---------------------------
# Edit values above ^^^^^
#---------------------------
#Calculating limits
zeta_AO = 1e3*0.5/ 1e13
freq_range_norm = (band/central_freq)
    SEFD = calc_SEFD(calc_DishArea(dish_diam), dish_Tsys, eff=dish_app_eff) # 10 Jy (GBT)
    SEFD = 10.6468 # hard-coded override of the computed value above
Sens = calc_Sensitivity(SNR_threshold, spectral_resolution,scan_obs_time,SEFD=SEFD)
dist_m = (max_distance*3.26156 * u.lyr.to('m'))
EIRP = calc_EIRP_min(dist_m,Sens)
#EIRP = 677.5e12
survey_rarity = N_stars*freq_range_norm
survey_speed = SEFD**2*spectral_resolution/iband
survey_sky = N_stars * calc_BeamSize(dish_diam,central_freq)
survey_DFM = survey_sky * band / Sens**(3/2.)
def print_project():
print('~o~', project ,' (', telescope,') ', '~o~')
print('SEFD :', SEFD)
print('Sens :', Sens)
print('Max Distance (m) :', dist_m)
print('EIRP :', EIRP)
print('BeamSize :', calc_BeamSize(dish_diam,central_freq))
print('Sky Coverage :', survey_sky)
print('CWTFM :', zeta_AO *(EIRP) / (survey_rarity))
print('DFM :', survey_DFM)
print_project()
#---------------------------
#Comparing SETI limits
if save:
compare_SETI_limits(EIRP,survey_rarity,shape=shape,color=color,project=project,y_label_units=y_label_units, save_as='CBand_seti_compare')
cband_dict = {'EIRP':EIRP, 'rarity':survey_rarity, 'shape':shape, 'color':color, 'project':project, 'y_label_units':y_label_units}
return cband_dict
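# Example (using the ~935 pc C-band maximum distance noted above):
#
#   cband_dict = cband_compare(935.46)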
| 2,623 | 32.641026 | 145 |
py
|
BL-TESSsearch
|
BL-TESSsearch-main/analysis/seti_limits_py3/SBand_SETI_compare.py
|
from ET_power_law import *
import matplotlib.pylab as plt
def sband_compare(max_distance=560.22, save=False):  # default max_distance [pc] as noted below
#---------------------------
# Edit values below vvvvvv
#---------------------------
# Your new values ( e.g. Enriquez 2017 x 100)
project = 'This Study: S-Band' #Project name
telescope = 'GBT' # Telescope name
N_stars = 17 # Estimated number of stars
band = 940e6 # Total bandwidth [Hz]
    central_freq = 2.3e9 # Central frequency [Hz]
    dish_diam = 100 #Telescope diameter meters (single dish in current version)
    dish_Tsys = 14.80 #Telescope Tsys [Kelvin]
    dish_app_eff = 0.72 #Telescope Aperture Efficiency
SNR_threshold = 10 #Survey threshold [sigma above the mean]
spectral_resolution = 3. #Spectral resolution [Hz]
scan_obs_time = 300 # Observation time per scan [sec]
#max_distance = 560.22 #Maximum distance [pc]
iband = 1000e6 #Instantaneous Bandwidth [Hz]
shape = '*' # Figure shape
color = 'g' # Figure color
y_label_units = True # Units in Y label
#---------------------------
# Edit values above ^^^^^
#---------------------------
#Calculating limits
zeta_AO = 1e3*0.5/ 1e13
freq_range_norm = (band/central_freq)
    SEFD = calc_SEFD(calc_DishArea(dish_diam), dish_Tsys, eff=dish_app_eff) # 10 Jy (GBT)
    SEFD = 14.80 # hard-coded override of the computed value above
Sens = calc_Sensitivity(SNR_threshold, spectral_resolution,scan_obs_time,SEFD=SEFD)
dist_m = (max_distance*3.26156 * u.lyr.to('m'))
EIRP = calc_EIRP_min(dist_m,Sens)
#EIRP = 466.4e12
survey_rarity = N_stars*freq_range_norm
survey_speed = SEFD**2*spectral_resolution/iband
survey_sky = N_stars * calc_BeamSize(dish_diam,central_freq)
survey_DFM = survey_sky * band / Sens**(3/2.)
def print_project():
print('~o~', project ,' (', telescope,') ', '~o~')
print('SEFD :', SEFD)
print('Sens :', Sens)
print('EIRP :', EIRP)
print('Max Distance (m) :', dist_m)
print('BeamSize :', calc_BeamSize(dish_diam,central_freq))
print('Sky Coverage :', survey_sky)
print('CWTFM :', zeta_AO *(EIRP) / (survey_rarity))
print('DFM :', survey_DFM)
print_project()
#---------------------------
#Comparing SETI limits
if save:
compare_SETI_limits(EIRP,survey_rarity,shape=shape,color=color,project=project,y_label_units=y_label_units, save_as='SBand_seti_compare')
sband_dict = {'EIRP':EIRP, 'rarity':survey_rarity, 'shape':shape, 'color':color, 'project':project, 'y_label_units':y_label_units}
return sband_dict
| 2,618 | 33.012987 | 145 |
py
|
BL-TESSsearch
|
BL-TESSsearch-main/analysis/seti_limits_py3/XBand_SETI_compare.py
|
from ET_power_law import *
import matplotlib.pylab as plt
def xband_compare(max_distance=746.65, save=False):  # default max_distance [pc] as noted below
#---------------------------
# Edit values below vvvvvv
#---------------------------
# Your new values ( e.g. Enriquez 2017 x 100)
project = 'This Study: X-Band' #Project name
telescope = 'GBT' # Telescope name
N_stars = 23 # Estimated number of stars
band = 4200e6 # Total bandwidth [Hz]
    central_freq = 9.9e9 # Central frequency [Hz]
    dish_diam = 100 #Telescope diameter meters (single dish in current version)
    dish_Tsys = 30.80 #Telescope Tsys [Kelvin]
    dish_app_eff = 0.72 #Telescope Aperture Efficiency
SNR_threshold = 10 #Survey threshold [sigma above the mean]
spectral_resolution = 3. #Spectral resolution [Hz]
scan_obs_time = 300 # Observation time per scan [sec]
#max_distance = 746.65 #Maximum distance [pc]
iband = 4200e6 #Instantaneous Bandwidth [Hz]
shape = '*' # Figure shape
color = 'orange' # Figure color
y_label_units = True # Units in Y label
#---------------------------
# Edit values above ^^^^^
#---------------------------
#Calculating limits
zeta_AO = 1e3*0.5/ 1e13
freq_range_norm = (band/central_freq)
    SEFD = calc_SEFD(calc_DishArea(dish_diam), dish_Tsys, eff=dish_app_eff) # 10 Jy (GBT)
    SEFD = 15.2522 # hard-coded override of the computed value above
Sens = calc_Sensitivity(SNR_threshold, spectral_resolution,scan_obs_time,SEFD=SEFD)
dist_m = (max_distance*3.26156 * u.lyr.to('m'))
EIRP = calc_EIRP_min(dist_m,Sens)
#EIRP = 970.5e12
survey_rarity = N_stars*freq_range_norm
survey_speed = SEFD**2*spectral_resolution/iband
survey_sky = N_stars * calc_BeamSize(dish_diam,central_freq)
survey_DFM = survey_sky * band / Sens**(3/2.)
def print_project():
print('~o~', project ,' (', telescope,') ', '~o~')
print('SEFD :', SEFD)
print('Sens :', Sens)
print('EIRP :', EIRP)
print('Max Distance (m) :', dist_m)
print('BeamSize :', calc_BeamSize(dish_diam,central_freq))
print('Sky Coverage :', survey_sky)
print('CWTFM :', zeta_AO *(EIRP) / (survey_rarity))
print('DFM :', survey_DFM)
print_project()
#---------------------------
#Comparing SETI limits
if save:
compare_SETI_limits(EIRP,survey_rarity,shape=shape,color=color,project=project,y_label_units=y_label_units, save_as='XBand_seti_compare')
xband_dict = {'EIRP':EIRP, 'rarity':survey_rarity, 'shape':shape, 'color':color, 'project':project, 'y_label_units':y_label_units}
return xband_dict
| 2,630 | 33.168831 | 145 |
py
|
BL-TESSsearch
|
BL-TESSsearch-main/analysis/seti_limits_py3/color_coded_ET_power_law.py
|
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from astropy import units as u
from matplotlib.patches import Polygon
#plt.savefig(image_filename, format='png',bbox_inches='tight')
from scipy import stats
#import add_SETI_limits
def calc_DishArea(d):
""" Compute dish area
d = dish diameter
"""
return np.pi * (d/2)**2
def calc_BeamSize(d,v,verbose=False):
""" Compute BeamSize
d = dish diameter
v = frequency
"""
c = 2.998e8 #speed of light
if verbose:
print('\nBeam is: %f \n'%(1.22* (c/(d*v)) *57.2958))
return (1.22* (c/(d*v)) *57.2958/2)**2*np.pi
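# Worked example: for a 100 m dish at 1.5 GHz, the FWHM is
# 1.22 * c / (d * v) * 57.2958 ~ 0.14 deg, so
# calc_BeamSize(100, 1.5e9) ~ 0.0153 square degrees (pi * (0.14/2)**2).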
def calc_SEFD(A, Tsys, eff=1.0):
""" Calculate SEFD
Tsys = system temperature
A = collecting area
Ae = effective collecting area
    eff = aperture efficiency (0.0 to 1.0)
"""
    kb = 1.3806488e3 # Boltzmann constant (1.380649e-23 J/K) rescaled by 1e-26 so the SEFD comes out in Jy
Ae = A*eff
return 2 * Tsys * kb / Ae
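# Worked example: for the GBT values used below (100 m dish, Tsys = 20 K,
# aperture efficiency 0.72), calc_SEFD(calc_DishArea(100), 20, eff=0.72)
# returns ~9.8 Jy, consistent with the "10 Jy (GBT)" notes later in this file.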
def calc_Sensitivity(m, nu, t, SEFD=0, Tsys=10,eff=1.0,A=100,npol=2.,narrow=True):
""" Minimum detectable luminosity for narrowband emission
Tsys = system temperature
A = collecting area
m = threshold, (e.g. 10)
nu = channel bandwidth
t = observing time
narrow = True if signal is narrower than spectral resolution.
"""
if not SEFD:
sefd = calc_SEFD(A, Tsys, eff=eff)
else:
sefd = SEFD
if narrow:
sens = m * sefd * np.sqrt(nu/(npol*t))
else:
sens = m * sefd / np.sqrt(npol*nu*t)
return sens
def calc_EIRP_min(d,Sens):
""" Minimum detectable luminosity (EIRP) for narrowband emission.
    d = distance to target star [m]
    Sens = sensitivity of the obs (Jy)
    """
    # 1 Jy = 1e-26 W/m2/Hz
return 4 * np.pi * d**2 * Sens *1e-26
def calc_gain(ll, d):
""" Gain of a dish telescope
ll = wavelength (lambda)
d = dish diameter (m)
"""
return (np.pi * d / ll)**2
def calc_NP_law(alpha,P):
    ''' Calculates the power law, given the power-law exponent and an array of EIRP powers in W. Based on Gray & Mooley.
'''
# No = 1e25**alpha # Not sure how I got 1e25 for the transmitter power, but from the plot I get a lower number.
No = 1.e21**alpha
NP = No*(1./P**(alpha))
NPn = NP*1e3/4e11 # Normalized to stars in MW (4e11), and BW (1e3 .. ).
return NPn
def calc_NP_law2(alpha,P):
    ''' Calculates the power law, given the power-law exponent and an array of EIRP powers in W. Based on BL L-band.
'''
No = 706146012574.0**alpha / 304.48
NP = No*(1./P**(alpha))
return NP
def calc_NP_law3(P):
    ''' Calculates the power law for an array of EIRP powers in Watts, with the exponent fit from the two anchor points below (based on both relations above).
'''
E1 = 5.e+11
S1 = 350
E2 = 1.98792219e+21 #2.75879335e+21
S2 = 7.14285714e+08
# Solving for alpha
alpha = np.log10(S2/S1) /np.log10(E2/E1)
print('The exponent (alpha) = ', alpha)
# Solving for No
No = E1**alpha / S1
NP = No*(1./P**(alpha))
return NP
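# With the anchor points above, (E1, S1) = (5e11, 350) and
# (E2, S2) ~ (1.99e21, 7.14e8), the fitted exponent is
# alpha = log10(S2/S1) / log10(E2/E1) ~ 0.657.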
def ET_power_law(verbose=False):
#---------------------------
# Standardizing the sensitivity of telescopes by figuring out the max EIRP of a transmitter they could have found.
# For this, I need the sensitivity of the observation, given SEFD and so on.
#Standard distance (100 ly)
dist_std = (100. * u.lyr.to('m'))
    #So decided not to use the distance above; it does make sense if the original distance is shorter, but not if the distance is farther away (like in Siemion2013)
    #normalization to L_AO = 2e13 W, frac_freq = 1/2 and N_stars = 1k
zeta_AO = 1 #np.log10(1e3*.5)/ np.log10(2e13)
zeta_AO = 1e3*0.5/ 1e13
#---------------------------
#BL
telescope = 'GBT'
BL_stars= 692
band = 660e6 #(1.1-1.2,1.34-1.9)
freq_range_norm = (band/1.5e9)
BL_SEFD = calc_SEFD(calc_DishArea(100), 20, eff=0.72) # 10 Jy (GBT)
m=25.; nu =3.; t=300.
Sens = calc_Sensitivity(m, nu,t,SEFD=BL_SEFD)
dist_std = (50*3.26156 * u.lyr.to('m')) # Max distance approx
BL_EIRP = calc_EIRP_min(dist_std,Sens)
BL_rarity = BL_stars*freq_range_norm
iband = 800e6
BL_speed = BL_SEFD**2*nu/iband
BL_sky = BL_stars * calc_BeamSize(100,1.5e9)
BL_DFM = BL_sky * band / Sens**(3/2.)
if verbose:
print('SEFD (BL):', BL_SEFD)
print('Sens (BL):', Sens)
print('EIRP (BL):', BL_EIRP)
print('BeamSize (BL):', calc_BeamSize(100,1.5e9))
print('Sky Coverage (BL):', BL_sky)
print('CWTFM (BL):', zeta_AO *(BL_EIRP) / (BL_rarity))
print('DFM (BL):', BL_DFM)
print('~o~')
#----------
# Gray & Mooley 2017
telescope=['VLA']
GM_stars= 1e12
band = np.array([1e6, 0.125e6])
central_freq = np.array([1.4e9,8.4e9])
freq_range_norm = (band/central_freq)
SEFD = calc_SEFD(calc_DishArea(25),35, eff=0.45) / np.sqrt(27*26.) #Perley 2009
m=7.0; nu =[122., 15.3]; t=[20*60,5*60]
Sens = np.array([calc_Sensitivity(m, nu[0],t[0],SEFD=SEFD),calc_Sensitivity(m, nu[1],t[1],SEFD=SEFD)])
dist_std = (2.5e6 * u.lyr.to('m')) # Max distance approx
GM_EIRP = np.array([calc_EIRP_min(dist_std,Sen) for Sen in Sens])
GM_rarity = GM_stars*freq_range_norm
GM_rarity_tot = GM_rarity.sum()
GM_EIRP_tot = GM_EIRP.max()
iband = 1e6
GM_speed = SEFD**2*nu[0]/iband
GM_sky = 8*(0.95/2.)**2*np.pi # 0.95deg images #NOTE: in Enriquez 2017, we used this as the radius, but it is the diameter.
GM_DFM = GM_sky * band / Sens**(3/2.)
if verbose:
print('SEFD (Gray & Mooley 2017):', SEFD)
print('Sens (Gray & Mooley 2017):', Sens)
print('EIRP (Gray & Mooley 2017):', GM_EIRP)
print('BeamSize (Gray & Mooley 2017):', end=' ')
print('Sky Coverage (Gray & Mooley 2017):', GM_sky)
print('CWTFM (Gray & Mooley 2017):', zeta_AO * (GM_EIRP_tot)/ (GM_rarity_tot)) #,'or', zeta_AO*stats.hmean(GM_EIRP/GM_rarity)
print('DFM (Gray & Mooley 2017):', GM_DFM)
print('~o~')
#----------
#Phoenix
telescope = ['Arecibo','Arecibo; Parkes,Parkes,NRAO140']
Ph_stars = np.array([290,371,206,105,195]) # From Harp2016
#180MHz skip Backus2002; band from Harp2016
band = np.array([(1.75-1.2)*1e9 - 180e6,(3.0-1.75)*1e9,(1.75-1.2)*1e9, (3.0-1.75)*1e9, (3.0-1.2)*1e9])
central_freq = np.array([1.5e9,2.375e9,1.5e9,2.375e9,2.1e9])
freq_range_norm = (band/central_freq)
Dish_D = np.array([305,225,64,64,43]) # Email from G. Harp
SEFD = np.array([calc_SEFD(calc_DishArea(Dish_D[0]), 40, eff=0.7),
calc_SEFD(calc_DishArea(Dish_D[1]), 40, eff=0.7),
calc_SEFD(calc_DishArea(Dish_D[2]), 35, eff=0.7),
calc_SEFD(calc_DishArea(Dish_D[3]), 35, eff=0.7),
calc_SEFD(calc_DishArea(Dish_D[4]), 35, eff=0.7)])
m=1; nu =1.0; t=[276,195,276,138,552]
Sens1 = np.array([calc_Sensitivity(m,nu,t[i],SEFD=SEFD[i],narrow=False) for i in range(len(SEFD))])
Sens = np.array([16,16,100,100,100]) # From Harp2016
# Max distance approx ; 147Ly median distance Shostalk(2000), ~700 farthest
dist_std = (700 * u.lyr.to('m'))
Ph_EIRP = np.array([calc_EIRP_min(dist_std,Sen) for Sen in Sens])
Ph_rarity = Ph_stars*freq_range_norm
Ph_stars_tot = Ph_stars.sum()
Ph_rarity_tot = Ph_rarity.sum()
Ph_EIRP_tot = Ph_EIRP.max()
iband = 20e6
Ph_speed = SEFD.mean()**2*nu/iband #Note: This value is calculated with self calculated SEFD values (which are not completely consistent with values expected from Harp 2016 values).
Ph_sky = Ph_stars * np.array([calc_BeamSize(Dish_D[i],central_freq[i]) for i in range(len(Dish_D))])
Ph_DFM = Ph_sky * band / Sens**(3/2.)
if verbose:
print('SEFD (Phoenix):', SEFD)
print('Sens (Phoenix):', Sens1)
print('Sens_Harp (Phoenix):', Sens)
print('EIRP (Phoenix):', Ph_EIRP)
print('BeamSize (Phoenix):', np.array([calc_BeamSize(Dish_D[i],central_freq[i]) for i in range(len(Dish_D))]))
print('Sky Coverage (Phoenix):', Ph_sky.sum())
print('CWTFM (Phoenix):', zeta_AO * (Ph_EIRP)/ (Ph_rarity))
print('CWTFM (Phoenix):', zeta_AO * (Ph_EIRP_tot)/ (Ph_rarity_tot))
print('DFM (Phoenix):', Ph_DFM)
print('~o~')
#----------
#ATA
telescope = 'ATA'
ATA_stars= np.array([65,1959,2822,7459])
band = np.array([8000.e6,2040.e6,337.e6,268.e6]) #There are 73MHz which are RFI flagged, it is ignored here.
central_freq = 5e9 # 1-9 GHz
freq_range_norm = (band/central_freq)
#Tsys = (80+120+95+137)/4. = 108
SEFD = calc_SEFD(calc_DishArea(6.1), 108, eff=0.58) / np.sqrt(27*26)
SEFDs = np.array([SEFD,SEFD,SEFD,SEFD])
m=6.5; nu =0.7; t=93.
dist_std = np.array([(1.4e3*3.26156 * u.lyr.to('m')),(1.1e3*3.26156 * u.lyr.to('m')),(300 * u.lyr.to('m')),(500 * u.lyr.to('m'))]) #Turnbull 2003 for HabCat
Sens = np.array([calc_Sensitivity(m,nu,t,SEFD=SEF,narrow=False) for SEF in SEFDs])
ATA_EIRP = np.array([calc_EIRP_min(dist_std[i],Sens[i]) for i in range(len(Sens))])
ATA_rarity = ATA_stars*freq_range_norm
ATA_rarity_tot = ATA_rarity.sum()
ATA_stars_tot = ATA_stars.sum()
ATA_EIRP_tot = ATA_EIRP.max()
iband = 70e6
ATA_speed = SEFD**2*nu/iband
ATA_sky = ATA_stars * 3*6./4.*np.pi/3600. # beam 3'x6' at 1.4GHz
ATA_DFM = ATA_sky * band / Sens**(3/2.)
if verbose:
print('SEFD (ATA):', SEFD)
print('Sens (ATA):', Sens)
print('EIRP (ATA):', ATA_EIRP)
print('BeamSize (ATA):', end=' ')
print('Sky Coverage (ATA):', ATA_sky.sum())
print('CWTFM (ATA):', zeta_AO * (ATA_EIRP_tot)/ (ATA_rarity_tot))
print('DFM (ATA):', ATA_DFM)
print('~o~')
#----------
#Siemion 2013
telescope = 'GBT'
Siemion_stars= 86
band = 800e6 - 130e6 #(1.1-1.2,1.33-1.9)
freq_range_norm = (band/1.5e9)
SEFD= calc_SEFD(calc_DishArea(100), 20, eff=0.72) # 10 Jy (GBT)
m=25.; nu =1.; t=300.
Sens = calc_Sensitivity(m, nu,t,SEFD=SEFD)
dist_std = (1.1e3*3.26156 * u.lyr.to('m')) # Max distance approx
Siemion_EIRP = calc_EIRP_min(dist_std,Sens)
Siemion_rarity = Siemion_stars*freq_range_norm
iband = 800e6
Siemion_speed = (SEFD/0.85)**2*nu/iband # 0.85 ==> Siemion priv. comm. (due to 2 bit data format)
Siemion_sky = Siemion_stars * calc_BeamSize(100,1.5e9)
Siemion_DFM = Siemion_sky * band / Sens**(3/2.)
if verbose:
print('SEFD (Siemion2013):', SEFD)
print('Sens (Siemion2013):', Sens)
print('EIRP (Siemion2013):', Siemion_EIRP)
print('BeamSize (Siemion2013):',calc_BeamSize(100,1.5e9))
print('Sky Coverage (Siemion2013):', Siemion_sky)
print('CWTFM (Siemion2013):', zeta_AO * (Siemion_EIRP)/ (Siemion_rarity))
print('DFM (Siemion2013):', Siemion_DFM)
print('~o~')
#----------
#Valdes 1986
telescope='HCRO'
Valdes_stars = np.array([53, 12])
band = np.array([256*4883, 1024*76])
freq_range_norm = (band/1.516e9)
SEFD = calc_SEFD(calc_DishArea(26), 100, eff=0.5)
m=3.0; nu =[4883., 76.]; t=3000.
Sens = np.array([calc_Sensitivity(m, nu[0],t,SEFD=SEFD,npol=1.),calc_Sensitivity(m, nu[1],t,SEFD=SEFD,npol=1.)])
dist_std = (20 * u.lyr.to('m')) # Max distance approx
Valdes_EIRP = np.array([calc_EIRP_min(dist_std,Sen) for Sen in Sens])
Valdes_rarity = Valdes_stars*freq_range_norm
Valdes_rarity_tot = Valdes_rarity.sum()
Valdes_EIRP_tot = Valdes_EIRP.max()
iband = 256*4883
Valdes_speed = SEFD**2*nu[0]/iband
Valdes_sky = (Valdes_stars * calc_BeamSize(26,1.5e9)).sum()
Valdes_DFM = Valdes_sky * band / Sens**(3/2.)
if verbose:
print('SEFD (Valdes 1986):', SEFD)
print('Sens (Valdes 1986):', Sens)
print('EIRP (Valdes 1986):', Valdes_EIRP)
print('BeamSize (Valdes 1986):',calc_BeamSize(26,1.5e9))
print('Sky Coverage (Valdes 1986):', Valdes_sky)
print('CWTFM (Valdes 1986):', zeta_AO * (Valdes_EIRP_tot)/ (Valdes_rarity_tot))
print('DFM (Valdes 1986):', Valdes_DFM)
print('~o~')
#----------
#Tarter 1980
telsecope = 'NRAO 91m'
Tarter_stars=201
band = 360e3*4.
freq_range_norm = (band/1.666e9)
SEFD = calc_SEFD(calc_DishArea(91), 70, eff=0.6)
m=12.0; nu =5.5; t= 45
Sens = calc_Sensitivity(m, nu,t,SEFD=SEFD)
dist_std = (25*3.26156* u.lyr.to('m')) # Max distance approx
Tarter_EIRP = calc_EIRP_min(dist_std,Sens)
Tarter_rarity = Tarter_stars*freq_range_norm
iband = 360e3
Tarter_speed = SEFD**2*nu/iband
Tarter_sky = Tarter_stars * calc_BeamSize(91,1.666e9)
Tarter_DFM = Tarter_sky * band / Sens**(3/2.)
if verbose:
print('SEFD (Tarter1980):', SEFD)
print('Sens (Tarter1980):', Sens)
print('EIRP (Tarter1980):', Tarter_EIRP)
print('BeamSize (Tarter1980):', calc_BeamSize(91,1.666e9))
print('Sky Coverage (Tarter1980):', Tarter_sky)
print('CWTFM (Tarter1980):', zeta_AO * (Tarter_EIRP)/ (Tarter_rarity))
print('DFM (Tarter1980):', Tarter_DFM)
print('~o~')
#----------
#Verschuur1973
telescope=['300ft Telescope', '140ft Telescope']
Verschuur_stars=np.array([3,8])
band = np.array([0.6e6,20e6])
freq_range_norm = (band/1.426e9)
SEFD = np.array([calc_SEFD(calc_DishArea(300*0.3048),110, eff=0.75),calc_SEFD(calc_DishArea(140*0.3048),48, eff=0.75)]) #**NOTE** the 0.75 for the 140' is not real
m=3.0; nu =[490.,7.2e3]; t= [4*60.,5*60.]
Sens = np.array([calc_Sensitivity(m, nu[0],t[0],SEFD=SEFD[0]),calc_Sensitivity(m, nu[1],t[1],SEFD=SEFD[1])])
dist_std = (5*3.26156 * u.lyr.to('m'))
Verschuur_EIRP = np.array([calc_EIRP_min(dist_std,Sen) for Sen in Sens])
Verschuur_rarity = Verschuur_stars*freq_range_norm
Verschuur_rarity_tot = Verschuur_rarity.sum()
Verschuur_EIRP_tot = Verschuur_EIRP.max()
iband = np.array([0.6e6, 2.5e6]) #300 ft: Two 192-channel receivers (at 130 km/s with 4.74kHz=1km/s at this freq.)
Verschuur_speed = SEFD.min()**2*nu[0]/iband[0]
Verschuur_sky = (Verschuur_stars * np.array([calc_BeamSize(300*0.3048,1.42e9),calc_BeamSize(140*0.3048,1.42e9)])).sum()*2 # The two comes from the off beam.
Verschuur_DFM = Verschuur_sky * band / Sens**(3/2.)
if verbose:
print('SEFD (Verschuur1973):', SEFD)
print('Sens (Verschuur1973):', Sens)
print('EIRP (Verschuur1973):', Verschuur_EIRP)
print('BeamSize (Verschuur1973):', np.array([calc_BeamSize(300*0.3048,1.42e9),calc_BeamSize(140*0.3048,1.42e9)]))
print('Sky Coverage (Verschuur1973):', Verschuur_sky)
print('CWTFM (Verschuur1973):', zeta_AO * (Verschuur_EIRP_tot)/ (Verschuur_rarity_tot))
print('DFM (Verschuur1973):', Verschuur_DFM)
print('~o~')
#----------
#META Horowitz&Sagan
telescope=''
Horowitz_stars= 1e7
band = 1.2e6
freq_range_norm = (band/1.42e9)
SEFD = calc_SEFD(calc_DishArea(26),85, eff=0.5) # eff=0.5 ==> We were unable to find a value in the literature. We assume a similar value to the antenna of the same dimensions from Valdes & Freitas (1986).
m=30; nu =0.05; t=20
Sens = calc_Sensitivity(m, nu,t,SEFD=SEFD,narrow=False)
dist_std = (700*3.26156 * u.lyr.to('m')) # Max distance: # Horowitz & Sagan (1993) suggested values for the number of stars given a distance, based on the power of an isotropic beacon.
Horowitz_EIRP = calc_EIRP_min(dist_std,Sens)
Horowitz_rarity = Horowitz_stars*freq_range_norm
iband = 400e3
Horowitz_speed = SEFD**2*nu/iband
Horowitz_sky = 41253*.68 # Horowitz_stars * calc_BeamSize(26,1.42e9)
Horowitz_DFM = Horowitz_sky * band / Sens**(3/2.)
if verbose:
print('SEFD (Horowitz):', SEFD)
print('Sens (Horowitz):', Sens)
print('EIRP (Horowitz):', Horowitz_EIRP)
print('BeamSize (Horowitz):', end=' ')
print('Sky Coverage (Horowitz):', Horowitz_sky)
print('CWTFM (Horowitz):', zeta_AO * (Horowitz_EIRP)/ (Horowitz_rarity))
print('DFM (Horowitz):', Horowitz_DFM)
print('~o~')
#---------------------------
#BL
Price_telescopes = ['GBT','GBT','Parkes']
Price_BL_stars = np.array([883,1006,195])
Price_band = np.array([(1.2-1.025+1.925-1.34)*1e9,(2.72-1.82)*1e9,(3.444-2.574)*1e9])
Price_central_freq = np.array([1.5e9,2.27e9,3.0e9,])
Price_freq_range_norm = (Price_band/Price_central_freq)
Dish_D = np.array([100,100,64])
Price_BL_SEFD = np.array([calc_SEFD(calc_DishArea(Dish_D[0]), 20, eff=0.72),
calc_SEFD(calc_DishArea(Dish_D[1]), 20, eff=0.72),
calc_SEFD(calc_DishArea(Dish_D[2]), 35, eff=0.7),
])
m=10.; nu =3.; t=300.
Price_Sens = np.array([calc_Sensitivity(m,nu,t,SEFD=Price_BL_SEFD[i]) for i in range(len(Price_BL_SEFD))])
dist_std = (50*3.26156 * u.lyr.to('m'))
Price_BL_EIRP = np.array([calc_EIRP_min(dist_std,Sen) for Sen in Price_Sens])
Price_BL_rarity = Price_BL_stars*Price_freq_range_norm
Price_BL_stars_tot = Price_BL_stars[:2].sum()
Price_rarity_tot = Price_BL_rarity[:2].sum()
Price_EIRP_tot = Price_BL_EIRP[:2].max()
iband = 900e6
Price_BL_speed = Price_BL_SEFD.mean()**2*nu/iband
Price_BL_sky = Price_BL_stars * np.array([calc_BeamSize(Dish_D[i],Price_central_freq[i]) for i in range(len(Dish_D))])
Price_BL_DFM = Price_BL_sky * Price_band / Price_Sens**(3/2.)
if verbose:
print('SEFD (Price_BL):', Price_BL_SEFD)
print('Sens (Price_BL):', Price_Sens)
print('EIRP (Price_BL):', Price_BL_EIRP)
print('BeamSize (Price_BL):', np.array([calc_BeamSize(Dish_D[i],Price_central_freq[i]) for i in range(len(Dish_D))]))
print('Sky Coverage (Price_BL):', Price_BL_sky.sum())
print('CWTFM (Price_BL):', zeta_AO *(Price_BL_EIRP) / (Price_BL_rarity))
print('CWTFM (Price_BL_tot):', zeta_AO *(Price_EIRP_tot) / (Price_rarity_tot))
print('DFM (Price_BL):', Price_BL_DFM)
print('~o~')
#---------------------------
#Tremblay 2020
#tremblay = add_SETI_limits.add_SETI_limits('Tremblay 2020', 'MWA', 10355066, 128e6-98e6, (128e6-98e6)2, 3e3, tsys, app_eff, snr, spec_res, obs_time_per_scan, dist_max, 30.72e6, fig_shape, fig_color)
tremblay_transmitter_rate = -2.86
tremblay_EIRP_min = 17.23
#---------------------------------------------------------------------------------
#EIRP values in watts.
P = np.array([1e12,1e14,1e16,1e18,1e20,1e22])
#---------------------------
# Luminosity function limit on putative transmitters.
#plt.plot(np.log10(P),np.log10(calc_NP_law3(P)),lw=20,color='gray',alpha=0.3)#,label=r'$\alpha$: %s'%alpha)
    solarpower, = plt.plot([17,17],[-11,-9.5],'--',lw=2,color='k',alpha=1,label='Solar Power')
    solarpower1, = plt.plot([17,17], [-6.5, 4], '--', lw=2, color='k', alpha=1)
    plt.text(17, -9.3, r'$\it{Solar~Power}$', {'va': 'bottom', 'ha': 'center'}, rotation=90, fontsize=24)
    arecibo, = plt.plot([13,13],[-11,-9],lw=2,color='k',alpha=1, label='Arecibo')
    arecibo1, = plt.plot([13,13], [-7, 4], lw=2, color='k', alpha=1)
    plt.text(13, -8.8, r'$\it{Arecibo}$', {'va': 'bottom', 'ha': 'center'}, rotation=90, fontsize=24)
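    # Axes convention for this figure (a summary of what the code below assumes):
    # x = log10(EIRP_min [W]) and y = log10(transmitter rate) = log10(1/(N_stars * rel_BW)).
    # The vertical reference lines mark, roughly, the EIRP of the Arecibo planetary
    # radar (log10 ~ 13) and the total solar power intercepted by Earth, i.e. a
    # Kardashev Type I benchmark (log10 ~ 17).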
#---------------------------
alpha = 0.7
markersize = 20
fontsize = 20
ticksize = fontsize - 2
dot_size = markersize - 12
## PLOTTING
# different colors for different bands...
    # L: blue (dodgerblue)
    # S: red (firebrick)
    # C: green (seagreen)
    # X: orange (peru)
# if surveys overlap one or more bands, just add a markeredgecolor
LBand = 'dodgerblue'
SBand = 'firebrick'
CBand = 'seagreen'
XBand = 'peru'
price1, = plt.plot([np.log10(Price_BL_EIRP[2])],[np.log10(1./Price_BL_rarity[2])],'h', color = SBand, markersize = markersize)
price2, = plt.plot([np.log10(Price_EIRP_tot)],[np.log10(1./Price_rarity_tot)],'h', color = LBand,markersize = markersize, markeredgewidth=2, markeredgecolor=SBand)
#-------
enriquez, = plt.plot([np.log10(BL_EIRP)],[np.log10(1./BL_rarity)],'D', color = LBand,markeredgecolor='#1c608e',markersize = markersize-8, markeredgewidth=2)
# Gray and Mooley did a lot at L-Band and 1 at X-Band
graymooley, = plt.plot(np.log10(GM_EIRP),np.log10(1./GM_rarity),'o',color =LBand,markeredgecolor=XBand,markeredgewidth=2,markersize = markersize)
# All of Harp observations were conducted at L-Band and S-Band
#harpab, = plt.plot(np.log10(ATA_EIRP[0:2]),np.log10(1./ATA_rarity[0:2]),'^',color =LBand,markeredgecolor=SBand,markeredgewidth=2, markersize = markersize)
#harpc, = plt.plot(np.log10(ATA_EIRP[2]),np.log10(1./ATA_rarity[2]),'s',color =LBand,markeredgecolor=SBand,markeredgewidth=2,markersize = markersize)
#harpd, = plt.plot(np.log10(ATA_EIRP[3]),np.log10(1./ATA_rarity[3]),'h',color =LBand,markeredgecolor=SBand,markeredgewidth=2,markersize = markersize)
harpall1, = plt.plot(np.log10(ATA_EIRP[0]),np.log10(1./ATA_rarity_tot),marker='^', color=CBand,markersize = markersize,markeredgecolor=SBand,markeredgewidth=2,linestyle='None')
#harpall2, = plt.plot(np.log10(ATA_EIRP[0:1]),np.log10(1./ATA_rarity[0:1]),marker='o', color=LBand, markeredgecolor=CBand)
#harpall2, = plt.plot(np.log10(ATA_EIRP[0]),np.log10(1./ATA_rarity_tot),marker='^', color=LBand,markersize = markersize-2,linestyle='None')
siemion1, = plt.plot([np.log10(Siemion_EIRP)],[np.log10(1./Siemion_rarity)],'>',color =LBand,markersize = markersize, linestyle='None')
#siemion2, = plt.plot([np.log10(Siemion_EIRP)],[np.log10(1./Siemion_rarity)],marker='o',color=LBand,markersize = dot_size, linestyle='None')
#phoenix, = plt.plot(np.log10(Ph_EIRP),np.log10(1./Ph_rarity),'<b',color=SBand,markersize = markersize)
phoenixall, = plt.plot([np.log10(Ph_EIRP_tot)],[np.log10(1./Ph_rarity_tot)],marker='<',color=SBand,markersize = markersize)
horowitz_sagan, = plt.plot(np.log10(Horowitz_EIRP),np.log10(1./Horowitz_rarity),marker='s',color =SBand,markeredgecolor='w',markersize = markersize, linestyle='None')
#valdez, = plt.plot(np.log10(Valdes_EIRP),np.log10(1./Valdes_rarity),'sy',color =LBand, markersize = markersize)
#tarter, = plt.plot([np.log10(Tarter_EIRP)],[np.log10(1./Tarter_rarity)],'vc',color =LBand,markersize = markersize)
#verschuur, = plt.plot(np.log10(Verschuur_EIRP),np.log10(1./Verschuur_rarity),'sm',color =LBand,markersize = markersize)
mwa, = plt.plot(tremblay_EIRP_min, tremblay_transmitter_rate, marker='X', color='tab:purple', markersize=markersize, markeredgecolor='w', linestyle='None')#, linestyle='None')
legend_handles = {'p1':price1, 'p2':price2, 'e':enriquez, 'gm':graymooley, 'h_a1':harpall1, 's1':siemion1, 'pha':phoenixall, 'hs':horowitz_sagan, 'm':mwa}
#legend_handles = {'p1':price1, 'p2':price2, 'e':enriquez, 'gm':graymooley, 'h_ab':harpab, 'h_c':harpc, 'h_d':harpd, 'h_a1':harpall1, 'h_a2':harpall2, 's1':siemion1,'s2':siemion2, 'ph':phoenix, 'pha':phoenixall, 'hs':horowitz_sagan, 'v':valdez, 't':tarter, 'ver':verschuur, 'm':mwa}
#legend_handles = {'p1':price1, 'p2':price2, 'e':enriquez, 'gm':graymooley, 'h_ab':harpab, 'h_c':harpc, 'h_d':harpd, 'h_a1':harpall1, 'h_a2':harpall2, 's1':siemion1, 's2':siemion2, 'ph':phoenix, 'pha':phoenixall, 'hs':horowitz_sagan, 'v':valdez, 't':tarter, 'ver':verschuur}
return legend_handles
def compare_SETI_limits(EIRP,rarity,shape='o',color='k',project='This Project',y_label_units=True, save_as=None):
    ''' Compare a SETI project with previous surveys.
'''
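    # Usage sketch (illustrative numbers only, not taken from any survey):
    #   compare_SETI_limits(EIRP=5e12, rarity=330., shape='*', color='r',
    #                       project='My survey', save_as='my_survey_compare')
    # EIRP is in watts and rarity is N_stars*(band/central_freq); the new point is
    # drawn at (log10(EIRP), log10(1/rarity)) on top of the ET_power_law() figure.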
#---------------------------------------------------------------------------------
# plotting setup
plt.ion()
plt.figure(figsize=(15, 10))
alpha = 0.7
markersize = 20
fontsize = 20
ticksize = fontsize - 2
dot_size = markersize - 12
plt.plot([np.log10(EIRP)],[np.log10(1./rarity)],marker = shape, color = color,markersize = markersize, label=project)
ET_power_law()
plt.legend(numpoints=1,scatterpoints=1,fancybox=True, shadow=True)
plt.xlabel('EIRP [log(W)]',fontsize = fontsize)
#plt.ylabel('Transmiter Galactic Rarity [log((Nstars*BW)^-1)]',fontsize=fontsize)
if y_label_units:
plt.ylabel('Transmitter Rate \n [log(1/(Nstars * rel_BW))]',fontsize=fontsize)
else:
plt.ylabel('Transmitter Rate ',fontsize=fontsize)
plt.xticks(fontsize = ticksize)
plt.yticks(fontsize = ticksize)
#plt.ylim(-10,4)
#plt.xlim(10,23)
image_filename = 'SETI_limits_comparison'
if save_as is not None:
image_filename = save_as
from datetime import datetime
image_filename = image_filename + datetime.now().strftime("_%m-%d-%y_%H:%M:%S") + '.png'
plt.savefig(image_filename, format='png',bbox_inches='tight')
# plt.savefig('Transmitter_Rarity_FoM.pdf', format='pdf', dpi=300,bbox_inches='tight')
| 24,973 | 38.144201 | 286 |
py
|
BL-TESSsearch
|
BL-TESSsearch-main/analysis/seti_limits_py3/LBand_SETI_compare.py
|
from ET_power_law import *
import matplotlib.pylab as plt
def lband_compare(max_distance, save=False):
#---------------------------
# Edit values below vvvvvv
#---------------------------
# Your new values ( e.g. Enriquez 2017 x 100)
project = 'This Study: L-Band' #Project name
telescope = 'GBT' # Telescope name
N_stars = 5 # Estimated number of stars
band = 660e6 # Total bandwidth [Hz]
    central_freq = 1.5e9 # Central frequency [Hz]
dish_diam = 100 #Telescope diameter meters (single dish in current version)
dish_Tsys = 15.60 #Telescope Tsys [Kelvin]
    dish_app_eff = 0.72 #Telescope Aperture Efficiency
SNR_threshold = 10 #Survey threshold [sigma above the mean]
spectral_resolution = 3. #Spectral resolution [Hz]
scan_obs_time = 300 # Observation time per scan [sec]
#max_distance = 505.33 # Maximum distance [pc]
iband = 800e6 #Instantaneous Bandwidth [Hz]
shape = '*' # Figure shape
color = 'r' # Figure color
y_label_units = True # Units in Y label
#---------------------------
# Edit values above ^^^^^
#---------------------------
#Calculating limits
zeta_AO = 1e3*0.5/ 1e13
freq_range_norm = (band/central_freq)
SEFD = calc_SEFD(calc_DishArea(dish_diam), dish_Tsys, eff=dish_app_eff) # 10 Jy (GBT)
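    # NOTE: the next line overrides the calc_SEFD() estimate with a fixed SEFD in Jy
    # (presumably a measured receiver value); comment it out to use the estimate instead.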
SEFD = 7.7251
Sens = calc_Sensitivity(SNR_threshold, spectral_resolution,scan_obs_time,SEFD=SEFD)
dist_m = (max_distance*3.26156 * u.lyr.to('m'))
EIRP = calc_EIRP_min(dist_m,Sens)
#EIRP = 1926e12
#EIRP = 491.6e12
survey_rarity = N_stars*freq_range_norm
survey_speed = SEFD**2*spectral_resolution/iband
survey_sky = N_stars * calc_BeamSize(dish_diam,central_freq)
survey_DFM = survey_sky * band / Sens**(3/2.)
def print_project():
print('~o~', project ,' (', telescope,') ', '~o~')
print('SEFD :', SEFD)
print('Sens :', Sens)
print('Max Distance (m) :', dist_m)
print('EIRP :', EIRP)
print('BeamSize :', calc_BeamSize(dish_diam,central_freq))
print('Sky Coverage :', survey_sky)
print('CWTFM :', zeta_AO *(EIRP) / (survey_rarity))
print('DFM :', survey_DFM)
print_project()
#---------------------------
#Comparing SETI limits
if save:
compare_SETI_limits(EIRP,survey_rarity,shape=shape,color=color,project=project,y_label_units=y_label_units, save_as='LBand_seti_compare')
lband_dict = {'EIRP':EIRP, 'rarity':survey_rarity, 'shape':shape, 'color':color, 'project':project, 'y_label_units':y_label_units}
return lband_dict
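# Usage sketch (the 100 pc distance is an arbitrary example value):
#   lband = lband_compare(max_distance=100., save=False)
#   print(lband['EIRP'], lband['rarity'])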
| 2,641 | 32.025 | 145 |
py
|
BL-TESSsearch
|
BL-TESSsearch-main/analysis/seti_limits_py3/ET_power_law.py
|
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from astropy import units as u
from matplotlib.patches import Polygon
#plt.savefig(image_filename, format='png',bbox_inches='tight')
from scipy import stats
def calc_DishArea(d):
""" Compute dish area
d = dish diameter
"""
return np.pi * (d/2)**2
def calc_BeamSize(d,v,verbose=False):
""" Compute BeamSize
d = dish diameter
v = frequency
"""
c = 2.998e8 #speed of light
if verbose:
print('\nBeam is: %f \n'%(1.22* (c/(d*v)) *57.2958))
return (1.22* (c/(d*v)) *57.2958/2)**2*np.pi
def calc_SEFD(A, Tsys, eff=1.0):
""" Calculate SEFD
Tsys = system temperature
A = collecting area
Ae = effective collecting area
    eff = aperture efficiency (0.0 to 1.0)
    """
    kb = 1.3806488e3 # Boltzmann constant (1.380649e-23 J/K) scaled by 1e26 so that the SEFD comes out in Jy
Ae = A*eff
return 2 * Tsys * kb / Ae
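# Example (rough numbers): calc_SEFD(calc_DishArea(100), 20, eff=0.72) gives ~9.8 Jy
# for a 100 m dish with Tsys = 20 K, consistent with the "10 Jy (GBT)" value quoted below.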
def calc_Sensitivity(m, nu, t, SEFD=0, Tsys=10,eff=1.0,A=100,npol=2.,narrow=True):
""" Minimum detectable luminosity for narrowband emission
Tsys = system temperature
A = collecting area
m = threshold, (e.g. 10)
nu = channel bandwidth
t = observing time
narrow = True if signal is narrower than spectral resolution.
"""
if not SEFD:
sefd = calc_SEFD(A, Tsys, eff=eff)
else:
sefd = SEFD
if narrow:
sens = m * sefd * np.sqrt(nu/(npol*t))
else:
sens = m * sefd / np.sqrt(npol*nu*t)
return sens
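# Example (rough numbers): with m=25, nu=3 Hz, t=300 s, SEFD~9.8 Jy and npol=2,
# the narrowband case gives ~ 25 * 9.8 * sqrt(3/(2*300)) ~ 17 Jy.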
def calc_EIRP_min(d,Sens):
""" Minimum detectable luminosity (EIRP) for narrowband emission.
d = distance to target star []
Sens = sensitivity of the obs (Jy)
"""
#1 Jy = 1e-26 W/m2/Hz)
return 4 * np.pi * d**2 * Sens *1e-26
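# Example (rough numbers): for d = 50 pc ~ 1.5e18 m and Sens ~ 17 Jy,
# EIRP_min = 4*pi*d**2 * 17e-26 ~ 5e12 W.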
def calc_gain(ll, d):
""" Gain of a dish telescope
ll = wavelength (lambda)
d = dish diameter (m)
"""
return (np.pi * d / ll)**2
def calc_NP_law(alpha,P):
    ''' Calculates the power law, given the power-law exponent and an array of EIRP powers in W. Based on Gray & Mooley.
'''
# No = 1e25**alpha # Not sure how I got 1e25 for the transmitter power, but from the plot I get a lower number.
No = 1.e21**alpha
NP = No*(1./P**(alpha))
NPn = NP*1e3/4e11 # Normalized to stars in MW (4e11), and BW (1e3 .. ).
return NPn
def calc_NP_law2(alpha,P):
    ''' Calculates the power law, given the power-law exponent and an array of EIRP powers in W. Based on BL L-band.
'''
No = 706146012574.0**alpha / 304.48
NP = No*(1./P**(alpha))
return NP
def calc_NP_law3(P):
    ''' Calculates the power law for an array of EIRP powers in W; the exponent comes from a fit to both cases above.
'''
E1 = 5.e+11
S1 = 350
E2 = 1.98792219e+21 #2.75879335e+21
S2 = 7.14285714e+08
# Solving for alpha
alpha = np.log10(S2/S1) /np.log10(E2/E1)
print('The exponent (alpha) = ', alpha)
# Solving for No
No = E1**alpha / S1
NP = No*(1./P**(alpha))
return NP
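# With the two anchor points above, alpha = log10(S2/S1)/log10(E2/E1) ~ 0.66
# (the exact value is printed at run time).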
def ET_power_law(verbose=False):
#---------------------------
# Standardizing the sensitivity of telescopes by figuring out the max EIRP of a transmitter they could have found.
# For this, I need the sensitivity of the observation, given SEFD and so on.
#Standard distance (100 ly)
dist_std = (100. * u.lyr.to('m'))
    # Decided not to use the standard distance above: it makes sense when the original distance is shorter, but not when it is farther away (as in Siemion 2013).
    # Normalization to L_AO = 2e13 W, frac_freq = 1/2 and N_stars = 1k.
zeta_AO = 1 #np.log10(1e3*.5)/ np.log10(2e13)
zeta_AO = 1e3*0.5/ 1e13
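    # Figure-of-merit conventions used for every survey below (summarizing what the code computes):
    #   rarity = N_stars * rel_BW, where rel_BW = band / central_freq
    #   CWTFM  = zeta_AO * EIRP_min / rarity, with zeta_AO chosen so that CWTFM = 1
    #            for a 1000-star survey covering half its band with EIRP_min = 1e13 W
    #            (roughly the Arecibo planetary radar)
    #   speed  = SEFD**2 * channel_bw / instantaneous_bw (a relative survey-speed figure)
    #   DFM    = sky_coverage * band / Sens**(3/2) (the Drake figure of merit)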
#---------------------------
#BL
telescope = 'GBT'
BL_stars= 692
band = 660e6 #(1.1-1.2,1.34-1.9)
freq_range_norm = (band/1.5e9)
BL_SEFD = calc_SEFD(calc_DishArea(100), 20, eff=0.72) # 10 Jy (GBT)
m=25.; nu =3.; t=300.
Sens = calc_Sensitivity(m, nu,t,SEFD=BL_SEFD)
dist_std = (50*3.26156 * u.lyr.to('m')) # Max distance approx
BL_EIRP = calc_EIRP_min(dist_std,Sens)
BL_rarity = BL_stars*freq_range_norm
iband = 800e6
BL_speed = BL_SEFD**2*nu/iband
BL_sky = BL_stars * calc_BeamSize(100,1.5e9)
BL_DFM = BL_sky * band / Sens**(3/2.)
if verbose:
print('SEFD (BL):', BL_SEFD)
print('Sens (BL):', Sens)
print('EIRP (BL):', BL_EIRP)
print('BeamSize (BL):', calc_BeamSize(100,1.5e9))
print('Sky Coverage (BL):', BL_sky)
print('CWTFM (BL):', zeta_AO *(BL_EIRP) / (BL_rarity))
print('DFM (BL):', BL_DFM)
print('~o~')
#----------
# Gray & Mooley 2017
telescope=['VLA']
GM_stars= 1e12
band = np.array([1e6, 0.125e6])
central_freq = np.array([1.4e9,8.4e9])
freq_range_norm = (band/central_freq)
SEFD = calc_SEFD(calc_DishArea(25),35, eff=0.45) / np.sqrt(27*26.) #Perley 2009
m=7.0; nu =[122., 15.3]; t=[20*60,5*60]
Sens = np.array([calc_Sensitivity(m, nu[0],t[0],SEFD=SEFD),calc_Sensitivity(m, nu[1],t[1],SEFD=SEFD)])
dist_std = (2.5e6 * u.lyr.to('m')) # Max distance approx
GM_EIRP = np.array([calc_EIRP_min(dist_std,Sen) for Sen in Sens])
GM_rarity = GM_stars*freq_range_norm
GM_rarity_tot = GM_rarity.sum()
GM_EIRP_tot = GM_EIRP.max()
iband = 1e6
GM_speed = SEFD**2*nu[0]/iband
GM_sky = 8*(0.95/2.)**2*np.pi # 0.95deg images #NOTE: in Enriquez 2017, we used this as the radius, but it is the diameter.
GM_DFM = GM_sky * band / Sens**(3/2.)
if verbose:
print('SEFD (Gray & Mooley 2017):', SEFD)
print('Sens (Gray & Mooley 2017):', Sens)
print('EIRP (Gray & Mooley 2017):', GM_EIRP)
print('BeamSize (Gray & Mooley 2017):', end=' ')
print('Sky Coverage (Gray & Mooley 2017):', GM_sky)
print('CWTFM (Gray & Mooley 2017):', zeta_AO * (GM_EIRP_tot)/ (GM_rarity_tot)) #,'or', zeta_AO*stats.hmean(GM_EIRP/GM_rarity)
print('DFM (Gray & Mooley 2017):', GM_DFM)
print('~o~')
#----------
#Phoenix
telescope = ['Arecibo','Arecibo; Parkes,Parkes,NRAO140']
Ph_stars = np.array([290,371,206,105,195]) # From Harp2016
#180MHz skip Backus2002; band from Harp2016
band = np.array([(1.75-1.2)*1e9 - 180e6,(3.0-1.75)*1e9,(1.75-1.2)*1e9, (3.0-1.75)*1e9, (3.0-1.2)*1e9])
central_freq = np.array([1.5e9,2.375e9,1.5e9,2.375e9,2.1e9])
freq_range_norm = (band/central_freq)
Dish_D = np.array([305,225,64,64,43]) # Email from G. Harp
SEFD = np.array([calc_SEFD(calc_DishArea(Dish_D[0]), 40, eff=0.7),
calc_SEFD(calc_DishArea(Dish_D[1]), 40, eff=0.7),
calc_SEFD(calc_DishArea(Dish_D[2]), 35, eff=0.7),
calc_SEFD(calc_DishArea(Dish_D[3]), 35, eff=0.7),
calc_SEFD(calc_DishArea(Dish_D[4]), 35, eff=0.7)])
m=1; nu =1.0; t=[276,195,276,138,552]
Sens1 = np.array([calc_Sensitivity(m,nu,t[i],SEFD=SEFD[i],narrow=False) for i in range(len(SEFD))])
Sens = np.array([16,16,100,100,100]) # From Harp2016
# Max distance approx ; 147Ly median distance Shostalk(2000), ~700 farthest
dist_std = (700 * u.lyr.to('m'))
Ph_EIRP = np.array([calc_EIRP_min(dist_std,Sen) for Sen in Sens])
Ph_rarity = Ph_stars*freq_range_norm
Ph_stars_tot = Ph_stars.sum()
Ph_rarity_tot = Ph_rarity.sum()
Ph_EIRP_tot = Ph_EIRP.max()
iband = 20e6
    Ph_speed = SEFD.mean()**2*nu/iband # Note: this value uses the self-calculated SEFD values (which are not fully consistent with the values expected from Harp 2016).
Ph_sky = Ph_stars * np.array([calc_BeamSize(Dish_D[i],central_freq[i]) for i in range(len(Dish_D))])
Ph_DFM = Ph_sky * band / Sens**(3/2.)
if verbose:
print('SEFD (Phoenix):', SEFD)
print('Sens (Phoenix):', Sens1)
print('Sens_Harp (Phoenix):', Sens)
print('EIRP (Phoenix):', Ph_EIRP)
print('BeamSize (Phoenix):', np.array([calc_BeamSize(Dish_D[i],central_freq[i]) for i in range(len(Dish_D))]))
print('Sky Coverage (Phoenix):', Ph_sky.sum())
print('CWTFM (Phoenix):', zeta_AO * (Ph_EIRP)/ (Ph_rarity))
print('CWTFM (Phoenix):', zeta_AO * (Ph_EIRP_tot)/ (Ph_rarity_tot))
print('DFM (Phoenix):', Ph_DFM)
print('~o~')
#----------
#ATA
telescope = 'ATA'
ATA_stars= np.array([65,1959,2822,7459])
    band = np.array([8000.e6,2040.e6,337.e6,268.e6]) # There are 73 MHz that are RFI-flagged; this is ignored here.
central_freq = 5e9 # 1-9 GHz
freq_range_norm = (band/central_freq)
#Tsys = (80+120+95+137)/4. = 108
SEFD = calc_SEFD(calc_DishArea(6.1), 108, eff=0.58) / np.sqrt(27*26)
SEFDs = np.array([SEFD,SEFD,SEFD,SEFD])
m=6.5; nu =0.7; t=93.
dist_std = np.array([(1.4e3*3.26156 * u.lyr.to('m')),(1.1e3*3.26156 * u.lyr.to('m')),(300 * u.lyr.to('m')),(500 * u.lyr.to('m'))]) #Turnbull 2003 for HabCat
Sens = np.array([calc_Sensitivity(m,nu,t,SEFD=SEF,narrow=False) for SEF in SEFDs])
ATA_EIRP = np.array([calc_EIRP_min(dist_std[i],Sens[i]) for i in range(len(Sens))])
ATA_rarity = ATA_stars*freq_range_norm
ATA_rarity_tot = ATA_rarity.sum()
ATA_stars_tot = ATA_stars.sum()
ATA_EIRP_tot = ATA_EIRP.max()
iband = 70e6
ATA_speed = SEFD**2*nu/iband
ATA_sky = ATA_stars * 3*6./4.*np.pi/3600. # beam 3'x6' at 1.4GHz
ATA_DFM = ATA_sky * band / Sens**(3/2.)
if verbose:
print('SEFD (ATA):', SEFD)
print('Sens (ATA):', Sens)
print('EIRP (ATA):', ATA_EIRP)
print('BeamSize (ATA):', end=' ')
print('Sky Coverage (ATA):', ATA_sky.sum())
print('CWTFM (ATA):', zeta_AO * (ATA_EIRP_tot)/ (ATA_rarity_tot))
print('DFM (ATA):', ATA_DFM)
print('~o~')
#----------
#Siemion 2013
telescope = 'GBT'
Siemion_stars= 86
band = 800e6 - 130e6 #(1.1-1.2,1.33-1.9)
freq_range_norm = (band/1.5e9)
SEFD= calc_SEFD(calc_DishArea(100), 20, eff=0.72) # 10 Jy (GBT)
m=25.; nu =1.; t=300.
Sens = calc_Sensitivity(m, nu,t,SEFD=SEFD)
dist_std = (1.1e3*3.26156 * u.lyr.to('m')) # Max distance approx
Siemion_EIRP = calc_EIRP_min(dist_std,Sens)
Siemion_rarity = Siemion_stars*freq_range_norm
iband = 800e6
Siemion_speed = (SEFD/0.85)**2*nu/iband # 0.85 ==> Siemion priv. comm. (due to 2 bit data format)
Siemion_sky = Siemion_stars * calc_BeamSize(100,1.5e9)
Siemion_DFM = Siemion_sky * band / Sens**(3/2.)
if verbose:
print('SEFD (Siemion2013):', SEFD)
print('Sens (Siemion2013):', Sens)
print('EIRP (Siemion2013):', Siemion_EIRP)
print('BeamSize (Siemion2013):',calc_BeamSize(100,1.5e9))
print('Sky Coverage (Siemion2013):', Siemion_sky)
print('CWTFM (Siemion2013):', zeta_AO * (Siemion_EIRP)/ (Siemion_rarity))
print('DFM (Siemion2013):', Siemion_DFM)
print('~o~')
#----------
#Valdes 1986
telescope='HCRO'
Valdes_stars = np.array([53, 12])
band = np.array([256*4883, 1024*76])
freq_range_norm = (band/1.516e9)
SEFD = calc_SEFD(calc_DishArea(26), 100, eff=0.5)
m=3.0; nu =[4883., 76.]; t=3000.
Sens = np.array([calc_Sensitivity(m, nu[0],t,SEFD=SEFD,npol=1.),calc_Sensitivity(m, nu[1],t,SEFD=SEFD,npol=1.)])
dist_std = (20 * u.lyr.to('m')) # Max distance approx
Valdes_EIRP = np.array([calc_EIRP_min(dist_std,Sen) for Sen in Sens])
Valdes_rarity = Valdes_stars*freq_range_norm
Valdes_rarity_tot = Valdes_rarity.sum()
Valdes_EIRP_tot = Valdes_EIRP.max()
iband = 256*4883
Valdes_speed = SEFD**2*nu[0]/iband
Valdes_sky = (Valdes_stars * calc_BeamSize(26,1.5e9)).sum()
Valdes_DFM = Valdes_sky * band / Sens**(3/2.)
if verbose:
print('SEFD (Valdes 1986):', SEFD)
print('Sens (Valdes 1986):', Sens)
print('EIRP (Valdes 1986):', Valdes_EIRP)
print('BeamSize (Valdes 1986):',calc_BeamSize(26,1.5e9))
print('Sky Coverage (Valdes 1986):', Valdes_sky)
print('CWTFM (Valdes 1986):', zeta_AO * (Valdes_EIRP_tot)/ (Valdes_rarity_tot))
print('DFM (Valdes 1986):', Valdes_DFM)
print('~o~')
#----------
#Tarter 1980
    telescope = 'NRAO 91m'
Tarter_stars=201
band = 360e3*4.
freq_range_norm = (band/1.666e9)
SEFD = calc_SEFD(calc_DishArea(91), 70, eff=0.6)
m=12.0; nu =5.5; t= 45
Sens = calc_Sensitivity(m, nu,t,SEFD=SEFD)
dist_std = (25*3.26156* u.lyr.to('m')) # Max distance approx
Tarter_EIRP = calc_EIRP_min(dist_std,Sens)
Tarter_rarity = Tarter_stars*freq_range_norm
iband = 360e3
Tarter_speed = SEFD**2*nu/iband
Tarter_sky = Tarter_stars * calc_BeamSize(91,1.666e9)
Tarter_DFM = Tarter_sky * band / Sens**(3/2.)
if verbose:
print('SEFD (Tarter1980):', SEFD)
print('Sens (Tarter1980):', Sens)
print('EIRP (Tarter1980):', Tarter_EIRP)
print('BeamSize (Tarter1980):', calc_BeamSize(91,1.666e9))
print('Sky Coverage (Tarter1980):', Tarter_sky)
print('CWTFM (Tarter1980):', zeta_AO * (Tarter_EIRP)/ (Tarter_rarity))
print('DFM (Tarter1980):', Tarter_DFM)
print('~o~')
#----------
#Verschuur1973
telescope=['300ft Telescope', '140ft Telescope']
Verschuur_stars=np.array([3,8])
band = np.array([0.6e6,20e6])
freq_range_norm = (band/1.426e9)
SEFD = np.array([calc_SEFD(calc_DishArea(300*0.3048),110, eff=0.75),calc_SEFD(calc_DishArea(140*0.3048),48, eff=0.75)]) #**NOTE** the 0.75 for the 140' is not real
m=3.0; nu =[490.,7.2e3]; t= [4*60.,5*60.]
Sens = np.array([calc_Sensitivity(m, nu[0],t[0],SEFD=SEFD[0]),calc_Sensitivity(m, nu[1],t[1],SEFD=SEFD[1])])
dist_std = (5*3.26156 * u.lyr.to('m'))
Verschuur_EIRP = np.array([calc_EIRP_min(dist_std,Sen) for Sen in Sens])
Verschuur_rarity = Verschuur_stars*freq_range_norm
Verschuur_rarity_tot = Verschuur_rarity.sum()
Verschuur_EIRP_tot = Verschuur_EIRP.max()
iband = np.array([0.6e6, 2.5e6]) #300 ft: Two 192-channel receivers (at 130 km/s with 4.74kHz=1km/s at this freq.)
Verschuur_speed = SEFD.min()**2*nu[0]/iband[0]
Verschuur_sky = (Verschuur_stars * np.array([calc_BeamSize(300*0.3048,1.42e9),calc_BeamSize(140*0.3048,1.42e9)])).sum()*2 # The two comes from the off beam.
Verschuur_DFM = Verschuur_sky * band / Sens**(3/2.)
if verbose:
print('SEFD (Verschuur1973):', SEFD)
print('Sens (Verschuur1973):', Sens)
print('EIRP (Verschuur1973):', Verschuur_EIRP)
print('BeamSize (Verschuur1973):', np.array([calc_BeamSize(300*0.3048,1.42e9),calc_BeamSize(140*0.3048,1.42e9)]))
print('Sky Coverage (Verschuur1973):', Verschuur_sky)
print('CWTFM (Verschuur1973):', zeta_AO * (Verschuur_EIRP_tot)/ (Verschuur_rarity_tot))
print('DFM (Verschuur1973):', Verschuur_DFM)
print('~o~')
#----------
#META Horowitz&Sagan
telescope=''
Horowitz_stars= 1e7
band = 1.2e6
freq_range_norm = (band/1.42e9)
SEFD = calc_SEFD(calc_DishArea(26),85, eff=0.5) # eff=0.5 ==> We were unable to find a value in the literature. We assume a similar value to the antenna of the same dimensions from Valdes & Freitas (1986).
m=30; nu =0.05; t=20
Sens = calc_Sensitivity(m, nu,t,SEFD=SEFD,narrow=False)
dist_std = (700*3.26156 * u.lyr.to('m')) # Max distance: # Horowitz & Sagan (1993) suggested values for the number of stars given a distance, based on the power of an isotropic beacon.
Horowitz_EIRP = calc_EIRP_min(dist_std,Sens)
Horowitz_rarity = Horowitz_stars*freq_range_norm
iband = 400e3
Horowitz_speed = SEFD**2*nu/iband
Horowitz_sky = 41253*.68 # Horowitz_stars * calc_BeamSize(26,1.42e9)
Horowitz_DFM = Horowitz_sky * band / Sens**(3/2.)
if verbose:
print('SEFD (Horowitz):', SEFD)
print('Sens (Horowitz):', Sens)
print('EIRP (Horowitz):', Horowitz_EIRP)
print('BeamSize (Horowitz):', end=' ')
print('Sky Coverage (Horowitz):', Horowitz_sky)
print('CWTFM (Horowitz):', zeta_AO * (Horowitz_EIRP)/ (Horowitz_rarity))
print('DFM (Horowitz):', Horowitz_DFM)
print('~o~')
#---------------------------
#BL
Price_telescopes = ['GBT','GBT','Parkes']
Price_BL_stars = np.array([883,1006,195])
Price_band = np.array([(1.2-1.025+1.925-1.34)*1e9,(2.72-1.82)*1e9,(3.444-2.574)*1e9])
Price_central_freq = np.array([1.5e9,2.27e9,3.0e9,])
Price_freq_range_norm = (Price_band/Price_central_freq)
Dish_D = np.array([100,100,64])
Price_BL_SEFD = np.array([calc_SEFD(calc_DishArea(Dish_D[0]), 20, eff=0.72),
calc_SEFD(calc_DishArea(Dish_D[1]), 20, eff=0.72),
calc_SEFD(calc_DishArea(Dish_D[2]), 35, eff=0.7),
])
m=10.; nu =3.; t=300.
Price_Sens = np.array([calc_Sensitivity(m,nu,t,SEFD=Price_BL_SEFD[i]) for i in range(len(Price_BL_SEFD))])
dist_std = (50*3.26156 * u.lyr.to('m'))
Price_BL_EIRP = np.array([calc_EIRP_min(dist_std,Sen) for Sen in Price_Sens])
Price_BL_rarity = Price_BL_stars*Price_freq_range_norm
Price_BL_stars_tot = Price_BL_stars[:2].sum()
Price_rarity_tot = Price_BL_rarity[:2].sum()
Price_EIRP_tot = Price_BL_EIRP[:2].max()
iband = 900e6
Price_BL_speed = Price_BL_SEFD.mean()**2*nu/iband
Price_BL_sky = Price_BL_stars * np.array([calc_BeamSize(Dish_D[i],Price_central_freq[i]) for i in range(len(Dish_D))])
Price_BL_DFM = Price_BL_sky * Price_band / Price_Sens**(3/2.)
if verbose:
print('SEFD (Price_BL):', Price_BL_SEFD)
print('Sens (Price_BL):', Price_Sens)
print('EIRP (Price_BL):', Price_BL_EIRP)
print('BeamSize (Price_BL):', np.array([calc_BeamSize(Dish_D[i],Price_central_freq[i]) for i in range(len(Dish_D))]))
print('Sky Coverage (Price_BL):', Price_BL_sky.sum())
print('CWTFM (Price_BL):', zeta_AO *(Price_BL_EIRP) / (Price_BL_rarity))
print('CWTFM (Price_BL_tot):', zeta_AO *(Price_EIRP_tot) / (Price_rarity_tot))
print('DFM (Price_BL):', Price_BL_DFM)
print('~o~')
#---------------------------------------------------------------------------------
#EIRP values in watts.
P = np.array([1e4,1e6,1e8,1e10,1e12,1e14,1e16,1e18,1e20,1e23])
#---------------------------
# Luminosity function limit on putative transmitters.
plt.plot(np.log10(P),np.log10(calc_NP_law3(P)),lw=20,color='gray',alpha=0.3)#,label=r'$\alpha$: %s'%alpha)
plt.plot([17,17],[-11,4],'--',lw=5,color='black',alpha=0.5)#,label='Kardashev Type I')
plt.plot([13,13],[-11,4],lw=5,color='black',alpha=0.5)#,label='AO Planetary Radar')
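    # Axes convention: x = log10(EIRP_min [W]), y = log10(transmitter rate)
    # = log10(1/(N_stars * rel_BW)). The dashed line at 17 is the Kardashev Type I
    # benchmark and the solid line at 13 is the AO planetary radar, as noted in the
    # commented-out labels above.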
#---------------------------
alpha = 0.7
markersize = 20
fontsize = 20
ticksize = fontsize - 2
dot_size = markersize - 12
## PLOTTING
# different colors for different bands...
# L: Red
# S: Green
# C: Orange
# X: Blue
# if surveys overlap one or more bands, just add a markeredgecolor
price1, = plt.plot([np.log10(Price_BL_EIRP[2])],[np.log10(1./Price_BL_rarity[2])],'h', color = '#690182',markeredgecolor='w',markersize = markersize)
price2, = plt.plot([np.log10(Price_EIRP_tot)],[np.log10(1./Price_rarity_tot)],'h', color = '#440154',markeredgecolor='w',markersize = markersize)
#-------
enriquez, = plt.plot([np.log10(BL_EIRP)],[np.log10(1./BL_rarity)],'h', color = 'orange',markeredgecolor='#1c608e',markersize = markersize)
graymooley, = plt.plot(np.log10(GM_EIRP),np.log10(1./GM_rarity),'o',color ='#303b6b',markeredgecolor='w',markersize = markersize)
harpab, = plt.plot(np.log10(ATA_EIRP[0:2]),np.log10(1./ATA_rarity[0:2]),'^',color ='#a1c625',markeredgecolor='w',markersize = markersize)
harpc, = plt.plot(np.log10(ATA_EIRP[2]),np.log10(1./ATA_rarity[2]),'s',color ='#a1c625',markeredgecolor='w',markersize = markersize)
harpd, = plt.plot(np.log10(ATA_EIRP[3]),np.log10(1./ATA_rarity[3]),'h',color ='#a1c625',markeredgecolor='w',markersize = markersize)
harpall1, = plt.plot(np.log10(ATA_EIRP[0]),np.log10(1./ATA_rarity_tot),'^w',markersize = markersize,markeredgecolor='#a1c625',alpha=0.7)
harpall2, = plt.plot(np.log10(ATA_EIRP[0:1]),np.log10(1./ATA_rarity[0:1]),'ow',markersize = dot_size, markeredgecolor='#a1c625')
siemion1, = plt.plot([np.log10(Siemion_EIRP)],[np.log10(1./Siemion_rarity)],'>',color ='#26828e',markeredgecolor='w',markersize = markersize)
siemion2, = plt.plot([np.log10(Siemion_EIRP)],[np.log10(1./Siemion_rarity)],'ow',markersize = dot_size,markeredgecolor='#440154')
phoenix, = plt.plot(np.log10(Ph_EIRP),np.log10(1./Ph_rarity),'<b',color ='#31688e',markeredgecolor='w',markersize = markersize)
phoenixall, = plt.plot([np.log10(Ph_EIRP_tot)],[np.log10(1./Ph_rarity_tot)],'<w',markeredgecolor='#31688e',markersize = markersize)
horowitz_sagan, = plt.plot(np.log10(Horowitz_EIRP),np.log10(1./Horowitz_rarity),'oc',color ='#add8e6',markeredgecolor='w',markersize = markersize)
valdez, = plt.plot(np.log10(Valdes_EIRP),np.log10(1./Valdes_rarity),'sy',color ='pink',markeredgecolor='w',markersize = markersize)
tarter, = plt.plot([np.log10(Tarter_EIRP)],[np.log10(1./Tarter_rarity)],'vc',color ='#1f9e89',markeredgecolor='w',markersize = markersize)
verschuur, = plt.plot(np.log10(Verschuur_EIRP),np.log10(1./Verschuur_rarity),'sm',color ='#efda21',markeredgecolor='w',markersize = markersize)
legend_handles = {'p1':price1, 'p2':price2, 'e':enriquez, 'gm':graymooley, 'h_ab':harpab, 'h_c':harpc, 'h_d':harpd, 'h_a1':harpall1, 'h_a2':harpall2, 's1':siemion1, 's2':siemion2, 'ph':phoenix, 'pha':phoenixall, 'hs':horowitz_sagan, 'v':valdez, 't':tarter, 'ver':verschuur}
return legend_handles
def compare_SETI_limits(EIRP,rarity,shape='o',color='k',project='This Project',y_label_units=True, save_as=None):
    ''' Compare a SETI project with previous surveys.
'''
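    # Usage sketch (illustrative numbers only, not taken from any survey):
    #   compare_SETI_limits(EIRP=5e12, rarity=330., shape='*', color='r',
    #                       project='My survey', save_as='my_survey_compare')
    # EIRP is in watts and rarity is N_stars*(band/central_freq); the new point is
    # drawn at (log10(EIRP), log10(1/rarity)) on top of the ET_power_law() figure.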
#---------------------------------------------------------------------------------
# plotting setup
plt.ion()
plt.figure(figsize=(15, 10))
alpha = 0.7
markersize = 20
fontsize = 20
ticksize = fontsize - 2
dot_size = markersize - 12
plt.plot([np.log10(EIRP)],[np.log10(1./rarity)],marker = shape, color = color,markersize = markersize, label=project)
ET_power_law()
plt.legend(numpoints=1,scatterpoints=1,fancybox=True, shadow=True)
plt.xlabel('EIRP [log(W)]',fontsize = fontsize)
#plt.ylabel('Transmiter Galactic Rarity [log((Nstars*BW)^-1)]',fontsize=fontsize)
if y_label_units:
plt.ylabel('Transmitter Rate \n [log(1/(Nstars * rel_BW))]',fontsize=fontsize)
else:
plt.ylabel('Transmitter Rate ',fontsize=fontsize)
plt.xticks(fontsize = ticksize)
plt.yticks(fontsize = ticksize)
#plt.ylim(-10,4)
#plt.xlim(10,23)
image_filename = 'SETI_limits_comparison'
if save_as is not None:
image_filename = save_as
from datetime import datetime
image_filename = image_filename + datetime.now().strftime("_%m-%d-%y_%H:%M:%S") + '.png'
plt.savefig(image_filename, format='png',bbox_inches='tight')
# plt.savefig('Transmitter_Rarity_FoM.pdf', format='pdf', dpi=300,bbox_inches='tight')
| 23,244 | 36.920065 | 277 |
py
|
BL-TESSsearch
|
BL-TESSsearch-main/analysis/seti_limits_py3/.ipynb_checkpoints/XBand_SETI_compare-checkpoint.py
|
from ET_power_law import *
import matplotlib.pylab as plt
def xband_compare(max_distance, save=False):
#---------------------------
# Edit values below vvvvvv
#---------------------------
# Your new values ( e.g. Enriquez 2017 x 100)
project = 'This Study: X-Band' #Project name
telescope = 'GBT' # Telescope name
N_stars = 23 # Estimated number of stars
band = 4200e6 # Total bandwidth [Hz]
    central_freq = 9.9e9 # Central frequency [Hz]
dish_diam = 100 #Telescope diameter meters (single dish in current version)
dish_Tsys = 30.80 #Telescope Tsys [Kelvin]
    dish_app_eff = 0.72 #Telescope Aperture Efficiency
SNR_threshold = 10 #Survey threshold [sigma above the mean]
spectral_resolution = 3. #Spectral resolution [Hz]
scan_obs_time = 300 # Observation time per scan [sec]
#max_distance = 746.65 #Maximum distance [pc]
iband = 4200e6 #Instantaneous Bandwidth [Hz]
shape = '*' # Figure shape
color = 'orange' # Figure color
y_label_units = True # Units in Y label
#---------------------------
# Edit values above ^^^^^
#---------------------------
#Calculating limits
zeta_AO = 1e3*0.5/ 1e13
freq_range_norm = (band/central_freq)
SEFD = calc_SEFD(calc_DishArea(dish_diam), dish_Tsys, eff=dish_app_eff) # 10 Jy (GBT)
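    # NOTE: the next line overrides the calc_SEFD() estimate with a fixed SEFD in Jy
    # (presumably a measured receiver value); comment it out to use the estimate instead.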
SEFD = 15.2522
Sens = calc_Sensitivity(SNR_threshold, spectral_resolution,scan_obs_time,SEFD=SEFD)
dist_m = (max_distance*3.26156 * u.lyr.to('m'))
EIRP = calc_EIRP_min(dist_m,Sens)
#EIRP = 970.5e12
survey_rarity = N_stars*freq_range_norm
survey_speed = SEFD**2*spectral_resolution/iband
survey_sky = N_stars * calc_BeamSize(dish_diam,central_freq)
survey_DFM = survey_sky * band / Sens**(3/2.)
def print_project():
print('~o~', project ,' (', telescope,') ', '~o~')
print('SEFD :', SEFD)
print('Sens :', Sens)
print('EIRP :', EIRP)
print('Max Distance (m) :', dist_m)
print('BeamSize :', calc_BeamSize(dish_diam,central_freq))
print('Sky Coverage :', survey_sky)
print('CWTFM :', zeta_AO *(EIRP) / (survey_rarity))
print('DFM :', survey_DFM)
print_project()
#---------------------------
#Comparing SETI limits
if save:
compare_SETI_limits(EIRP,survey_rarity,shape=shape,color=color,project=project,y_label_units=y_label_units, save_as='XBand_seti_compare')
xband_dict = {'EIRP':EIRP, 'rarity':survey_rarity, 'shape':shape, 'color':color, 'project':project, 'y_label_units':y_label_units}
return xband_dict
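# Usage sketch (the 100 pc distance is an arbitrary example value):
#   xband = xband_compare(max_distance=100., save=False)
#   print(xband['EIRP'], xband['rarity'])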
| 2,630 | 33.168831 | 145 |
py
|
BL-TESSsearch
|
BL-TESSsearch-main/analysis/seti_limits_py3/.ipynb_checkpoints/CBand_SETI_compare-checkpoint.py
|
from ET_power_law import *
import matplotlib.pylab as plt
def cband_compare(max_distance, save=False):
#---------------------------
# Edit values below vvvvvv
#---------------------------
# Your new values ( e.g. Enriquez 2017 x 100)
project = 'This Study: C-Band' #Project name
telescope = 'GBT' # Telescope name
N_stars = 20 # Estimated number of stars
band = 3800e6 # Total bandwidth [Hz]
    central_freq = 5.9e9 # Central frequency [Hz]
dish_diam = 100 #Telescope diameter meters (single dish in current version)
dish_Tsys = 21.50 #Telescope Tsys [Kelvin]
    dish_app_eff = 0.72 #Telescope Aperture Efficiency
SNR_threshold = 10 #Survey threshold [sigma above the mean]
spectral_resolution = 3. #Spectral resolution [Hz]
scan_obs_time = 300 # Observation time per scan [sec]
#max_distance = 935.46 #Maximum distance [pc]
iband = 3800e6 #Instantaneous Bandwidth [Hz]
shape = '*' # Figure shape
color = 'b' # Figure color
y_label_units = True # Units in Y label
#---------------------------
# Edit values above ^^^^^
#---------------------------
#Calculating limits
zeta_AO = 1e3*0.5/ 1e13
freq_range_norm = (band/central_freq)
SEFD = calc_SEFD(calc_DishArea(dish_diam), dish_Tsys, eff=dish_app_eff) # 10 Jy (GBT)
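    # NOTE: the next line overrides the calc_SEFD() estimate with a fixed SEFD in Jy
    # (presumably a measured receiver value); comment it out to use the estimate instead.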
SEFD = 10.6468
Sens = calc_Sensitivity(SNR_threshold, spectral_resolution,scan_obs_time,SEFD=SEFD)
dist_m = (max_distance*3.26156 * u.lyr.to('m'))
EIRP = calc_EIRP_min(dist_m,Sens)
#EIRP = 677.5e12
survey_rarity = N_stars*freq_range_norm
survey_speed = SEFD**2*spectral_resolution/iband
survey_sky = N_stars * calc_BeamSize(dish_diam,central_freq)
survey_DFM = survey_sky * band / Sens**(3/2.)
def print_project():
print('~o~', project ,' (', telescope,') ', '~o~')
print('SEFD :', SEFD)
print('Sens :', Sens)
print('Max Distance (m) :', dist_m)
print('EIRP :', EIRP)
print('BeamSize :', calc_BeamSize(dish_diam,central_freq))
print('Sky Coverage :', survey_sky)
print('CWTFM :', zeta_AO *(EIRP) / (survey_rarity))
print('DFM :', survey_DFM)
print_project()
#---------------------------
#Comparing SETI limits
if save:
compare_SETI_limits(EIRP,survey_rarity,shape=shape,color=color,project=project,y_label_units=y_label_units, save_as='CBand_seti_compare')
cband_dict = {'EIRP':EIRP, 'rarity':survey_rarity, 'shape':shape, 'color':color, 'project':project, 'y_label_units':y_label_units}
return cband_dict
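# Usage sketch (the 100 pc distance is an arbitrary example value):
#   cband = cband_compare(max_distance=100., save=False)
#   print(cband['EIRP'], cband['rarity'])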
| 2,623 | 32.641026 | 145 |
py
|
BL-TESSsearch
|
BL-TESSsearch-main/analysis/seti_limits_py3/.ipynb_checkpoints/ET_power_law-checkpoint.py
|
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from astropy import units as u
from matplotlib.patches import Polygon
#plt.savefig(image_filename, format='png',bbox_inches='tight')
from scipy import stats
def calc_DishArea(d):
""" Compute dish area
d = dish diameter
"""
return np.pi * (d/2)**2
def calc_BeamSize(d,v,verbose=False):
""" Compute BeamSize
d = dish diameter
v = frequency
"""
c = 2.998e8 #speed of light
if verbose:
print('\nBeam is: %f \n'%(1.22* (c/(d*v)) *57.2958))
return (1.22* (c/(d*v)) *57.2958/2)**2*np.pi
def calc_SEFD(A, Tsys, eff=1.0):
""" Calculate SEFD
Tsys = system temperature
A = collecting area
Ae = effective collecting area
    eff = aperture efficiency (0.0 to 1.0)
    """
    kb = 1.3806488e3 # Boltzmann constant (1.380649e-23 J/K) scaled by 1e26 so that the SEFD comes out in Jy
Ae = A*eff
return 2 * Tsys * kb / Ae
def calc_Sensitivity(m, nu, t, SEFD=0, Tsys=10,eff=1.0,A=100,npol=2.,narrow=True):
""" Minimum detectable luminosity for narrowband emission
Tsys = system temperature
A = collecting area
m = threshold, (e.g. 10)
nu = channel bandwidth
t = observing time
narrow = True if signal is narrower than spectral resolution.
"""
if not SEFD:
sefd = calc_SEFD(A, Tsys, eff=eff)
else:
sefd = SEFD
if narrow:
sens = m * sefd * np.sqrt(nu/(npol*t))
else:
sens = m * sefd / np.sqrt(npol*nu*t)
return sens
def calc_EIRP_min(d,Sens):
""" Minimum detectable luminosity (EIRP) for narrowband emission.
d = distance to target star []
Sens = sensitivity of the obs (Jy)
"""
#1 Jy = 1e-26 W/m2/Hz)
return 4 * np.pi * d**2 * Sens *1e-26
def calc_gain(ll, d):
""" Gain of a dish telescope
ll = wavelength (lambda)
d = dish diameter (m)
"""
return (np.pi * d / ll)**2
def calc_NP_law(alpha,P):
    ''' Calculates the power law, given the power-law exponent and an array of EIRP powers in W. Based on Gray & Mooley.
'''
# No = 1e25**alpha # Not sure how I got 1e25 for the transmitter power, but from the plot I get a lower number.
No = 1.e21**alpha
NP = No*(1./P**(alpha))
NPn = NP*1e3/4e11 # Normalized to stars in MW (4e11), and BW (1e3 .. ).
return NPn
def calc_NP_law2(alpha,P):
    ''' Calculates the power law, given the power-law exponent and an array of EIRP powers in W. Based on BL L-band.
'''
No = 706146012574.0**alpha / 304.48
NP = No*(1./P**(alpha))
return NP
def calc_NP_law3(P):
    ''' Calculates the power law for an array of EIRP powers in W; the exponent comes from a fit to both cases above.
'''
E1 = 5.e+11
S1 = 350
E2 = 1.98792219e+21 #2.75879335e+21
S2 = 7.14285714e+08
# Solving for alpha
alpha = np.log10(S2/S1) /np.log10(E2/E1)
print('The exponent (alpha) = ', alpha)
# Solving for No
No = E1**alpha / S1
NP = No*(1./P**(alpha))
return NP
def ET_power_law(verbose=False):
#---------------------------
# Standardizing the sensitivity of telescopes by figuring out the max EIRP of a transmitter they could have found.
# For this, I need the sensitivity of the observation, given SEFD and so on.
#Standard distance (100 ly)
dist_std = (100. * u.lyr.to('m'))
    # Decided not to use the standard distance above: it makes sense when the original distance is shorter, but not when it is farther away (as in Siemion 2013).
    # Normalization to L_AO = 2e13 W, frac_freq = 1/2 and N_stars = 1k.
zeta_AO = 1 #np.log10(1e3*.5)/ np.log10(2e13)
zeta_AO = 1e3*0.5/ 1e13
#---------------------------
#BL
telescope = 'GBT'
BL_stars= 692
band = 660e6 #(1.1-1.2,1.34-1.9)
freq_range_norm = (band/1.5e9)
BL_SEFD = calc_SEFD(calc_DishArea(100), 20, eff=0.72) # 10 Jy (GBT)
m=25.; nu =3.; t=300.
Sens = calc_Sensitivity(m, nu,t,SEFD=BL_SEFD)
dist_std = (50*3.26156 * u.lyr.to('m')) # Max distance approx
BL_EIRP = calc_EIRP_min(dist_std,Sens)
BL_rarity = BL_stars*freq_range_norm
iband = 800e6
BL_speed = BL_SEFD**2*nu/iband
BL_sky = BL_stars * calc_BeamSize(100,1.5e9)
BL_DFM = BL_sky * band / Sens**(3/2.)
if verbose:
print('SEFD (BL):', BL_SEFD)
print('Sens (BL):', Sens)
print('EIRP (BL):', BL_EIRP)
print('BeamSize (BL):', calc_BeamSize(100,1.5e9))
print('Sky Coverage (BL):', BL_sky)
print('CWTFM (BL):', zeta_AO *(BL_EIRP) / (BL_rarity))
print('DFM (BL):', BL_DFM)
print('~o~')
#----------
# Gray & Mooley 2017
telescope=['VLA']
GM_stars= 1e12
band = np.array([1e6, 0.125e6])
central_freq = np.array([1.4e9,8.4e9])
freq_range_norm = (band/central_freq)
SEFD = calc_SEFD(calc_DishArea(25),35, eff=0.45) / np.sqrt(27*26.) #Perley 2009
m=7.0; nu =[122., 15.3]; t=[20*60,5*60]
Sens = np.array([calc_Sensitivity(m, nu[0],t[0],SEFD=SEFD),calc_Sensitivity(m, nu[1],t[1],SEFD=SEFD)])
dist_std = (2.5e6 * u.lyr.to('m')) # Max distance approx
GM_EIRP = np.array([calc_EIRP_min(dist_std,Sen) for Sen in Sens])
GM_rarity = GM_stars*freq_range_norm
GM_rarity_tot = GM_rarity.sum()
GM_EIRP_tot = GM_EIRP.max()
iband = 1e6
GM_speed = SEFD**2*nu[0]/iband
GM_sky = 8*(0.95/2.)**2*np.pi # 0.95deg images #NOTE: in Enriquez 2017, we used this as the radius, but it is the diameter.
GM_DFM = GM_sky * band / Sens**(3/2.)
if verbose:
print('SEFD (Gray & Mooley 2017):', SEFD)
print('Sens (Gray & Mooley 2017):', Sens)
print('EIRP (Gray & Mooley 2017):', GM_EIRP)
print('BeamSize (Gray & Mooley 2017):', end=' ')
print('Sky Coverage (Gray & Mooley 2017):', GM_sky)
print('CWTFM (Gray & Mooley 2017):', zeta_AO * (GM_EIRP_tot)/ (GM_rarity_tot)) #,'or', zeta_AO*stats.hmean(GM_EIRP/GM_rarity)
print('DFM (Gray & Mooley 2017):', GM_DFM)
print('~o~')
#----------
#Phoenix
telescope = ['Arecibo','Arecibo; Parkes,Parkes,NRAO140']
Ph_stars = np.array([290,371,206,105,195]) # From Harp2016
#180MHz skip Backus2002; band from Harp2016
band = np.array([(1.75-1.2)*1e9 - 180e6,(3.0-1.75)*1e9,(1.75-1.2)*1e9, (3.0-1.75)*1e9, (3.0-1.2)*1e9])
central_freq = np.array([1.5e9,2.375e9,1.5e9,2.375e9,2.1e9])
freq_range_norm = (band/central_freq)
Dish_D = np.array([305,225,64,64,43]) # Email from G. Harp
SEFD = np.array([calc_SEFD(calc_DishArea(Dish_D[0]), 40, eff=0.7),
calc_SEFD(calc_DishArea(Dish_D[1]), 40, eff=0.7),
calc_SEFD(calc_DishArea(Dish_D[2]), 35, eff=0.7),
calc_SEFD(calc_DishArea(Dish_D[3]), 35, eff=0.7),
calc_SEFD(calc_DishArea(Dish_D[4]), 35, eff=0.7)])
m=1; nu =1.0; t=[276,195,276,138,552]
Sens1 = np.array([calc_Sensitivity(m,nu,t[i],SEFD=SEFD[i],narrow=False) for i in range(len(SEFD))])
Sens = np.array([16,16,100,100,100]) # From Harp2016
# Max distance approx ; 147Ly median distance Shostalk(2000), ~700 farthest
dist_std = (700 * u.lyr.to('m'))
Ph_EIRP = np.array([calc_EIRP_min(dist_std,Sen) for Sen in Sens])
Ph_rarity = Ph_stars*freq_range_norm
Ph_stars_tot = Ph_stars.sum()
Ph_rarity_tot = Ph_rarity.sum()
Ph_EIRP_tot = Ph_EIRP.max()
iband = 20e6
    Ph_speed = SEFD.mean()**2*nu/iband # Note: this value uses the self-calculated SEFD values (which are not fully consistent with the values expected from Harp 2016).
Ph_sky = Ph_stars * np.array([calc_BeamSize(Dish_D[i],central_freq[i]) for i in range(len(Dish_D))])
Ph_DFM = Ph_sky * band / Sens**(3/2.)
if verbose:
print('SEFD (Phoenix):', SEFD)
print('Sens (Phoenix):', Sens1)
print('Sens_Harp (Phoenix):', Sens)
print('EIRP (Phoenix):', Ph_EIRP)
print('BeamSize (Phoenix):', np.array([calc_BeamSize(Dish_D[i],central_freq[i]) for i in range(len(Dish_D))]))
print('Sky Coverage (Phoenix):', Ph_sky.sum())
print('CWTFM (Phoenix):', zeta_AO * (Ph_EIRP)/ (Ph_rarity))
print('CWTFM (Phoenix):', zeta_AO * (Ph_EIRP_tot)/ (Ph_rarity_tot))
print('DFM (Phoenix):', Ph_DFM)
print('~o~')
#----------
#ATA
telescope = 'ATA'
ATA_stars= np.array([65,1959,2822,7459])
    band = np.array([8000.e6,2040.e6,337.e6,268.e6]) # There are 73 MHz that are RFI-flagged; this is ignored here.
central_freq = 5e9 # 1-9 GHz
freq_range_norm = (band/central_freq)
#Tsys = (80+120+95+137)/4. = 108
SEFD = calc_SEFD(calc_DishArea(6.1), 108, eff=0.58) / np.sqrt(27*26)
SEFDs = np.array([SEFD,SEFD,SEFD,SEFD])
m=6.5; nu =0.7; t=93.
dist_std = np.array([(1.4e3*3.26156 * u.lyr.to('m')),(1.1e3*3.26156 * u.lyr.to('m')),(300 * u.lyr.to('m')),(500 * u.lyr.to('m'))]) #Turnbull 2003 for HabCat
Sens = np.array([calc_Sensitivity(m,nu,t,SEFD=SEF,narrow=False) for SEF in SEFDs])
ATA_EIRP = np.array([calc_EIRP_min(dist_std[i],Sens[i]) for i in range(len(Sens))])
ATA_rarity = ATA_stars*freq_range_norm
ATA_rarity_tot = ATA_rarity.sum()
ATA_stars_tot = ATA_stars.sum()
ATA_EIRP_tot = ATA_EIRP.max()
iband = 70e6
ATA_speed = SEFD**2*nu/iband
ATA_sky = ATA_stars * 3*6./4.*np.pi/3600. # beam 3'x6' at 1.4GHz
ATA_DFM = ATA_sky * band / Sens**(3/2.)
if verbose:
print('SEFD (ATA):', SEFD)
print('Sens (ATA):', Sens)
print('EIRP (ATA):', ATA_EIRP)
print('BeamSize (ATA):', end=' ')
print('Sky Coverage (ATA):', ATA_sky.sum())
print('CWTFM (ATA):', zeta_AO * (ATA_EIRP_tot)/ (ATA_rarity_tot))
print('DFM (ATA):', ATA_DFM)
print('~o~')
#----------
#Siemion 2013
telescope = 'GBT'
Siemion_stars= 86
band = 800e6 - 130e6 #(1.1-1.2,1.33-1.9)
freq_range_norm = (band/1.5e9)
SEFD= calc_SEFD(calc_DishArea(100), 20, eff=0.72) # 10 Jy (GBT)
m=25.; nu =1.; t=300.
Sens = calc_Sensitivity(m, nu,t,SEFD=SEFD)
dist_std = (1.1e3*3.26156 * u.lyr.to('m')) # Max distance approx
Siemion_EIRP = calc_EIRP_min(dist_std,Sens)
Siemion_rarity = Siemion_stars*freq_range_norm
iband = 800e6
Siemion_speed = (SEFD/0.85)**2*nu/iband # 0.85 ==> Siemion priv. comm. (due to 2 bit data format)
Siemion_sky = Siemion_stars * calc_BeamSize(100,1.5e9)
Siemion_DFM = Siemion_sky * band / Sens**(3/2.)
if verbose:
print('SEFD (Siemion2013):', SEFD)
print('Sens (Siemion2013):', Sens)
print('EIRP (Siemion2013):', Siemion_EIRP)
print('BeamSize (Siemion2013):',calc_BeamSize(100,1.5e9))
print('Sky Coverage (Siemion2013):', Siemion_sky)
print('CWTFM (Siemion2013):', zeta_AO * (Siemion_EIRP)/ (Siemion_rarity))
print('DFM (Siemion2013):', Siemion_DFM)
print('~o~')
#----------
#Valdes 1986
telescope='HCRO'
Valdes_stars = np.array([53, 12])
band = np.array([256*4883, 1024*76])
freq_range_norm = (band/1.516e9)
SEFD = calc_SEFD(calc_DishArea(26), 100, eff=0.5)
m=3.0; nu =[4883., 76.]; t=3000.
Sens = np.array([calc_Sensitivity(m, nu[0],t,SEFD=SEFD,npol=1.),calc_Sensitivity(m, nu[1],t,SEFD=SEFD,npol=1.)])
dist_std = (20 * u.lyr.to('m')) # Max distance approx
Valdes_EIRP = np.array([calc_EIRP_min(dist_std,Sen) for Sen in Sens])
Valdes_rarity = Valdes_stars*freq_range_norm
Valdes_rarity_tot = Valdes_rarity.sum()
Valdes_EIRP_tot = Valdes_EIRP.max()
iband = 256*4883
Valdes_speed = SEFD**2*nu[0]/iband
Valdes_sky = (Valdes_stars * calc_BeamSize(26,1.5e9)).sum()
Valdes_DFM = Valdes_sky * band / Sens**(3/2.)
if verbose:
print('SEFD (Valdes 1986):', SEFD)
print('Sens (Valdes 1986):', Sens)
print('EIRP (Valdes 1986):', Valdes_EIRP)
print('BeamSize (Valdes 1986):',calc_BeamSize(26,1.5e9))
print('Sky Coverage (Valdes 1986):', Valdes_sky)
print('CWTFM (Valdes 1986):', zeta_AO * (Valdes_EIRP_tot)/ (Valdes_rarity_tot))
print('DFM (Valdes 1986):', Valdes_DFM)
print('~o~')
#----------
#Tarter 1980
    telescope = 'NRAO 91m'
Tarter_stars=201
band = 360e3*4.
freq_range_norm = (band/1.666e9)
SEFD = calc_SEFD(calc_DishArea(91), 70, eff=0.6)
m=12.0; nu =5.5; t= 45
Sens = calc_Sensitivity(m, nu,t,SEFD=SEFD)
dist_std = (25*3.26156* u.lyr.to('m')) # Max distance approx
Tarter_EIRP = calc_EIRP_min(dist_std,Sens)
Tarter_rarity = Tarter_stars*freq_range_norm
iband = 360e3
Tarter_speed = SEFD**2*nu/iband
Tarter_sky = Tarter_stars * calc_BeamSize(91,1.666e9)
Tarter_DFM = Tarter_sky * band / Sens**(3/2.)
if verbose:
print('SEFD (Tarter1980):', SEFD)
print('Sens (Tarter1980):', Sens)
print('EIRP (Tarter1980):', Tarter_EIRP)
print('BeamSize (Tarter1980):', calc_BeamSize(91,1.666e9))
print('Sky Coverage (Tarter1980):', Tarter_sky)
print('CWTFM (Tarter1980):', zeta_AO * (Tarter_EIRP)/ (Tarter_rarity))
print('DFM (Tarter1980):', Tarter_DFM)
print('~o~')
#----------
#Verschuur1973
telescope=['300ft Telescope', '140ft Telescope']
Verschuur_stars=np.array([3,8])
band = np.array([0.6e6,20e6])
freq_range_norm = (band/1.426e9)
SEFD = np.array([calc_SEFD(calc_DishArea(300*0.3048),110, eff=0.75),calc_SEFD(calc_DishArea(140*0.3048),48, eff=0.75)]) #**NOTE** the 0.75 for the 140' is not real
m=3.0; nu =[490.,7.2e3]; t= [4*60.,5*60.]
Sens = np.array([calc_Sensitivity(m, nu[0],t[0],SEFD=SEFD[0]),calc_Sensitivity(m, nu[1],t[1],SEFD=SEFD[1])])
dist_std = (5*3.26156 * u.lyr.to('m'))
Verschuur_EIRP = np.array([calc_EIRP_min(dist_std,Sen) for Sen in Sens])
Verschuur_rarity = Verschuur_stars*freq_range_norm
Verschuur_rarity_tot = Verschuur_rarity.sum()
Verschuur_EIRP_tot = Verschuur_EIRP.max()
iband = np.array([0.6e6, 2.5e6]) #300 ft: Two 192-channel receivers (at 130 km/s with 4.74kHz=1km/s at this freq.)
Verschuur_speed = SEFD.min()**2*nu[0]/iband[0]
Verschuur_sky = (Verschuur_stars * np.array([calc_BeamSize(300*0.3048,1.42e9),calc_BeamSize(140*0.3048,1.42e9)])).sum()*2 # The two comes from the off beam.
Verschuur_DFM = Verschuur_sky * band / Sens**(3/2.)
if verbose:
print('SEFD (Verschuur1973):', SEFD)
print('Sens (Verschuur1973):', Sens)
print('EIRP (Verschuur1973):', Verschuur_EIRP)
print('BeamSize (Verschuur1973):', np.array([calc_BeamSize(300*0.3048,1.42e9),calc_BeamSize(140*0.3048,1.42e9)]))
print('Sky Coverage (Verschuur1973):', Verschuur_sky)
print('CWTFM (Verschuur1973):', zeta_AO * (Verschuur_EIRP_tot)/ (Verschuur_rarity_tot))
print('DFM (Verschuur1973):', Verschuur_DFM)
print('~o~')
#----------
#META Horowitz&Sagan
telescope=''
Horowitz_stars= 1e7
band = 1.2e6
freq_range_norm = (band/1.42e9)
SEFD = calc_SEFD(calc_DishArea(26),85, eff=0.5) # eff=0.5 ==> We were unable to find a value in the literature. We assume a similar value to the antenna of the same dimensions from Valdes & Freitas (1986).
m=30; nu =0.05; t=20
Sens = calc_Sensitivity(m, nu,t,SEFD=SEFD,narrow=False)
dist_std = (700*3.26156 * u.lyr.to('m')) # Max distance: # Horowitz & Sagan (1993) suggested values for the number of stars given a distance, based on the power of an isotropic beacon.
Horowitz_EIRP = calc_EIRP_min(dist_std,Sens)
Horowitz_rarity = Horowitz_stars*freq_range_norm
iband = 400e3
Horowitz_speed = SEFD**2*nu/iband
Horowitz_sky = 41253*.68 # Horowitz_stars * calc_BeamSize(26,1.42e9)
Horowitz_DFM = Horowitz_sky * band / Sens**(3/2.)
if verbose:
print('SEFD (Horowitz):', SEFD)
print('Sens (Horowitz):', Sens)
print('EIRP (Horowitz):', Horowitz_EIRP)
print('BeamSize (Horowitz):', end=' ')
print('Sky Coverage (Horowitz):', Horowitz_sky)
print('CWTFM (Horowitz):', zeta_AO * (Horowitz_EIRP)/ (Horowitz_rarity))
print('DFM (Horowitz):', Horowitz_DFM)
print('~o~')
#---------------------------
#BL
Price_telescopes = ['GBT','GBT','Parkes']
Price_BL_stars = np.array([883,1006,195])
Price_band = np.array([(1.2-1.025+1.925-1.34)*1e9,(2.72-1.82)*1e9,(3.444-2.574)*1e9])
Price_central_freq = np.array([1.5e9,2.27e9,3.0e9,])
Price_freq_range_norm = (Price_band/Price_central_freq)
Dish_D = np.array([100,100,64])
Price_BL_SEFD = np.array([calc_SEFD(calc_DishArea(Dish_D[0]), 20, eff=0.72),
calc_SEFD(calc_DishArea(Dish_D[1]), 20, eff=0.72),
calc_SEFD(calc_DishArea(Dish_D[2]), 35, eff=0.7),
])
m=10.; nu =3.; t=300.
Price_Sens = np.array([calc_Sensitivity(m,nu,t,SEFD=Price_BL_SEFD[i]) for i in range(len(Price_BL_SEFD))])
dist_std = (50*3.26156 * u.lyr.to('m'))
Price_BL_EIRP = np.array([calc_EIRP_min(dist_std,Sen) for Sen in Price_Sens])
Price_BL_rarity = Price_BL_stars*Price_freq_range_norm
Price_BL_stars_tot = Price_BL_stars[:2].sum()
Price_rarity_tot = Price_BL_rarity[:2].sum()
Price_EIRP_tot = Price_BL_EIRP[:2].max()
iband = 900e6
Price_BL_speed = Price_BL_SEFD.mean()**2*nu/iband
Price_BL_sky = Price_BL_stars * np.array([calc_BeamSize(Dish_D[i],Price_central_freq[i]) for i in range(len(Dish_D))])
Price_BL_DFM = Price_BL_sky * Price_band / Price_Sens**(3/2.)
if verbose:
print('SEFD (Price_BL):', Price_BL_SEFD)
print('Sens (Price_BL):', Price_Sens)
print('EIRP (Price_BL):', Price_BL_EIRP)
print('BeamSize (Price_BL):', np.array([calc_BeamSize(Dish_D[i],Price_central_freq[i]) for i in range(len(Dish_D))]))
print('Sky Coverage (Price_BL):', Price_BL_sky.sum())
print('CWTFM (Price_BL):', zeta_AO *(Price_BL_EIRP) / (Price_BL_rarity))
print('CWTFM (Price_BL_tot):', zeta_AO *(Price_EIRP_tot) / (Price_rarity_tot))
print('DFM (Price_BL):', Price_BL_DFM)
print('~o~')
#---------------------------------------------------------------------------------
#EIRP values in watts.
P = np.array([1e4,1e6,1e8,1e10,1e12,1e14,1e16,1e18,1e20,1e23])
#---------------------------
# Luminosity function limit on putative transmitters.
plt.plot(np.log10(P),np.log10(calc_NP_law3(P)),lw=20,color='gray',alpha=0.3)#,label=r'$\alpha$: %s'%alpha)
plt.plot([17,17],[-11,4],'--',lw=5,color='black',alpha=0.5)#,label='Kardashev Type I')
plt.plot([13,13],[-11,4],lw=5,color='black',alpha=0.5)#,label='AO Planetary Radar')
#---------------------------
alpha = 0.7
markersize = 20
fontsize = 20
ticksize = fontsize - 2
dot_size = markersize - 12
## PLOTTING
# different colors for different bands...
# L: Red
# S: Green
# C: Orange
# X: Blue
# if surveys overlap one or more bands, just add a markeredgecolor
price1, = plt.plot([np.log10(Price_BL_EIRP[2])],[np.log10(1./Price_BL_rarity[2])],'h', color = '#690182',markeredgecolor='w',markersize = markersize)
price2, = plt.plot([np.log10(Price_EIRP_tot)],[np.log10(1./Price_rarity_tot)],'h', color = '#440154',markeredgecolor='w',markersize = markersize)
#-------
enriquez, = plt.plot([np.log10(BL_EIRP)],[np.log10(1./BL_rarity)],'h', color = 'orange',markeredgecolor='#1c608e',markersize = markersize)
graymooley, = plt.plot(np.log10(GM_EIRP),np.log10(1./GM_rarity),'o',color ='#303b6b',markeredgecolor='w',markersize = markersize)
harpab, = plt.plot(np.log10(ATA_EIRP[0:2]),np.log10(1./ATA_rarity[0:2]),'^',color ='#a1c625',markeredgecolor='w',markersize = markersize)
harpc, = plt.plot(np.log10(ATA_EIRP[2]),np.log10(1./ATA_rarity[2]),'s',color ='#a1c625',markeredgecolor='w',markersize = markersize)
harpd, = plt.plot(np.log10(ATA_EIRP[3]),np.log10(1./ATA_rarity[3]),'h',color ='#a1c625',markeredgecolor='w',markersize = markersize)
harpall1, = plt.plot(np.log10(ATA_EIRP[0]),np.log10(1./ATA_rarity_tot),'^w',markersize = markersize,markeredgecolor='#a1c625',alpha=0.7)
harpall2, = plt.plot(np.log10(ATA_EIRP[0:1]),np.log10(1./ATA_rarity[0:1]),'ow',markersize = dot_size, markeredgecolor='#a1c625')
siemion1, = plt.plot([np.log10(Siemion_EIRP)],[np.log10(1./Siemion_rarity)],'>',color ='#26828e',markeredgecolor='w',markersize = markersize)
siemion2, = plt.plot([np.log10(Siemion_EIRP)],[np.log10(1./Siemion_rarity)],'ow',markersize = dot_size,markeredgecolor='#440154')
phoenix, = plt.plot(np.log10(Ph_EIRP),np.log10(1./Ph_rarity),'<b',color ='#31688e',markeredgecolor='w',markersize = markersize)
phoenixall, = plt.plot([np.log10(Ph_EIRP_tot)],[np.log10(1./Ph_rarity_tot)],'<w',markeredgecolor='#31688e',markersize = markersize)
horowitz_sagan, = plt.plot(np.log10(Horowitz_EIRP),np.log10(1./Horowitz_rarity),'oc',color ='#add8e6',markeredgecolor='w',markersize = markersize)
valdez, = plt.plot(np.log10(Valdes_EIRP),np.log10(1./Valdes_rarity),'sy',color ='pink',markeredgecolor='w',markersize = markersize)
tarter, = plt.plot([np.log10(Tarter_EIRP)],[np.log10(1./Tarter_rarity)],'vc',color ='#1f9e89',markeredgecolor='w',markersize = markersize)
verschuur, = plt.plot(np.log10(Verschuur_EIRP),np.log10(1./Verschuur_rarity),'sm',color ='#efda21',markeredgecolor='w',markersize = markersize)
legend_handles = {'p1':price1, 'p2':price2, 'e':enriquez, 'gm':graymooley, 'h_ab':harpab, 'h_c':harpc, 'h_d':harpd, 'h_a1':harpall1, 'h_a2':harpall2, 's1':siemion1, 's2':siemion2, 'ph':phoenix, 'pha':phoenixall, 'hs':horowitz_sagan, 'v':valdez, 't':tarter, 'ver':verschuur}
return legend_handles
def compare_SETI_limits(EIRP,rarity,shape='o',color='k',project='This Project',y_label_units=True, save_as=None):
    ''' Compare SETI project with previous surveys.
'''
#---------------------------------------------------------------------------------
# plotting setup
plt.ion()
plt.figure(figsize=(15, 10))
alpha = 0.7
markersize = 20
fontsize = 20
ticksize = fontsize - 2
dot_size = markersize - 12
plt.plot([np.log10(EIRP)],[np.log10(1./rarity)],marker = shape, color = color,markersize = markersize, label=project)
ET_power_law()
plt.legend(numpoints=1,scatterpoints=1,fancybox=True, shadow=True)
plt.xlabel('EIRP [log(W)]',fontsize = fontsize)
#plt.ylabel('Transmiter Galactic Rarity [log((Nstars*BW)^-1)]',fontsize=fontsize)
if y_label_units:
plt.ylabel('Transmitter Rate \n [log(1/(Nstars * rel_BW))]',fontsize=fontsize)
else:
plt.ylabel('Transmitter Rate ',fontsize=fontsize)
plt.xticks(fontsize = ticksize)
plt.yticks(fontsize = ticksize)
#plt.ylim(-10,4)
#plt.xlim(10,23)
image_filename = 'SETI_limits_comparison'
if save_as is not None:
image_filename = save_as
from datetime import datetime
image_filename = image_filename + datetime.now().strftime("_%m-%d-%y_%H:%M:%S") + '.png'
plt.savefig(image_filename, format='png',bbox_inches='tight')
# plt.savefig('Transmitter_Rarity_FoM.pdf', format='pdf', dpi=300,bbox_inches='tight')
| 23,243 | 36.918434 | 277 |
py
|
BL-TESSsearch
|
BL-TESSsearch-main/analysis/seti_limits_py3/.ipynb_checkpoints/color_coded_ET_power_law-checkpoint.py
|
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from astropy import units as u
from matplotlib.patches import Polygon
#plt.savefig(image_filename, format='png',bbox_inches='tight')
from scipy import stats
#import add_SETI_limits
def calc_DishArea(d):
""" Compute dish area
d = dish diameter
"""
return np.pi * (d/2)**2
def calc_BeamSize(d,v,verbose=False):
""" Compute BeamSize
d = dish diameter
v = frequency
"""
c = 2.998e8 #speed of light
if verbose:
print('\nBeam is: %f \n'%(1.22* (c/(d*v)) *57.2958))
return (1.22* (c/(d*v)) *57.2958/2)**2*np.pi
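# Usage sketch (not part of the original module): rough check of calc_BeamSize for a
# GBT-like 100 m dish at 1.5 GHz, assuming the deg^2 convention of the return value.
#   calc_BeamSize(100, 1.5e9)   # ~0.015 deg^2 (HPBW ~0.14 deg)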
def calc_SEFD(A, Tsys, eff=1.0):
""" Calculate SEFD
Tsys = system temperature
A = collecting area
Ae = effective collecting area
        eff = aperture efficiency (0.0 to 1.0)
"""
    kb = 1.3806488e3 # Boltzmann constant (1.380649e-23 J/K) scaled by 1e26 so the SEFD comes out in Jy
Ae = A*eff
return 2 * Tsys * kb / Ae
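# Usage sketch (not part of the original module): SEFD of a GBT-like 100 m dish with
# Tsys = 20 K and 72% aperture efficiency, consistent with the "10 Jy (GBT)" notes used below.
#   calc_SEFD(calc_DishArea(100), 20, eff=0.72)   # ~9.8 Jy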
def calc_Sensitivity(m, nu, t, SEFD=0, Tsys=10,eff=1.0,A=100,npol=2.,narrow=True):
""" Minimum detectable luminosity for narrowband emission
Tsys = system temperature
A = collecting area
m = threshold, (e.g. 10)
nu = channel bandwidth
t = observing time
narrow = True if signal is narrower than spectral resolution.
"""
if not SEFD:
sefd = calc_SEFD(A, Tsys, eff=eff)
else:
sefd = SEFD
if narrow:
sens = m * sefd * np.sqrt(nu/(npol*t))
else:
sens = m * sefd / np.sqrt(npol*nu*t)
return sens
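# Usage sketch (not part of the original module): narrowband sensitivity with the BL-style
# parameters used in ET_power_law() below (m=25, nu=3 Hz, t=300 s, SEFD ~9.8 Jy).
#   calc_Sensitivity(25., 3., 300., SEFD=9.8)   # ~17 Jy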
def calc_EIRP_min(d,Sens):
""" Minimum detectable luminosity (EIRP) for narrowband emission.
d = distance to target star []
Sens = sensitivity of the obs (Jy)
"""
#1 Jy = 1e-26 W/m2/Hz)
return 4 * np.pi * d**2 * Sens *1e-26
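# Usage sketch (not part of the original module): minimum detectable EIRP at ~50 pc
# (about 163 ly) for a ~17 Jy narrowband sensitivity limit.
#   calc_EIRP_min(50*3.26156 * u.lyr.to('m'), 17.)   # ~5e12 W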
def calc_gain(ll, d):
""" Gain of a dish telescope
ll = wavelength (lambda)
d = dish diameter (m)
"""
return (np.pi * d / ll)**2
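# Usage sketch (not part of the original module): forward gain of a 100 m dish at 21 cm.
#   calc_gain(0.21, 100)   # ~2.2e6, i.e. ~63.5 dBi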
def calc_NP_law(alpha,P):
    ''' Calculates the power law, given the power-law exponent and an array of EIRP powers in W. Based on Gray & Mooley.
'''
# No = 1e25**alpha # Not sure how I got 1e25 for the transmitter power, but from the plot I get a lower number.
No = 1.e21**alpha
NP = No*(1./P**(alpha))
NPn = NP*1e3/4e11 # Normalized to stars in MW (4e11), and BW (1e3 .. ).
return NPn
def calc_NP_law2(alpha,P):
    ''' Calculates the power law, given the power-law exponent and an array of EIRP powers in W. Based on BL L-band.
'''
No = 706146012574.0**alpha / 304.48
NP = No*(1./P**(alpha))
return NP
def calc_NP_law3(P):
    ''' Calculates the power law for an array of EIRP powers in watts; the exponent is derived from a fit to the two anchor points below (based on both relations above).
'''
E1 = 5.e+11
S1 = 350
E2 = 1.98792219e+21 #2.75879335e+21
S2 = 7.14285714e+08
# Solving for alpha
alpha = np.log10(S2/S1) /np.log10(E2/E1)
print('The exponent (alpha) = ', alpha)
# Solving for No
No = E1**alpha / S1
NP = No*(1./P**(alpha))
return NP
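# Quick check (not part of the original module): with the anchor points hard-coded above,
# the exponent printed by calc_NP_law3 works out to alpha ~0.66, and the returned array
# follows N(P) = No * P**(-alpha).
#   calc_NP_law3(np.array([1e12, 1e18]))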
def ET_power_law(verbose=False):
#---------------------------
# Standardizing the sensitivity of telescopes by figuring out the max EIRP of a transmitter they could have found.
# For this, I need the sensitivity of the observation, given SEFD and so on.
#Standard distance (100 ly)
dist_std = (100. * u.lyr.to('m'))
    # Decided not to use the standard distance above: it makes sense when the original distance is shorter, but not when it is farther away (as in Siemion 2013).
    # Normalization to L_AO = 2e13 W, frac_freq = 1/2 and N_stars = 1k
zeta_AO = 1 #np.log10(1e3*.5)/ np.log10(2e13)
zeta_AO = 1e3*0.5/ 1e13
#---------------------------
#BL
telescope = 'GBT'
BL_stars= 692
band = 660e6 #(1.1-1.2,1.34-1.9)
freq_range_norm = (band/1.5e9)
BL_SEFD = calc_SEFD(calc_DishArea(100), 20, eff=0.72) # 10 Jy (GBT)
m=25.; nu =3.; t=300.
Sens = calc_Sensitivity(m, nu,t,SEFD=BL_SEFD)
dist_std = (50*3.26156 * u.lyr.to('m')) # Max distance approx
BL_EIRP = calc_EIRP_min(dist_std,Sens)
BL_rarity = BL_stars*freq_range_norm
iband = 800e6
BL_speed = BL_SEFD**2*nu/iband
BL_sky = BL_stars * calc_BeamSize(100,1.5e9)
BL_DFM = BL_sky * band / Sens**(3/2.)
if verbose:
print('SEFD (BL):', BL_SEFD)
print('Sens (BL):', Sens)
print('EIRP (BL):', BL_EIRP)
print('BeamSize (BL):', calc_BeamSize(100,1.5e9))
print('Sky Coverage (BL):', BL_sky)
print('CWTFM (BL):', zeta_AO *(BL_EIRP) / (BL_rarity))
print('DFM (BL):', BL_DFM)
print('~o~')
#----------
# Gray & Mooley 2017
telescope=['VLA']
GM_stars= 1e12
band = np.array([1e6, 0.125e6])
central_freq = np.array([1.4e9,8.4e9])
freq_range_norm = (band/central_freq)
SEFD = calc_SEFD(calc_DishArea(25),35, eff=0.45) / np.sqrt(27*26.) #Perley 2009
m=7.0; nu =[122., 15.3]; t=[20*60,5*60]
Sens = np.array([calc_Sensitivity(m, nu[0],t[0],SEFD=SEFD),calc_Sensitivity(m, nu[1],t[1],SEFD=SEFD)])
dist_std = (2.5e6 * u.lyr.to('m')) # Max distance approx
GM_EIRP = np.array([calc_EIRP_min(dist_std,Sen) for Sen in Sens])
GM_rarity = GM_stars*freq_range_norm
GM_rarity_tot = GM_rarity.sum()
GM_EIRP_tot = GM_EIRP.max()
iband = 1e6
GM_speed = SEFD**2*nu[0]/iband
GM_sky = 8*(0.95/2.)**2*np.pi # 0.95deg images #NOTE: in Enriquez 2017, we used this as the radius, but it is the diameter.
GM_DFM = GM_sky * band / Sens**(3/2.)
if verbose:
print('SEFD (Gray & Mooley 2017):', SEFD)
print('Sens (Gray & Mooley 2017):', Sens)
print('EIRP (Gray & Mooley 2017):', GM_EIRP)
print('BeamSize (Gray & Mooley 2017):', end=' ')
print('Sky Coverage (Gray & Mooley 2017):', GM_sky)
print('CWTFM (Gray & Mooley 2017):', zeta_AO * (GM_EIRP_tot)/ (GM_rarity_tot)) #,'or', zeta_AO*stats.hmean(GM_EIRP/GM_rarity)
print('DFM (Gray & Mooley 2017):', GM_DFM)
print('~o~')
#----------
#Phoenix
telescope = ['Arecibo','Arecibo; Parkes,Parkes,NRAO140']
Ph_stars = np.array([290,371,206,105,195]) # From Harp2016
#180MHz skip Backus2002; band from Harp2016
band = np.array([(1.75-1.2)*1e9 - 180e6,(3.0-1.75)*1e9,(1.75-1.2)*1e9, (3.0-1.75)*1e9, (3.0-1.2)*1e9])
central_freq = np.array([1.5e9,2.375e9,1.5e9,2.375e9,2.1e9])
freq_range_norm = (band/central_freq)
Dish_D = np.array([305,225,64,64,43]) # Email from G. Harp
SEFD = np.array([calc_SEFD(calc_DishArea(Dish_D[0]), 40, eff=0.7),
calc_SEFD(calc_DishArea(Dish_D[1]), 40, eff=0.7),
calc_SEFD(calc_DishArea(Dish_D[2]), 35, eff=0.7),
calc_SEFD(calc_DishArea(Dish_D[3]), 35, eff=0.7),
calc_SEFD(calc_DishArea(Dish_D[4]), 35, eff=0.7)])
m=1; nu =1.0; t=[276,195,276,138,552]
Sens1 = np.array([calc_Sensitivity(m,nu,t[i],SEFD=SEFD[i],narrow=False) for i in range(len(SEFD))])
Sens = np.array([16,16,100,100,100]) # From Harp2016
# Max distance approx ; 147Ly median distance Shostalk(2000), ~700 farthest
dist_std = (700 * u.lyr.to('m'))
Ph_EIRP = np.array([calc_EIRP_min(dist_std,Sen) for Sen in Sens])
Ph_rarity = Ph_stars*freq_range_norm
Ph_stars_tot = Ph_stars.sum()
Ph_rarity_tot = Ph_rarity.sum()
Ph_EIRP_tot = Ph_EIRP.max()
iband = 20e6
    Ph_speed = SEFD.mean()**2*nu/iband # Note: this value uses our own calculated SEFD values, which are not completely consistent with the values expected from Harp 2016.
Ph_sky = Ph_stars * np.array([calc_BeamSize(Dish_D[i],central_freq[i]) for i in range(len(Dish_D))])
Ph_DFM = Ph_sky * band / Sens**(3/2.)
if verbose:
print('SEFD (Phoenix):', SEFD)
print('Sens (Phoenix):', Sens1)
print('Sens_Harp (Phoenix):', Sens)
print('EIRP (Phoenix):', Ph_EIRP)
print('BeamSize (Phoenix):', np.array([calc_BeamSize(Dish_D[i],central_freq[i]) for i in range(len(Dish_D))]))
print('Sky Coverage (Phoenix):', Ph_sky.sum())
print('CWTFM (Phoenix):', zeta_AO * (Ph_EIRP)/ (Ph_rarity))
print('CWTFM (Phoenix):', zeta_AO * (Ph_EIRP_tot)/ (Ph_rarity_tot))
print('DFM (Phoenix):', Ph_DFM)
print('~o~')
#----------
#ATA
telescope = 'ATA'
ATA_stars= np.array([65,1959,2822,7459])
band = np.array([8000.e6,2040.e6,337.e6,268.e6]) #There are 73MHz which are RFI flagged, it is ignored here.
central_freq = 5e9 # 1-9 GHz
freq_range_norm = (band/central_freq)
#Tsys = (80+120+95+137)/4. = 108
SEFD = calc_SEFD(calc_DishArea(6.1), 108, eff=0.58) / np.sqrt(27*26)
SEFDs = np.array([SEFD,SEFD,SEFD,SEFD])
m=6.5; nu =0.7; t=93.
dist_std = np.array([(1.4e3*3.26156 * u.lyr.to('m')),(1.1e3*3.26156 * u.lyr.to('m')),(300 * u.lyr.to('m')),(500 * u.lyr.to('m'))]) #Turnbull 2003 for HabCat
Sens = np.array([calc_Sensitivity(m,nu,t,SEFD=SEF,narrow=False) for SEF in SEFDs])
ATA_EIRP = np.array([calc_EIRP_min(dist_std[i],Sens[i]) for i in range(len(Sens))])
ATA_rarity = ATA_stars*freq_range_norm
ATA_rarity_tot = ATA_rarity.sum()
ATA_stars_tot = ATA_stars.sum()
ATA_EIRP_tot = ATA_EIRP.max()
iband = 70e6
ATA_speed = SEFD**2*nu/iband
ATA_sky = ATA_stars * 3*6./4.*np.pi/3600. # beam 3'x6' at 1.4GHz
ATA_DFM = ATA_sky * band / Sens**(3/2.)
if verbose:
print('SEFD (ATA):', SEFD)
print('Sens (ATA):', Sens)
print('EIRP (ATA):', ATA_EIRP)
print('BeamSize (ATA):', end=' ')
print('Sky Coverage (ATA):', ATA_sky.sum())
print('CWTFM (ATA):', zeta_AO * (ATA_EIRP_tot)/ (ATA_rarity_tot))
print('DFM (ATA):', ATA_DFM)
print('~o~')
#----------
#Siemion 2013
telescope = 'GBT'
Siemion_stars= 86
band = 800e6 - 130e6 #(1.1-1.2,1.33-1.9)
freq_range_norm = (band/1.5e9)
SEFD= calc_SEFD(calc_DishArea(100), 20, eff=0.72) # 10 Jy (GBT)
m=25.; nu =1.; t=300.
Sens = calc_Sensitivity(m, nu,t,SEFD=SEFD)
dist_std = (1.1e3*3.26156 * u.lyr.to('m')) # Max distance approx
Siemion_EIRP = calc_EIRP_min(dist_std,Sens)
Siemion_rarity = Siemion_stars*freq_range_norm
iband = 800e6
Siemion_speed = (SEFD/0.85)**2*nu/iband # 0.85 ==> Siemion priv. comm. (due to 2 bit data format)
Siemion_sky = Siemion_stars * calc_BeamSize(100,1.5e9)
Siemion_DFM = Siemion_sky * band / Sens**(3/2.)
if verbose:
print('SEFD (Siemion2013):', SEFD)
print('Sens (Siemion2013):', Sens)
print('EIRP (Siemion2013):', Siemion_EIRP)
print('BeamSize (Siemion2013):',calc_BeamSize(100,1.5e9))
print('Sky Coverage (Siemion2013):', Siemion_sky)
print('CWTFM (Siemion2013):', zeta_AO * (Siemion_EIRP)/ (Siemion_rarity))
print('DFM (Siemion2013):', Siemion_DFM)
print('~o~')
#----------
#Valdes 1986
telescope='HCRO'
Valdes_stars = np.array([53, 12])
band = np.array([256*4883, 1024*76])
freq_range_norm = (band/1.516e9)
SEFD = calc_SEFD(calc_DishArea(26), 100, eff=0.5)
m=3.0; nu =[4883., 76.]; t=3000.
Sens = np.array([calc_Sensitivity(m, nu[0],t,SEFD=SEFD,npol=1.),calc_Sensitivity(m, nu[1],t,SEFD=SEFD,npol=1.)])
dist_std = (20 * u.lyr.to('m')) # Max distance approx
Valdes_EIRP = np.array([calc_EIRP_min(dist_std,Sen) for Sen in Sens])
Valdes_rarity = Valdes_stars*freq_range_norm
Valdes_rarity_tot = Valdes_rarity.sum()
Valdes_EIRP_tot = Valdes_EIRP.max()
iband = 256*4883
Valdes_speed = SEFD**2*nu[0]/iband
Valdes_sky = (Valdes_stars * calc_BeamSize(26,1.5e9)).sum()
Valdes_DFM = Valdes_sky * band / Sens**(3/2.)
if verbose:
print('SEFD (Valdes 1986):', SEFD)
print('Sens (Valdes 1986):', Sens)
print('EIRP (Valdes 1986):', Valdes_EIRP)
print('BeamSize (Valdes 1986):',calc_BeamSize(26,1.5e9))
print('Sky Coverage (Valdes 1986):', Valdes_sky)
print('CWTFM (Valdes 1986):', zeta_AO * (Valdes_EIRP_tot)/ (Valdes_rarity_tot))
print('DFM (Valdes 1986):', Valdes_DFM)
print('~o~')
#----------
#Tarter 1980
telsecope = 'NRAO 91m'
Tarter_stars=201
band = 360e3*4.
freq_range_norm = (band/1.666e9)
SEFD = calc_SEFD(calc_DishArea(91), 70, eff=0.6)
m=12.0; nu =5.5; t= 45
Sens = calc_Sensitivity(m, nu,t,SEFD=SEFD)
dist_std = (25*3.26156* u.lyr.to('m')) # Max distance approx
Tarter_EIRP = calc_EIRP_min(dist_std,Sens)
Tarter_rarity = Tarter_stars*freq_range_norm
iband = 360e3
Tarter_speed = SEFD**2*nu/iband
Tarter_sky = Tarter_stars * calc_BeamSize(91,1.666e9)
Tarter_DFM = Tarter_sky * band / Sens**(3/2.)
if verbose:
print('SEFD (Tarter1980):', SEFD)
print('Sens (Tarter1980):', Sens)
print('EIRP (Tarter1980):', Tarter_EIRP)
print('BeamSize (Tarter1980):', calc_BeamSize(91,1.666e9))
print('Sky Coverage (Tarter1980):', Tarter_sky)
print('CWTFM (Tarter1980):', zeta_AO * (Tarter_EIRP)/ (Tarter_rarity))
print('DFM (Tarter1980):', Tarter_DFM)
print('~o~')
#----------
#Verschuur1973
telescope=['300ft Telescope', '140ft Telescope']
Verschuur_stars=np.array([3,8])
band = np.array([0.6e6,20e6])
freq_range_norm = (band/1.426e9)
SEFD = np.array([calc_SEFD(calc_DishArea(300*0.3048),110, eff=0.75),calc_SEFD(calc_DishArea(140*0.3048),48, eff=0.75)]) #**NOTE** the 0.75 for the 140' is not real
m=3.0; nu =[490.,7.2e3]; t= [4*60.,5*60.]
Sens = np.array([calc_Sensitivity(m, nu[0],t[0],SEFD=SEFD[0]),calc_Sensitivity(m, nu[1],t[1],SEFD=SEFD[1])])
dist_std = (5*3.26156 * u.lyr.to('m'))
Verschuur_EIRP = np.array([calc_EIRP_min(dist_std,Sen) for Sen in Sens])
Verschuur_rarity = Verschuur_stars*freq_range_norm
Verschuur_rarity_tot = Verschuur_rarity.sum()
Verschuur_EIRP_tot = Verschuur_EIRP.max()
iband = np.array([0.6e6, 2.5e6]) #300 ft: Two 192-channel receivers (at 130 km/s with 4.74kHz=1km/s at this freq.)
Verschuur_speed = SEFD.min()**2*nu[0]/iband[0]
Verschuur_sky = (Verschuur_stars * np.array([calc_BeamSize(300*0.3048,1.42e9),calc_BeamSize(140*0.3048,1.42e9)])).sum()*2 # The two comes from the off beam.
Verschuur_DFM = Verschuur_sky * band / Sens**(3/2.)
if verbose:
print('SEFD (Verschuur1973):', SEFD)
print('Sens (Verschuur1973):', Sens)
print('EIRP (Verschuur1973):', Verschuur_EIRP)
print('BeamSize (Verschuur1973):', np.array([calc_BeamSize(300*0.3048,1.42e9),calc_BeamSize(140*0.3048,1.42e9)]))
print('Sky Coverage (Verschuur1973):', Verschuur_sky)
print('CWTFM (Verschuur1973):', zeta_AO * (Verschuur_EIRP_tot)/ (Verschuur_rarity_tot))
print('DFM (Verschuur1973):', Verschuur_DFM)
print('~o~')
#----------
#META Horowitz&Sagan
telescope=''
Horowitz_stars= 1e7
band = 1.2e6
freq_range_norm = (band/1.42e9)
SEFD = calc_SEFD(calc_DishArea(26),85, eff=0.5) # eff=0.5 ==> We were unable to find a value in the literature. We assume a similar value to the antenna of the same dimensions from Valdes & Freitas (1986).
m=30; nu =0.05; t=20
Sens = calc_Sensitivity(m, nu,t,SEFD=SEFD,narrow=False)
dist_std = (700*3.26156 * u.lyr.to('m')) # Max distance: # Horowitz & Sagan (1993) suggested values for the number of stars given a distance, based on the power of an isotropic beacon.
Horowitz_EIRP = calc_EIRP_min(dist_std,Sens)
Horowitz_rarity = Horowitz_stars*freq_range_norm
iband = 400e3
Horowitz_speed = SEFD**2*nu/iband
Horowitz_sky = 41253*.68 # Horowitz_stars * calc_BeamSize(26,1.42e9)
Horowitz_DFM = Horowitz_sky * band / Sens**(3/2.)
if verbose:
print('SEFD (Horowitz):', SEFD)
print('Sens (Horowitz):', Sens)
print('EIRP (Horowitz):', Horowitz_EIRP)
print('BeamSize (Horowitz):', end=' ')
print('Sky Coverage (Horowitz):', Horowitz_sky)
print('CWTFM (Horowitz):', zeta_AO * (Horowitz_EIRP)/ (Horowitz_rarity))
print('DFM (Horowitz):', Horowitz_DFM)
print('~o~')
#---------------------------
#BL
Price_telescopes = ['GBT','GBT','Parkes']
Price_BL_stars = np.array([883,1006,195])
Price_band = np.array([(1.2-1.025+1.925-1.34)*1e9,(2.72-1.82)*1e9,(3.444-2.574)*1e9])
Price_central_freq = np.array([1.5e9,2.27e9,3.0e9,])
Price_freq_range_norm = (Price_band/Price_central_freq)
Dish_D = np.array([100,100,64])
Price_BL_SEFD = np.array([calc_SEFD(calc_DishArea(Dish_D[0]), 20, eff=0.72),
calc_SEFD(calc_DishArea(Dish_D[1]), 20, eff=0.72),
calc_SEFD(calc_DishArea(Dish_D[2]), 35, eff=0.7),
])
m=10.; nu =3.; t=300.
Price_Sens = np.array([calc_Sensitivity(m,nu,t,SEFD=Price_BL_SEFD[i]) for i in range(len(Price_BL_SEFD))])
dist_std = (50*3.26156 * u.lyr.to('m'))
Price_BL_EIRP = np.array([calc_EIRP_min(dist_std,Sen) for Sen in Price_Sens])
Price_BL_rarity = Price_BL_stars*Price_freq_range_norm
Price_BL_stars_tot = Price_BL_stars[:2].sum()
Price_rarity_tot = Price_BL_rarity[:2].sum()
Price_EIRP_tot = Price_BL_EIRP[:2].max()
iband = 900e6
Price_BL_speed = Price_BL_SEFD.mean()**2*nu/iband
Price_BL_sky = Price_BL_stars * np.array([calc_BeamSize(Dish_D[i],Price_central_freq[i]) for i in range(len(Dish_D))])
Price_BL_DFM = Price_BL_sky * Price_band / Price_Sens**(3/2.)
if verbose:
print('SEFD (Price_BL):', Price_BL_SEFD)
print('Sens (Price_BL):', Price_Sens)
print('EIRP (Price_BL):', Price_BL_EIRP)
print('BeamSize (Price_BL):', np.array([calc_BeamSize(Dish_D[i],Price_central_freq[i]) for i in range(len(Dish_D))]))
print('Sky Coverage (Price_BL):', Price_BL_sky.sum())
print('CWTFM (Price_BL):', zeta_AO *(Price_BL_EIRP) / (Price_BL_rarity))
print('CWTFM (Price_BL_tot):', zeta_AO *(Price_EIRP_tot) / (Price_rarity_tot))
print('DFM (Price_BL):', Price_BL_DFM)
print('~o~')
#---------------------------
#Tremblay 2020
#tremblay = add_SETI_limits.add_SETI_limits('Tremblay 2020', 'MWA', 10355066, 128e6-98e6, (128e6-98e6)2, 3e3, tsys, app_eff, snr, spec_res, obs_time_per_scan, dist_max, 30.72e6, fig_shape, fig_color)
tremblay_transmitter_rate = -2.86
tremblay_EIRP_min = 17.23
#---------------------------------------------------------------------------------
#EIRP values in watts.
P = np.array([1e12,1e14,1e16,1e18,1e20,1e22])
#---------------------------
# Luminosity function limit on putative transmitters.
#plt.plot(np.log10(P),np.log10(calc_NP_law3(P)),lw=20,color='gray',alpha=0.3)#,label=r'$\alpha$: %s'%alpha)
arecibo, = plt.plot([17,17],[-11,-9.5],'--',lw=2,color='k',alpha=1,label='Arecibo')
arecibo1, = plt.plot([17,17], [-6.5, 4], '--', lw=2, color='k', alpha=1)
plt.text(17, -9.3, r'$\it{Solar~Power}$', {'va': 'bottom', 'ha': 'center'}, rotation=90, fontsize=24)
solarpower, = plt.plot([13,13],[-11,-9],lw=2,color='k',alpha=1, label='Solar Power')
solarpower1, = plt.plot([13,13], [-7, 4], lw=2, color='k', alpha=1)
plt.text(13, -8.8, r'$\it{Arecibo}$', {'va': 'bottom', 'ha': 'center'}, rotation=90, fontsize=24)
#---------------------------
alpha = 0.7
markersize = 20
fontsize = 20
ticksize = fontsize - 2
dot_size = markersize - 12
## PLOTTING
# different colors for different bands...
    # L: dodgerblue
    # S: firebrick
    # C: seagreen
    # X: peru
# if surveys overlap one or more bands, just add a markeredgecolor
LBand = 'dodgerblue'
SBand = 'firebrick'
CBand = 'seagreen'
XBand = 'peru'
price1, = plt.plot([np.log10(Price_BL_EIRP[2])],[np.log10(1./Price_BL_rarity[2])],'h', color = SBand, markersize = markersize)
price2, = plt.plot([np.log10(Price_EIRP_tot)],[np.log10(1./Price_rarity_tot)],'h', color = LBand,markersize = markersize, markeredgewidth=2, markeredgecolor=SBand)
#-------
enriquez, = plt.plot([np.log10(BL_EIRP)],[np.log10(1./BL_rarity)],'D', color = LBand,markeredgecolor='#1c608e',markersize = markersize-8, markeredgewidth=2)
# Gray and Mooley did a lot at L-Band and 1 at X-Band
graymooley, = plt.plot(np.log10(GM_EIRP),np.log10(1./GM_rarity),'o',color =LBand,markeredgecolor=XBand,markeredgewidth=2,markersize = markersize)
# All of Harp observations were conducted at L-Band and S-Band
#harpab, = plt.plot(np.log10(ATA_EIRP[0:2]),np.log10(1./ATA_rarity[0:2]),'^',color =LBand,markeredgecolor=SBand,markeredgewidth=2, markersize = markersize)
#harpc, = plt.plot(np.log10(ATA_EIRP[2]),np.log10(1./ATA_rarity[2]),'s',color =LBand,markeredgecolor=SBand,markeredgewidth=2,markersize = markersize)
#harpd, = plt.plot(np.log10(ATA_EIRP[3]),np.log10(1./ATA_rarity[3]),'h',color =LBand,markeredgecolor=SBand,markeredgewidth=2,markersize = markersize)
harpall1, = plt.plot(np.log10(ATA_EIRP[0]),np.log10(1./ATA_rarity_tot),marker='^', color=CBand,markersize = markersize,markeredgecolor=SBand,markeredgewidth=2,linestyle='None')
#harpall2, = plt.plot(np.log10(ATA_EIRP[0:1]),np.log10(1./ATA_rarity[0:1]),marker='o', color=LBand, markeredgecolor=CBand)
#harpall2, = plt.plot(np.log10(ATA_EIRP[0]),np.log10(1./ATA_rarity_tot),marker='^', color=LBand,markersize = markersize-2,linestyle='None')
siemion1, = plt.plot([np.log10(Siemion_EIRP)],[np.log10(1./Siemion_rarity)],'>',color =LBand,markersize = markersize, linestyle='None')
siemion2, = plt.plot([np.log10(Siemion_EIRP)],[np.log10(1./Siemion_rarity)],marker='o',color=LBand,markersize = dot_size, linestyle='None')
#phoenix, = plt.plot(np.log10(Ph_EIRP),np.log10(1./Ph_rarity),'<b',color=SBand,markersize = markersize)
phoenixall, = plt.plot([np.log10(Ph_EIRP_tot)],[np.log10(1./Ph_rarity_tot)],marker='<',color=SBand,markersize = markersize)
horowitz_sagan, = plt.plot(np.log10(Horowitz_EIRP),np.log10(1./Horowitz_rarity),marker='s',color =SBand,markeredgecolor='w',markersize = markersize, linestyle='None')
#valdez, = plt.plot(np.log10(Valdes_EIRP),np.log10(1./Valdes_rarity),'sy',color =LBand, markersize = markersize)
#tarter, = plt.plot([np.log10(Tarter_EIRP)],[np.log10(1./Tarter_rarity)],'vc',color =LBand,markersize = markersize)
#verschuur, = plt.plot(np.log10(Verschuur_EIRP),np.log10(1./Verschuur_rarity),'sm',color =LBand,markersize = markersize)
mwa, = plt.plot(tremblay_EIRP_min, tremblay_transmitter_rate, marker='X', color='tab:purple', markersize=markersize, markeredgecolor='w', linestyle='None')#, linestyle='None')
legend_handles = {'p1':price1, 'p2':price2, 'e':enriquez, 'gm':graymooley, 'h_a1':harpall1, 's1':siemion1, 's2':siemion2, 'pha':phoenixall, 'hs':horowitz_sagan, 'm':mwa}
#legend_handles = {'p1':price1, 'p2':price2, 'e':enriquez, 'gm':graymooley, 'h_ab':harpab, 'h_c':harpc, 'h_d':harpd, 'h_a1':harpall1, 'h_a2':harpall2, 's1':siemion1,'s2':siemion2, 'ph':phoenix, 'pha':phoenixall, 'hs':horowitz_sagan, 'v':valdez, 't':tarter, 'ver':verschuur, 'm':mwa}
#legend_handles = {'p1':price1, 'p2':price2, 'e':enriquez, 'gm':graymooley, 'h_ab':harpab, 'h_c':harpc, 'h_d':harpd, 'h_a1':harpall1, 'h_a2':harpall2, 's1':siemion1, 's2':siemion2, 'ph':phoenix, 'pha':phoenixall, 'hs':horowitz_sagan, 'v':valdez, 't':tarter, 'ver':verschuur}
return legend_handles
def compare_SETI_limits(EIRP,rarity,shape='o',color='k',project='This Project',y_label_units=True, save_as=None):
    ''' Compare SETI project with previous surveys.
'''
#---------------------------------------------------------------------------------
# plotting setup
plt.ion()
plt.figure(figsize=(15, 10))
alpha = 0.7
markersize = 20
fontsize = 20
ticksize = fontsize - 2
dot_size = markersize - 12
plt.plot([np.log10(EIRP)],[np.log10(1./rarity)],marker = shape, color = color,markersize = markersize, label=project)
ET_power_law()
plt.legend(numpoints=1,scatterpoints=1,fancybox=True, shadow=True)
plt.xlabel('EIRP [log(W)]',fontsize = fontsize)
#plt.ylabel('Transmiter Galactic Rarity [log((Nstars*BW)^-1)]',fontsize=fontsize)
if y_label_units:
plt.ylabel('Transmitter Rate \n [log(1/(Nstars * rel_BW))]',fontsize=fontsize)
else:
plt.ylabel('Transmitter Rate ',fontsize=fontsize)
plt.xticks(fontsize = ticksize)
plt.yticks(fontsize = ticksize)
#plt.ylim(-10,4)
#plt.xlim(10,23)
image_filename = 'SETI_limits_comparison'
if save_as is not None:
image_filename = save_as
from datetime import datetime
image_filename = image_filename + datetime.now().strftime("_%m-%d-%y_%H:%M:%S") + '.png'
plt.savefig(image_filename, format='png',bbox_inches='tight')
# plt.savefig('Transmitter_Rarity_FoM.pdf', format='pdf', dpi=300,bbox_inches='tight')
| 24,987 | 38.166144 | 286 |
py
|
BL-TESSsearch
|
BL-TESSsearch-main/analysis/seti_limits_py3/.ipynb_checkpoints/4band_compare-checkpoint.py
|
from color_coded_ET_power_law import *
import matplotlib
#matplotlib.rcParams['text.usetex'] = True
#import matplotlib.pyplot as plt
from LBand_SETI_compare import lband_compare
from SBand_SETI_compare import sband_compare
from CBand_SETI_compare import cband_compare
from XBand_SETI_compare import xband_compare
import numpy as np
#params = {'text.usetex':True,
# 'font.family':'serif',
# 'font.serif':['Palatino']}
#plt.rcParams.update(params)
def seti_compare(y_label_units=True):
    ''' Compare SETI project with previous surveys.
'''
# Get dictionaries of plot-relevant values
Lband = lband_compare(save=False)
Sband = sband_compare(save=False)
Cband = cband_compare(save=False)
Xband = xband_compare(save=False)
# Place all dictionaries in list --> Allows plotting via for-loop
dict_list = [Lband, Sband, Cband, Xband]
Lband['color'] = 'mediumorchid'
#---------------------------------------------------------------------------------
# plotting setup
plt.ion()
#plt.figure(figsize=(15, 10))
alpha = 0.7
markersize = 20
fontsize = 20
ticksize = fontsize - 2
dot_size = markersize - 12
colors = ['tab:red','tab:green','tab:orange','tab:blue']
band_handles = {'L':[],'S':[],'C':[],'X':[]}
band_letters = ['L','S','C','X']
# Plot values for all 4 bands
for i, band_dict in enumerate(dict_list):
outside, = plt.plot(np.log10(band_dict['EIRP']),np.log10(1./band_dict['rarity']),marker = '*', linestyle='None', color = colors[i], markersize = markersize-2)
#outside, = plt.plot(np.log10(band_dict['EIRP']),np.log10(1./band_dict['rarity']),marker = (4,1,30), linestyle='None', color = colors[i], markersize = markersize)
#inside, = plt.plot([np.log10(band_dict['EIRP'])],[np.log10(1./band_dict['rarity'])],marker='o', color='k', markersize = dot_size-5, linestyle='None')
band_handles[band_letters[i]].append(outside)
#band_handles[band_letters[i]].append(inside)
#plt.legend((outside, inside), band_dict['project'])
#plt.plot([np.log10(band_dict['EIRP'])],[np.log10(1./band_dict['rarity'])],marker = band_dict['shape'], color = band_dict['color'],markersize = markersize, label=band_dict['project'])
# Plot values of other surveys
h = ET_power_law()
plt.legend([band_handles['L'][0], band_handles['S'][0], band_handles['C'][0], band_handles['X'][0], h['p1'], h['p2'], h['e'], h['gm'], (h['h_a1'], h['h_a2']), h['s1'], h['pha'], h['hs'], h['m']], ['This Project: L-Band', 'This Project: S-Band', 'This Project: C-Band', 'This Project: X-Band', 'Price (2020 - Parkes)','Price (2020 - GBT)','Enriquez (2017)','Gray&Mooley (2017)', 'Harp (2016) All*','Siemion (2013)','Phoenix All*','Horowitz&Sagan (1993)', 'Tremblay (2020)'], labelspacing=1.75)
#plt.legend([band_handles['L'][0], band_handles['S'][0], band_handles['C'][0], band_handles['X'][0], h['p1'], h['p2'], h['e'], h['gm'], h['h_ab'], h['h_c'], h['h_d'], (h['h_a1'], h['h_a2']), (h['s1'], h['s2']), h['ph'], h['pha'], h['hs'], h['v'], h['t'], h['ver']], ['This Project: L-Band', 'This Project: S-Band', 'This Project: C-Band', 'This Project: X-Band', 'Price (2019 - Parkes)','Price (2019 - GBT)','Enriquez (2017)','Gray&Mooley (2017)', 'Harp (2016) a,b','Harp (2016) c','Harp (2016) d','Harp (2016) All*','Siemion (2013)','Phoenix','Phoenix All*','Horowitz&Sagan (1993)','Valdes (1986)','Tarter (1980)','Verschuur (1973)'])
#plt.legend(numpoints=1,scatterpoints=1,fancybox=True, shadow=True)
plt.ylim(-10,0)
plt.xlabel(r'EIRP$_{min}\ \left[\/\log_{10}\left(Watts\right)\/\right]$',fontsize = fontsize)
#plt.ylabel('Transmiter Galactic Rarity [log((Nstars*BW)^-1)]',fontsize=fontsize)
if y_label_units:
plt.ylabel(r'Transmitter Rate $\left[\/\log\left(\frac{1}{N_{stars} \cdot \nu_{rel}}\right)\/\right]$',fontsize=fontsize)
else:
plt.ylabel('Transmitter Rate ',fontsize=fontsize)
plt.xticks(fontsize = ticksize)
plt.yticks(fontsize = ticksize)
#plt.ylim(-10,4)
#plt.xlim(10,23)
from datetime import datetime
image_filename = 'images/'+'SETI_limits_comparison' + datetime.now().strftime("_%m-%d-%y_%H:%M:%S") + '.png'
plt.savefig(image_filename, format='png',bbox_inches='tight')
import os
os.system("shotwell %s &"%(image_filename))
seti_compare()
| 4,400 | 48.449438 | 635 |
py
|
BL-TESSsearch
|
BL-TESSsearch-main/analysis/seti_limits_py3/.ipynb_checkpoints/LBand_SETI_compare-checkpoint.py
|
from ET_power_law import *
import matplotlib.pylab as plt
def lband_compare(max_distance, save=False):
#---------------------------
# Edit values below vvvvvv
#---------------------------
# Your new values ( e.g. Enriquez 2017 x 100)
project = 'This Study: L-Band' #Project name
telescope = 'GBT' # Telescope name
N_stars = 5 # Estimated number of stars
band = 660e6 # Total bandwidth [Hz]
    central_freq = 1.5e9 # Central frequency [Hz]
dish_diam = 100 #Telescope diameter meters (single dish in current version)
dish_Tsys = 15.60 #Telescope Tsys [Kelvin]
    dish_app_eff = 0.72 #Telescope Aperture Efficiency
SNR_threshold = 10 #Survey threshold [sigma above the mean]
spectral_resolution = 3. #Spectral resolution [Hz]
scan_obs_time = 300 # Observation time per scan [sec]
#max_distance = 505.33 # Maximum distance [pc]
iband = 800e6 #Instantaneous Bandwidth [Hz]
shape = '*' # Figure shape
color = 'r' # Figure color
y_label_units = True # Units in Y label
#---------------------------
# Edit values above ^^^^^
#---------------------------
#Calculating limits
zeta_AO = 1e3*0.5/ 1e13
freq_range_norm = (band/central_freq)
SEFD = calc_SEFD(calc_DishArea(dish_diam), dish_Tsys, eff=dish_app_eff) # 10 Jy (GBT)
SEFD = 7.7251
Sens = calc_Sensitivity(SNR_threshold, spectral_resolution,scan_obs_time,SEFD=SEFD)
dist_m = (max_distance*3.26156 * u.lyr.to('m'))
EIRP = calc_EIRP_min(dist_m,Sens)
#EIRP = 1926e12
#EIRP = 491.6e12
survey_rarity = N_stars*freq_range_norm
survey_speed = SEFD**2*spectral_resolution/iband
survey_sky = N_stars * calc_BeamSize(dish_diam,central_freq)
survey_DFM = survey_sky * band / Sens**(3/2.)
def print_project():
print('~o~', project ,' (', telescope,') ', '~o~')
print('SEFD :', SEFD)
print('Sens :', Sens)
print('Max Distance (m) :', dist_m)
print('EIRP :', EIRP)
print('BeamSize :', calc_BeamSize(dish_diam,central_freq))
print('Sky Coverage :', survey_sky)
print('CWTFM :', zeta_AO *(EIRP) / (survey_rarity))
print('DFM :', survey_DFM)
print_project()
#---------------------------
#Comparing SETI limits
if save:
compare_SETI_limits(EIRP,survey_rarity,shape=shape,color=color,project=project,y_label_units=y_label_units, save_as='LBand_seti_compare')
lband_dict = {'EIRP':EIRP, 'rarity':survey_rarity, 'shape':shape, 'color':color, 'project':project, 'y_label_units':y_label_units}
return lband_dict
| 2,641 | 32.025 | 145 |
py
|
BL-TESSsearch
|
BL-TESSsearch-main/analysis/seti_limits_py3/.ipynb_checkpoints/SBand_SETI_compare-checkpoint.py
|
from ET_power_law import *
import matplotlib.pylab as plt
def sband_compare(max_distance, save=False):
#---------------------------
# Edit values below vvvvvv
#---------------------------
# Your new values ( e.g. Enriquez 2017 x 100)
project = 'This Study: S-Band' #Project name
telescope = 'GBT' # Telescope name
N_stars = 17 # Estimated number of stars
band = 940e6 # Total bandwidth [Hz]
    central_freq = 2.3e9 # Central frequency [Hz]
dish_diam = 100 #Telescope diameter meters (single dish in current version)
dish_Tsys = 14.80 #Telescope Tsys [Kelvin]
    dish_app_eff = 0.72 #Telescope Aperture Efficiency
SNR_threshold = 10 #Survey threshold [sigma above the mean]
spectral_resolution = 3. #Spectral resolution [Hz]
scan_obs_time = 300 # Observation time per scan [sec]
#max_distance = 560.22 #Maximum distance [pc]
iband = 1000e6 #Instantaneous Bandwidth [Hz]
shape = '*' # Figure shape
color = 'g' # Figure color
y_label_units = True # Units in Y label
#---------------------------
# Edit values above ^^^^^
#---------------------------
#Calculating limits
zeta_AO = 1e3*0.5/ 1e13
freq_range_norm = (band/central_freq)
SEFD = calc_SEFD(calc_DishArea(dish_diam), dish_Tsys, eff=dish_app_eff) # 10 Jy (GBT)
SEFD = 14.80
Sens = calc_Sensitivity(SNR_threshold, spectral_resolution,scan_obs_time,SEFD=SEFD)
dist_m = (max_distance*3.26156 * u.lyr.to('m'))
EIRP = calc_EIRP_min(dist_m,Sens)
#EIRP = 466.4e12
survey_rarity = N_stars*freq_range_norm
survey_speed = SEFD**2*spectral_resolution/iband
survey_sky = N_stars * calc_BeamSize(dish_diam,central_freq)
survey_DFM = survey_sky * band / Sens**(3/2.)
def print_project():
print('~o~', project ,' (', telescope,') ', '~o~')
print('SEFD :', SEFD)
print('Sens :', Sens)
print('EIRP :', EIRP)
print('Max Distance (m) :', dist_m)
print('BeamSize :', calc_BeamSize(dish_diam,central_freq))
print('Sky Coverage :', survey_sky)
print('CWTFM :', zeta_AO *(EIRP) / (survey_rarity))
print('DFM :', survey_DFM)
print_project()
#---------------------------
#Comparing SETI limits
if save:
compare_SETI_limits(EIRP,survey_rarity,shape=shape,color=color,project=project,y_label_units=y_label_units, save_as='SBand_seti_compare')
sband_dict = {'EIRP':EIRP, 'rarity':survey_rarity, 'shape':shape, 'color':color, 'project':project, 'y_label_units':y_label_units}
return sband_dict
| 2,618 | 33.012987 | 145 |
py
|
BL-TESSsearch
|
BL-TESSsearch-main/run-turboSETI/wrapTurbo.py
|
# Imports
import os, sys, time
import numpy as np
import pandas as pd
import pymysql
from turbo_seti.find_doppler.find_doppler import FindDoppler
def wrap_turboSETI(iis, outDir, sqlTable, t=True, test=False):
    '''
    iis : list/array of row indexes in the SQL table to run turboSETI on
    outDir : directory to store output subdirectories
    sqlTable : input SQL table name
    t : boolean, if true runtime is recorded in the SQL table
    test : boolean, if true run in test mode (for debugging; skips file output)
    returns : outputs .dat files from turboSETI
    '''
# Make sure index list is an Array
if type(iis) == str:
if iis[0] == '[' or iis[-1] == ']':
iis = np.fromstring(iis.strip('[]'), dtype=int, sep=',')
else:
print('Unexpected Format')
elif type(iis) != type(np.array([])):
iis = np.array(iis)
# Read in mysql database
db = pymysql.connect(host=os.environ['GCP_IP'], user=os.environ['GCP_USR'],
password=os.environ['GCP_PASS'], database='FileTracking')
query = f'''
SELECT *
FROM {sqlTable}
'''
fileinfo = pd.read_sql(query, db)
if test:
print(f'turboSETI infile : \n {fileinfo}')
# Select necessary columns
filepaths = fileinfo['filepath'].to_numpy()
filenames = fileinfo['filename'].to_numpy()
target = fileinfo['target_name'].to_numpy()
tois = fileinfo['toi'].to_numpy()
row_num = fileinfo['row_num'].to_numpy()
# Run turboSETI
for ii, infile in zip(iis, filepaths[iis]):
# start timer
if t:
start = time.time()
# Set up output subdirectory
outdir = os.path.join(outDir, f"TOI-{tois[ii]}")
if not test:
# make out directory it if doesn't exits
print(outdir)
if not os.path.exists(outdir):
os.mkdir(outdir)
# Write to log file
outlog = os.path.join(outdir, f'{tois[ii]}-cadence.log')
with open(outlog, 'a+') as f:
f.write(f'Starting turboSETI for {infile}\n')
# Run turboSETI
try:
fd = FindDoppler(infile, max_drift=4, snr=10, out_dir=outdir)
fd.search(n_partitions=32)
except Exception as e:
with open(outlog, 'a+') as f:
f.write(str(e))
sys.exit()
# Also initiate cursor for updating the table later
cursor = db.cursor()
# End timer and write to spreadsheet
name = filenames[ii].split('.')[0] + '.dat'
try:
if t:
runtime = time.time() - start
if not test:
with open(outlog, 'a+') as f:
f.write('{} Runtime : {}\n'.format(target[ii], runtime))
sqlcmd = f"""
UPDATE {sqlTable}
SET runtime={runtime},
outpath='{os.path.join(outdir,name)}',
turboSETI='TRUE'
WHERE row_num={row_num[ii]}
"""
cursor.execute(sqlcmd)
db.commit()
else:
sqlcmd = f"""
UPDATE {sqlTable}
SET outpath='{os.path.join(outdir,name)}',
turboSETI='TRUE'
WHERE row_num={row_num[ii]}
"""
cursor.execute(sqlcmd)
db.commit()
except Exception as e:
with open(outlog, 'a+') as f:
f.write(str(e))
sys.exit()
if not test:
with open(outlog, 'a+') as f:
f.write(f'Finished running turboSETI on {infile}')
f.write('\n')
if test:
time.sleep(0.1)
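# Usage sketch (hypothetical table name/path, not part of the original module): assumes the
# GCP_IP / GCP_USR / GCP_PASS environment variables point at the 'FileTracking' MySQL database
# and that the table provides filepath/filename/target_name/toi/row_num columns.
#   wrap_turboSETI([0, 1, 2], '/datax2/scratch/noahf', 'my_infile_table', t=True, test=True)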
def main():
'''
Access spreadsheet with file information data then run turboSETI on those
files if it has not already been run. Outputs to subdirectories labelled by
cadence ON target inside specified directory
'''
dir = '/datax2/scratch/noahf'
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--ii', help='Array of indexes to run through turboSETI')
parser.add_argument('--sqlTable', help='SQL table with data')
parser.add_argument('--outdir', help='output directory', type=str, default=dir)
parser.add_argument('--timer', help='Should the runtime be recorded', type=bool, default=True)
parser.add_argument('--test', help='If true, script enters testing mode', type=bool, default=False)
args = parser.parse_args()
wrap_turboSETI(args.ii, args.outdir, args.sqlTable, t=args.timer, test=args.test)
if __name__ == '__main__':
sys.exit(main())
| 4,765 | 30.773333 | 103 |
py
|
BL-TESSsearch
|
BL-TESSsearch-main/run-turboSETI/multiTurbo.py
|
# Imports
import os, sys, time
import subprocess as sp
import numpy as np
import pandas as pd
import pymysql
def getNodes(nnodes):
'''
Get a list of compute nodes based on the number of nodes given
nnodes [int] : number of nodes to run the commands on
returns : list of compute nodes to run on at GBT
'''
# Create list of available compute nodes
cn = []
for i in range(8):
for k in range(10):
if i == 0:
node = f'blc{k}'
else:
node = f'blc{i}{k}'
if int(node[-1])<=7 and node!='blc47': # skip blc47 because it has an error
cn.append(node)
# Choose compute nodes starting with highest number
cn = np.flip(cn)
cn = cn[:nnodes]
print(f'Running on compute nodes {min(cn)} to {max(cn)}')
return cn
def getIndex(sqlTable, splicedonly, unsplicedonly, debug=False):
'''
Get list of indexes to run turboSETI on
sqlTable [str] : name of sql table to reference in FileTracking database
splicedonly [bool] : if only the spliced files should be run through
unsplicedonly [bool] : if only the spliced files should be run through
debug [bool] : if True, print statements for debugging
returns : list of indexes to run turboSETI on
'''
mysql = pymysql.connect(host=os.environ['GCP_IP'], user=os.environ['GCP_USR'],
password=os.environ['GCP_PASS'], database='FileTracking')
query = f'''
SELECT *
FROM {sqlTable}
'''
fileinfo = pd.read_sql(query, mysql)
if debug:
print(f'table used : \n{fileinfo}')
# Create 2D array of indexes
spliced = fileinfo['splice'].to_numpy()
tois = fileinfo['toi'].to_numpy()
turbo = fileinfo['turboSETI'].to_numpy()
uniqueIDs = np.unique(tois)
ii2D = []
for id in uniqueIDs:
if splicedonly:
arg = (spliced == 'spliced') * (tois == id) * (turbo == 'FALSE')
elif unsplicedonly:
arg = (spliced == 'unspliced') * (tois == id) * (turbo == 'FALSE')
else:
arg = (tois == id) * (turbo == 'FALSE')
whereID = np.where(arg)[0]
ii2D.append(whereID)
if debug:
print(f'indexes used: {ii2D}')
# Get number of files running through
length = 0
for row in ii2D:
length+=len(row)
print(f"Running turboSETI on {length} files")
return ii2D
def multiCommand(nodes, commands, slowdebug=False):
'''
Run n commands on n compute nodes
nodes [list] : list of compute nodes to run on
commands [list] : list of commands to run on each compute nodes, the first
command will be run on the first compute node, etc.
slowdebug [bool] : if True, prints subprocess output as it goes
returns list of subprocess Popen objects, one for each compute node
'''
# Run on separate compute nodes
ps = []
for cmd, node in zip(commands, nodes):
ssh = sp.Popen(cmd, universal_newlines=True, stdout=sp.PIPE, stderr=sp.PIPE, stdin=sp.PIPE)
ps.append(ssh)
if slowdebug:
print(ssh.stdout.readlines(), ssh.stderr.readlines())
return ps
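# Usage sketch (hypothetical node names, not part of the original module): run one command per
# node and wait for all of them to finish.
#   nodes = ['blc75', 'blc74']
#   cmds = [['ssh', n, 'hostname'] for n in nodes]
#   for p in multiCommand(nodes, cmds):
#       p.communicate()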
def main():
'''
Run turboSETI in parallel across multiple compute nodes on GBT
Must setup environment variables with access to a mysql database as specified
in README
INPUT OPTIONS
nnodes : number of compute nodes to run on, default is
debug : prints specific lines to help debug subprocess
timer : times the run if set to true, default is true
outdir : output directory of turboSETI files, will consist of subdirectories
labelled by TOI (ON target)
sqlTable : name of SQL table
splicedonly : If True only spliced files are run through
unsplicedonly : If True only unspliced files are run through
'''
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--nnodes', help='Number of Compute nodes to run on', type=int, default=64)
parser.add_argument('--debug', help='if true run script in debug mode', type=bool, default=False)
parser.add_argument('--timer', help='times run if true', type=bool, default=True)
parser.add_argument('--outdir', help='Output Directory for turboSETI files', type=str, default='/datax2/scratch/noahf')
parser.add_argument('--sqlTable', help='Table name in the sql database', type=str)
parser.add_argument('--splicedonly', help='Should it be run on only the spliced files', type=bool, default=False)
parser.add_argument('--unsplicedonly', help='Should it be run on only the spliced files', type=bool, default=False)
parser.add_argument('--slowdebug', type=bool, default=False)
args = parser.parse_args()
if args.timer:
start = time.time()
condaenv = '/home/noahf/miniconda3/bin/activate'
cwd = os.getcwd()
varPath = '~/.bash_profile'
print(f'Writing files to {args.outdir}')
cns = getNodes(args.nnodes)
ii2D = getIndex(args.sqlTable,
splicedonly=args.splicedonly,
unsplicedonly=args.unsplicedonly,
debug=args.debug)
cmds = []
nodes = []
for node, ii in zip(cns, ii2D):
if len(ii) > 0:
print(f"Running turboSETI on {len(ii)} files on compute node: {node}")
if args.debug:
cmd = ['ssh', node, f"source {condaenv} runTurbo ; source {varPath} ; python3 {cwd}/wrapTurbo.py --ii '{ii.tolist()}' --timer {args.timer} --outdir {args.outdir} --test {args.debug} --sqlTable {args.sqlTable}"]
print(f'Running: {cmd}')
else:
cmd = ['ssh', node, f"source {condaenv} runTurbo ; source {varPath} ; python3 {cwd}/wrapTurbo.py --ii '{ii.tolist()}' --timer {args.timer} --outdir {args.outdir} --sqlTable {args.sqlTable}"]
cmds.append(cmd)
nodes.append(node)
ps = multiCommand(nodes, cmds, slowdebug=args.slowdebug)
try:
for p in ps:
p.communicate()
except KeyboardInterrupt:
for p, cn in zip(ps, nodes):
exitcmd = ['ssh', cn, f"kill -9 $(pidof python3 {cwd}/wrapTurbo.py)"]
exitssh = sp.Popen(exitcmd, universal_newlines=True, stdout=sp.PIPE, stderr=sp.PIPE)
print('All Processes Terminated')
if args.timer:
print(time.time()-start)
if __name__ == '__main__':
sys.exit(main())
| 6,522 | 32.280612 | 226 |
py
|
BL-TESSsearch
|
BL-TESSsearch-main/run-turboSETI/prepTurbo/turboSETI-wrapper.py
|
# Imports
import os, sys, time
import pandas as pd
import numpy as np
from turbo_seti.find_doppler.find_doppler import FindDoppler
import gspread
from oauth2client.service_account import ServiceAccountCredentials
def getSheet():
'''
gains access to the necessary google spreadsheet
returns google spreadsheet client
'''
# Gain access to the google sheets and read in table with pandas
scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
jsonfile = os.path.join(os.getcwd(), 'client_info.json')
credentials = ServiceAccountCredentials.from_json_keyfile_name(jsonfile, scope)
client = gspread.authorize(credentials)
ss = client.open('franz-turboSETI-input-file-info').sheet1
return ss
def readSpreadsheet():
'''
Reads in information from google sheets file-info table
Necessary headers and example table here:
https://docs.google.com/spreadsheets/d/1Jny2OhsXjr23HhlN_e5bJ-WITwP2HSFZJIBWg3Dpr_Y/edit?usp=sharing
returns : pandas table for input into turboSETI wrapper
'''
# Read in info from spreadsheet
ss = getSheet()
ssdata = ss.get_all_records()
fileinfo = pd.DataFrame.from_dict(ssdata)
# Shorten fileinfo to only the ones that haven't run through turboSETI
notDone = fileinfo[fileinfo['TurboSETI?'] == 'FALSE']
return notDone
def writeSpreadsheet(row, column=9, msg='TRUE'):
'''
Updates google spreadsheet according to new changes
row [int] : row index that needs to be updated, count starts at 1
column [int] : column index that needs to be updated, defaulted to 9,
count starts at 1
msg [string] : Message to put in cell, defaulted to TRUE
returns : nothing, just updates the google sheet
'''
ss = getSheet()
ss.update_cell(row, column, msg)
def wrap_turboSETI(outDir, t=True):
'''
outDir : directory to store output subdirectories
t : boolean, if true runtime is written to spreadsheet
returns : outputs .dat files from turboSETI
'''
# Read in spreadsheet for files not run through TurboSETI
notdone = readSpreadsheet()
# Select necessary columns
filepaths = notdone['FILE PATH'].to_numpy()
filenames = notdone['FILE NAME'].to_numpy()
target = notdone['TARGET NAME'].to_numpy()
tois = notdone['TOI'].to_numpy()
spliced = notdone['SPLICED?'].to_numpy()
#isSpliced = np.where(spliced == 'spliced')[0]
# Run turboSETI
for ii, infile in enumerate(filepaths):
# for now just run on spliced files
if spliced[ii] == 'spliced':
# start timer
if t:
start = time.time()
# Set up output subdirectory
outdir = os.path.join(outDir, f'TOI-{tois[ii]}')
# Make out directory if it doesn't exist
if not os.path.exists(outdir):
os.makedirs(outdir)
# Run turboSETI
fd = FindDoppler(infile, max_drift=4, snr=10, out_dir=outdir)
fd.search(n_partitions=8)
# for now just
#print("it's working!!")
# End timer and write to spreadsheet if time is true
if t:
runtime = time.time() - start
print()
print(f'{target[ii]} Runtime : {runtime}')
writeSpreadsheet(ii+2, column=10, msg=runtime)
# Write outfile path to spreadsheet
name = filenames[ii].split('.')[0] + '.dat'
writeSpreadsheet(ii+2, column=11, msg=os.path.join(outdir, name))
# Update spreadsheet to reflect turboSETI run
# Add 2 because sheets is 1 indexed (+1) and first line is a header (+1)
writeSpreadsheet(ii+2)
def main():
'''
Access spreadsheet with file information data then run turboSETI on those
files if it has not already been run. Outputs to subdirectories labelled by
cadence ON target inside specified directory
'''
import argparse
dir = '/datax/scratch/noahf/turboSETI-outFiles'
parser = argparse.ArgumentParser()
parser.add_argument('--outdir', help='turboSETI output directory', type=str, default=dir)
parser.add_argument('--timer', help='should the run be timed', type=bool, default=True)
args = parser.parse_args()
if args.timer:
wrap_turboSETI(args.outdir)
else:
wrap_turboSETI(args.outdir, t=False)
if __name__ == '__main__':
sys.exit(main())
| 4,535 | 30.5 | 104 |
py
|
BL-TESSsearch
|
BL-TESSsearch-main/run-turboSETI/connect-spreadsheet/connectSheet.py
|
# Imports
import pandas as pd
import os
import gspread
from oauth2client.service_account import ServiceAccountCredentials
def getSheet():
'''
gains access to the necessary google spreadsheet
returns google spreadsheet client
'''
# Gain access to the google sheets and read in table with pandas
scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
jsonfile = os.path.join(os.getcwd(), 'client_info.json')
credentials = ServiceAccountCredentials.from_json_keyfile_name(jsonfile, scope)
client = gspread.authorize(credentials)
# Real path to spreadsheet
ss = client.open('franz-turboSETI-input-file-info').sheet1
# Uncomment below to test
#ss = client.open('backup-franz-turboSETI-input-file-info').sheet1
return ss
def readSheet():
'''
Reads in information from google sheets file-info table
Necessary headers and example table here:
https://docs.google.com/spreadsheets/d/1Jny2OhsXjr23HhlN_e5bJ-WITwP2HSFZJIBWg3Dpr_Y/edit?usp=sharing
returns : pandas table for input into turboSETI wrapper
'''
# Read in info from spreadsheet
ss = getSheet()
ssdata = ss.get_all_records()
fileinfo = pd.DataFrame.from_dict(ssdata)
return fileinfo
def writeSheet(row, column=9, msg='TRUE'):
'''
Updates google spreadsheet according to new changes
row [int] : row index that needs to be updated, count starts at 1
column [int] : column index that needs to be updated, defaulted to 9,
count starts at 1
msg [string] : Message to put in cell, defaulted to TRUE
returns : nothing, just updates the google sheet
'''
ss = getSheet()
ss.update_cell(row, column, msg)
| 1,757 | 28.79661 | 104 |
py
|
fat-albert
|
fat-albert-master/abmn/__init__.py
| 0 | 0 | 0 |
py
|
|
fat-albert
|
fat-albert-master/abmn/src/eval_ensemble.py
|
# compute accuracy of majority vote of all prediction files (probabilities_{MODEL_NAME}) in ensemble directory
import glob
import numpy as np
import argparse
import os
def eval_ensemble(ensemble_directory, gold_labels_file, predictions_file, evaluation_file, ensemble_name):
# read the gold labels
with open(gold_labels_file) as f:
content = f.readlines()
gold_labels = [int(x.split()[1]) for x in content]
# read the predictions of the individual models
predictions = np.zeros((len(gold_labels), 5))
model_prediction_files = glob.glob(ensemble_directory + "/probabilities_*")
print(model_prediction_files)
for model_prediction_file in model_prediction_files:
print("evaluating model %s" % model_prediction_file)
with open(model_prediction_file) as f:
content = f.readlines()
for i, line in enumerate(content[1:]):
pred_ind = int(line.split("\t")[2])
if model_prediction_file == "probabilities_LSTM.txt":
predictions[i][pred_ind] += 1
elif model_prediction_file == "probabilities_CNN.txt":
predictions[i][pred_ind] += 1
else:
predictions[i][pred_ind] += 1
predictions_bert = np.loadtxt(ensemble_directory + "/preds.txt.bak")
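    # BERT predictions are weighted 2.5 so BERT outvotes any two of the three other models,
    # but is overruled when all three of them agree on a different answer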
counter = 0
for i in predictions_bert:
predictions[counter][int(i)] += 2.5
counter = counter + 1
# compute majority votes and evaluate against gold standard
total_acc = 0.0
with open(predictions_file, "w") as p, open(evaluation_file, "w") as e:
for i, sample in enumerate(predictions):
correct = int(gold_labels[i] == np.argmax(sample))
print("label: " + str(gold_labels[i]) + " - " + str(sample) + " - " + str(
gold_labels[i] == np.argmax(sample)))
result = (ensemble_name + ":" + str(i) + " " + str(np.argmax(sample)))
p.write(result + "\n")
e.write("%s on %d: %d\n" % (ensemble_name, i, correct))
if gold_labels[i] == np.argmax(sample):
total_acc += 1.0
print("total accuracy for " + ensemble_name + ": " + str(total_acc / len(gold_labels)))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("ensemble_directory", help="Folder that contains all probabilites_{MODEL}.txt files"
" that should be incorporated into the ensemble")
parser.add_argument("gold_labels_file",
help="Contains correct answers for all evaluation set questions line-by-line"
" in the format 'data_conf.TEST_SET:question_id correct_answer_id'"
"question_id and answer_id are zero-indexed")
parser.add_argument("-ensemble_name", default="ensemble on validation set", help="Optional name for the ensemble,"
"for submitting test results"
"to the MovieQA sever use"
"'test'")
args = parser.parse_args()
ensemble_directory = args.ensemble_directory
gold_labels_file = args.gold_labels_file
# majority-vote ensemble predictions for each question
predictions_file = os.path.join(ensemble_directory, "predictions.txt")
# for each question 1 if ensemble predicts correct answer, else 0
evaluation_file = os.path.join(ensemble_directory, "evaluation.txt")
ensemble_name = args.ensemble_name
eval_ensemble(ensemble_directory, gold_labels_file, predictions_file, evaluation_file, ensemble_name)
| 3,799 | 45.341463 | 118 |
py
|
fat-albert
|
fat-albert-master/abmn/src/main.py
|
import sys
import os
import numpy as np
import argparse
sys.path.append('movieqa')
sys.path.append('glove')
sys.path.append('core')
def set_data_conf_values(model, mode, model_name, evaluation_file_version):
data_conf = model.data_conf
data_conf.MODEL_NAME = model_name
data_conf.TRAIN_DIR = data_conf.OUTPUT_DIR + '/train_' + data_conf.MODEL_NAME + '/'
data_conf.MODE = mode
print("mode %s, data_conf.MODE %s" % (mode, data_conf.MODE))
if evaluation_file_version:
data_conf.EVAL_FILE_VERSION = "_" + evaluation_file_version
data_conf.EVAL_MODIFIED_MODE = True
if mode in ['val', 'test']:
data_conf.MODE_MODEL_NAME_EVAL_FILE_VERSION = mode + '_' + model_name + data_conf.EVAL_FILE_VERSION
data_conf.EVAL_DIR = data_conf.OUTPUT_DIR + '/' + data_conf.MODE_MODEL_NAME_EVAL_FILE_VERSION
data_conf.EVAL_RECORD_PATH = data_conf.RECORD_DIR + '/' + mode + data_conf.EVAL_FILE_VERSION
print("conf.MODE_MODEL_NAME_EVAL_FILE_VERSION", data_conf.MODE_MODEL_NAME_EVAL_FILE_VERSION)
# use "qa.json" for validation + test, unless another eval_file_version is specified
if mode == 'val' and data_conf.EVAL_FILE_VERSION:
data_conf.EVAL_FILE = data_conf.DATA_PATH + '/data/qa_%s%s.json' % (mode, data_conf.EVAL_FILE_VERSION)
if not os.path.exists(data_conf.EVAL_RECORD_PATH):
print("Evaluating MovieQA in modified mode...")
print("The records for the evaluation data %s are created and will be stored at %s:"
% (data_conf.EVAL_FILE_VERSION, data_conf.EVAL_RECORD_PATH))
os.makedirs(data_conf.EVAL_RECORD_PATH)
import movieqa.preprocess as pp
new_vocab_size = pp.create_validation_dataset(data_conf.MODE)
if new_vocab_size and new_vocab_size > 0:
data_conf.VOCAB_SIZE = new_vocab_size
model.load_embeddings()
def main(mode, model_type, model_name, dropout, learning_rate, loss_function, batch_size, evaluation_file_version):
os.chdir("movieqa")
model = None
if model_type == "lstm":
from movieqa import run_lstm as model
elif model_type == "cnn":
from movieqa import run_cnn as model
elif model_type == "word-level-cnn":
from movieqa import run_cnn_word_level as model
set_data_conf_values(model, mode, model_name, evaluation_file_version)
model_conf = model.model_conf
if dropout:
model_conf.DROPOUT_RATE = dropout
if learning_rate:
model_conf.INITIAL_LEARNING_RATE = learning_rate
if loss_function:
model_conf.LOSS_FUNC = loss_function
if batch_size:
model_conf.BATCH_SIZE = batch_size
if mode == "train":
model.train_model()
else:
model.eval_model()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("mode", choices=["train", "val", "test"], help="Task to perform,"
"val tests on validation set, test on test set")
parser.add_argument("model_type", choices=["lstm", "cnn", "word-level-cnn"])
parser.add_argument("model_name", help="Name of model to be saved after training or loaded for evaluation")
parser.add_argument("-dropout", default=0.0, help="Dropout on the input embeddings")
parser.add_argument("-learning_rate", default=0.001, help="Learning rate for Adam optimizer")
parser.add_argument("-loss_function", choices=["_entropy_", "_hinge_"],
help="Type of loss function to compute error,"
"either cross entropy or hinge loss")
parser.add_argument("-batch_size", default=30)
parser.add_argument("-eval_file_version",
help="Model is evaluated on data/data/qa_MODE_{EVAL_FILE_VERSION}.json, empty default"
"evaluates on original val or test file")
parser.add_argument("-gpuid", default='-1', help="Id of GPU to run the model on (default: run only on CPU)")
args = parser.parse_args()
np.set_printoptions(linewidth=100000)
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpuid
print("Run on GPU %s" % str(os.environ['CUDA_VISIBLE_DEVICES']))
main(args.mode, args.model_type, args.model_name, args.dropout, args.learning_rate, args.loss_function,
args.batch_size, args.eval_file_version)
| 4,431 | 41.209524 | 119 |
py
|
fat-albert
|
fat-albert-master/abmn/src/eval_ensemble_test.py
|
# compute accuracy of majority vote of all prediction files (probabilities_{MODEL_NAME}) in ensemble directory
import glob
import numpy as np
import argparse
import os
def eval_ensemble(ensemble_directory, gold_labels_file, predictions_file, evaluation_file, ensemble_name):
# read the gold labels
with open(gold_labels_file) as f:
content = f.readlines()
gold_labels = [int(x.split()[1]) for x in content]
agreements = 0
disagreements = 0
# read the predictions of the individual models
predictions = np.zeros((len(gold_labels), 5))
model_prediction_files = glob.glob(ensemble_directory + "/probabilities_*")
print(model_prediction_files)
for model_prediction_file in model_prediction_files:
print("evaluating model %s" % model_prediction_file)
with open(model_prediction_file) as f:
content = f.readlines()
for i, line in enumerate(content[1:]):
pred_ind = int(line.split("\t")[1])
if model_prediction_file.endswith("probabilities_LSTM.txt"):
predictions[i][pred_ind] += 1
elif model_prediction_file.endswith("probabilities_CNN.txt"):
predictions[i][pred_ind] += 1
else:
predictions[i][pred_ind] += 1
predictions_bert = np.loadtxt(ensemble_directory + "/preds.txt")
counter = 0
for i in predictions_bert:
predictions[counter][int(i)] += 2.5
if predictions[counter][int(i)] == 5.5:
agreements = agreements + 1
elif predictions[counter][int(i)] == 2.5:
disagreements = disagreements + 1
counter = counter + 1
# compute majority votes and evaluate against gold standard
total_acc = 0.0
with open(predictions_file, "w") as p, open(evaluation_file, "w") as e:
for i, sample in enumerate(predictions):
correct = int(gold_labels[i] == np.argmax(sample))
print("label: " + str(gold_labels[i]) + " - " + str(sample) + " - " + str(
gold_labels[i] == np.argmax(sample)))
result = (ensemble_name + ":" + str(i) + " " + str(np.argmax(sample)))
p.write(result + "\n")
e.write("%s on %d: %d\n" % (ensemble_name, i, correct))
if gold_labels[i] == np.argmax(sample):
total_acc += 1.0
print("total accuracy for " + ensemble_name + ": " + str(total_acc / len(gold_labels)))
print("Agreements = " + str(agreements))
print("Disagreements = " + str(disagreements))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("ensemble_directory", help="Folder that contains all probabilites_{MODEL}.txt files"
" that should be incorporated into the ensemble")
parser.add_argument("gold_labels_file",
help="Contains correct answers for all evaluation set questions line-by-line"
" in the format 'data_conf.TEST_SET:question_id correct_answer_id'"
"question_id and answer_id are zero-indexed")
parser.add_argument("-ensemble_name", default="ensemble on validation set", help="Optional name for the ensemble,"
"for submitting test results"
"to the MovieQA sever use"
"'test'")
args = parser.parse_args()
ensemble_directory = args.ensemble_directory
gold_labels_file = args.gold_labels_file
# majority-vote ensemble predictions for each question
predictions_file = os.path.join(ensemble_directory, "predictions.txt")
# for each question 1 if ensemble predicts correct answer, else 0
evaluation_file = os.path.join(ensemble_directory, "evaluation.txt")
ensemble_name = args.ensemble_name
eval_ensemble(ensemble_directory, gold_labels_file, predictions_file, evaluation_file, ensemble_name)
| 4,134 | 44.944444 | 118 |
py
|
fat-albert
|
fat-albert-master/abmn/src/adversarial_sentence_level_black_box.py
|
"""
Calls the sentence-level black-box adversarial attacks,
which are our reimplementation and extension of Jia and Liang (2017)'s AddAny attack
"""
import glob
import sys
import os
import numpy as np
import argparse
os.chdir("movieqa")
from movieqa.adversarial_addAny import create_addAny_examples as create_addany
from movieqa.adversarial_addAny import eval_addAny as evaluate_addany
sys.path.append('movieqa')
sys.path.append('glove')
sys.path.append('core')
def main(mode, model, attack_type, models_folder, examples_folder, instances_to_attack):
filenames = glob.glob(os.path.join("outputs", models_folder) + "/*")
print("Running adversarial attack on models %s" % str(filenames))
total_acc = 0
for f in filenames:
f_examples_folder = os.path.join(f, examples_folder)
if mode == "create_examples":
print("create adversarial examples in %s for %s" % (examples_folder, f))
create_addany.run_creation(model, attack_type, f, f_examples_folder, instances_to_attack)
elif mode == "eval_examples":
print("evaluate adversarial examples from %s for %s" % (examples_folder, f))
acc = evaluate_addany.run_evaluation(model, attack_type, f, f_examples_folder, instances_to_attack)
total_acc += acc
print(total_acc / len(filenames))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("mode", choices=["create_examples", "eval_examples"],
help="Task to perform,"
"create_example creates adversarial sentences,"
"eval_examples evaluates the model's accuracy")
parser.add_argument("model_type", choices=["lstm", "cnn"], help="Word-level CNN model is currently not supported")
parser.add_argument("attack_type", choices=["addC", "addQ", "addA", "addQA"],
help="Controls the word pool from which the adversarial sentence is created;"
"addC: common words from 'common_english.txt';"
"addQ: common words + question words;"
"addA: common words + wrong answer candidate words;"
"addQA: common words + question words + wrong answer candidate words")
parser.add_argument("models_folder", help="Path to folder within movieqa/outputs directory that contains the"
"training directories of all models which should be attacked")
parser.add_argument("instances_to_attack", help="folder containing the preprocessed instances to be attacked"
" in .tfrecords and .pickle format (obtain them via preprocess.py)")
parser.add_argument("-examples_folder", default="addAny_sentences",
help="Name of subfolders within each attacked model_folder where"
"adversarial sentences are stored")
parser.add_argument("-gpuid", default='-1', help="Id of GPU to run the model on (default: run only on CPU)")
args = parser.parse_args()
np.set_printoptions(linewidth=100000)
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpuid
print("Run on GPU %s" % str(os.environ['CUDA_VISIBLE_DEVICES']))
main(args.mode, args.model_type, args.attack_type, args.models_folder, args.examples_folder,
args.instances_to_attack)
| 3,443 | 46.833333 | 120 |
py
|
fat-albert
|
fat-albert-master/abmn/src/__init__.py
| 1 | 0 | 0 |
py
|
|
fat-albert
|
fat-albert-master/abmn/src/adversarial_white_box.py
|
import glob
import sys
import os
import numpy as np
import argparse
sys.path.append('movieqa')
sys.path.append('glove')
sys.path.append('core')
def main(mode, model_type, models_folder, dropout, learning_rate, loss_function, batch_size, attack_level,
num_modified_words, percentage_attacked_samples):
os.chdir("movieqa")
if model_type == "lstm":
from movieqa import run_lstm as model
elif model_type == "cnn":
from movieqa import run_cnn as model
elif model_type == "word-level-cnn":
from movieqa import run_cnn_word_level as model
from movieqa import run_adversarial_white_box as adversary
model_conf = model.model_conf
if dropout:
model_conf.DROPOUT_RATE = dropout
if learning_rate:
model_conf.INITIAL_LEARNING_RATE = learning_rate
if loss_function:
model_conf.LOSS_FUNC = loss_function
if batch_size:
model_conf.BATCH_SIZE = batch_size
filenames = glob.glob(os.path.join("outputs", models_folder) + "/*")
print("Running adversarial attack on models %s" % str(filenames))
for model_name in filenames:
set_data_conf_values(model, mode, model_name, attack_level)
if mode == "adversarial-train":
adversary.train_model(model_type, attack_level, num_modified_words, percentage_attacked_samples)
else:
adversary.eval_model(model_type, attack_level, num_modified_words, percentage_attacked_samples)
def set_data_conf_values(model, mode, model_name, attack_level):
    # split off the mode prefix from the model name first
model_name = os.path.basename(model_name).split("_")
if len(model_name) > 1:
model_name = "_".join(model_name[1:])
else:
model_name = model_name[0]
print("Model name %s" % model_name)
data_conf = model.data_conf
data_conf.MODEL_NAME = model_name
data_conf.TRAIN_DIR = data_conf.OUTPUT_DIR + '/train_' + data_conf.MODEL_NAME + '/'
data_conf.MODE = mode
print("mode %s, data_conf.MODE %s" % (mode, data_conf.MODE))
if mode in ['val', 'test']:
data_conf.MODE_MODEL_NAME_EVAL_FILE_VERSION = "%s%s_adversarial_%s-level_whitebox_%s" \
% (mode, data_conf.EVAL_FILE_VERSION, attack_level, model_name)
data_conf.EVAL_DIR = data_conf.OUTPUT_DIR + '/' + data_conf.MODE_MODEL_NAME_EVAL_FILE_VERSION
if not os.path.exists(data_conf.EVAL_DIR):
os.makedirs(data_conf.EVAL_DIR)
data_conf.EVAL_RECORD_PATH = data_conf.RECORD_DIR + '/' + mode + data_conf.EVAL_FILE_VERSION
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("mode", choices=["adversarial-train", "val", "test"],
help="Task to perform; val tests on validation set, test on test set")
parser.add_argument("model_type", choices=["lstm", "cnn", "word-level-cnn"])
parser.add_argument("models_folder", help="Path to folder within movieqa/outputs directory that contains the"
"training directories of all models which should be attacked"
"or adversarially trained")
parser.add_argument("attack_level", choices=["word", "sentence"])
parser.add_argument("-num_modified_words", choices=[1, 2, 3, 4, 5, 10, 20, 40], default=1, type=int,
help="Number of top k attended words in the most relevant sentence that "
"are modified by the attack (only relevant for attack_level=word)")
parser.add_argument("-percentage_attacked_samples", choices=range(0, 101), default=100, type=int,
help="Percentage of the instances in the dataset"
"that are attacked (0 = no attack) ->"
"(100 = all instances attacked)")
parser.add_argument("-dropout", default=0.0, help="Dropout on the input embeddings")
parser.add_argument("-learning_rate", default=0.001, help="Learning rate for Adam optimizer")
parser.add_argument("-loss_function", choices=["_entropy_", "_hinge_"],
help="Type of loss function to compute error,"
"either cross entropy or hinge loss")
parser.add_argument("-batch_size", default=30)
parser.add_argument("-gpuid", default='-1', help="Id of GPU to run the model on (default: run only on CPU)")
args = parser.parse_args()
np.set_printoptions(linewidth=100000)
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpuid
print("Run on GPU %s" % str(os.environ['CUDA_VISIBLE_DEVICES']))
main(args.mode, args.model_type, args.models_folder, args.dropout, args.learning_rate, args.loss_function,
args.batch_size, args.attack_level, args.num_modified_words, args.percentage_attacked_samples)
| 4,867 | 43.254545 | 117 |
py
|
fat-albert
|
fat-albert-master/abmn/src/ensemble_folder/refine.py
|
import numpy as np
predictions = np.loadtxt('preds.txt')
for i in predictions:
print(int(i))
| 98 | 15.5 | 37 |
py
|
fat-albert
|
fat-albert-master/abmn/src/core/stats.py
|
def dumb_method_to_test():
print("yay")
class Statistics:
def __init__(self, name):
self.name = name
self.min = 100000000000.0
self.max = 0.0
self.total = 0
self.instances = 0
self.values = []
def update(self, value):
"""
updates min, max, total with the given value
:param value:
:param min:
:param max:
:param total:
:return:
"""
if value < self.min:
self.min = value
if value > self.max:
self.max = value
self.total += value
self.instances += 1
self.values.append(value)
def update_from_statistics_object(self, statistics):
if statistics.min < self.min:
self.min = statistics.min
if statistics.max > self.max:
self.max = statistics.max
self.total += statistics.total
self.instances += statistics.instances
self.values.extend(statistics.values)
def get_average(self):
if self.instances == 0:
return 0.0
else:
return self.total / float(self.instances)
def __str__(self):
string = "%s\t min:\t%.2f, max:\t%.2f, average:\t%.2f, instances:\t%d" % (
self.name, self.min, self.max, self.get_average(), self.instances)
return string
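# Hedged usage sketch (illustrative addition, not part of the original file):
# Statistics accumulates min / max / mean over a stream of values.
if __name__ == '__main__':
    s = Statistics("sentence_length")
    for v in [3.0, 7.0, 5.0]:
        s.update(v)
    # expected output: sentence_length  min: 3.00, max: 7.00, average: 5.00, instances: 3
    print(s)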
| 1,362 | 26.26 | 82 |
py
|
fat-albert
|
fat-albert-master/abmn/src/core/model.py
|
import re
import tensorflow as tf
initializer = "xavier"
def _activation_summary(x):
    # strip an optional multi-tower prefix ("tower_N/") from the op name before logging summaries
    tensor_name = re.sub('tower_[0-9]*/', '', x.op.name)
tf.compat.v1.summary.histogram(tensor_name + '/activations', x)
tf.compat.v1.summary.scalar(tensor_name + '/sparsity',
tf.nn.zero_fraction(x))
# Dropout operation
def dropout(x, training, rate):
print("Dropout: " + str(training))
res = tf.compat.v1.layers.dropout(x, rate=rate, training=training)
return res
# Preparing Layer operation, projects embeddings to lower dimension
def prep_embedding(x, hidden_size):
left = tf.compat.v1.layers.dense(x, hidden_size, activation=tf.nn.sigmoid, kernel_initializer=get_initializer(initializer),
bias_initializer=tf.compat.v1.constant_initializer(value=0))
right = tf.compat.v1.layers.dense(x, hidden_size, activation=tf.nn.tanh, kernel_initializer=get_initializer(initializer),
bias_initializer=tf.compat.v1.constant_initializer(value=0))
mult = tf.multiply(left, right)
return mult
# Attention Layer operation, input x2 is weighted with input x1
def prep_attention(x1, x2, hidden_size):
# original approach by Wang and Jiang, but sometimes leads to bad performance here
# left = tf.layers.dense(x1, hidden_size)
# m = tf.matmul(left, x2, transpose_b=True)
m = tf.matmul(x1, x2, transpose_b=True)
g = tf.nn.softmax(tf.transpose(a=m))
h = tf.matmul(g, x1)
return h
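# Shape sketch for prep_attention (hypothetical sizes, for orientation only):
# x1: [n1 x hidden_size], x2: [n2 x hidden_size]
#   m = x1 @ x2^T    -> [n1 x n2]
#   g = softmax(m^T) -> [n2 x n1], one attention distribution over x1 per row of x2
#   h = g @ x1       -> [n2 x hidden_size], i.e. x1 re-weighted for every element of x2
# (hidden_size is unused in this simplified variant; it remains for the commented-out original.)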
# SUBMULT comparison operation
def compare_submult(x1, x2, hidden_size):
sub = tf.subtract(x1, x2)
pow = tf.multiply(sub, sub)
mult = tf.multiply(x1, x2)
con = tf.concat([pow, mult], 1)
nn = tf.compat.v1.layers.dense(con, hidden_size, activation=tf.nn.relu, kernel_initializer=get_initializer(initializer),
bias_initializer=tf.compat.v1.constant_initializer())
return nn
# MULT comparison operation
def compare_mult(x1, x2):
return tf.multiply(x1, x2)
# CNN layer operation, returns also attention weighted word sequences for visualization
# Warning: Only use padding=SAME at the moment, otherwise attention visualization will throw an error.
# param filter_visualization obsolete, aggregate all filters now
def cnn(x, filter_sizes, hidden_size, filter_visualization=3, padding='SAME'):
"""
:param x: input of size [first_dim x num_words x 2*hidden_size]
for example in the 1st stage we have:
q: [1 x num_words x 2* hidden_size]
a: [num_answers x num_words x 2* hidden_size]
p: [num_sentences x num_words x 2* hidden_size]
:param filter_sizes:
:param hidden_size:
:param filter_visualization:
:param padding: SAME means the 2nd dimension of the output of the conv1d is the same as its input (num_words)
:return: concatenated 1dconv outputs for each filter in filter_sizes
con dimensions: [first_dim x hidden_size * num_filter_sizes]
"""
merge = []
attention_vis = []
for filter_size in filter_sizes:
# conv_branch: [first_dim x num_words x 1* hidden_size]
conv_branch = tf.compat.v1.layers.conv1d(
inputs=x,
# use as many filters as the hidden size
filters=hidden_size,
kernel_size=[filter_size],
use_bias=True,
activation=tf.nn.relu,
trainable=True,
padding=padding,
kernel_initializer=get_initializer(initializer),
bias_initializer=tf.compat.v1.constant_initializer(),
name='conv_' + str(filter_size)
)
attention_vis.append(conv_branch)
# pool over the words to obtain: [first_dim x 1* hidden_size]
pool_branch = tf.reduce_max(input_tensor=conv_branch, axis=1)
merge.append(pool_branch)
# num_filter_sizes * [first_dim x hidden_size] -> [first_dim x hidden_size * num_filter_sizes]
con = tf.concat(merge, axis=1)
attention_vis = tf.stack(attention_vis, axis=0)
attention_vis = tf.reduce_mean(input_tensor=attention_vis, axis=0)
return con, attention_vis
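# Shape sketch for cnn() with hypothetical sizes (not asserted anywhere in the code):
# x: [5 x 20 x 300] (e.g. 5 sentences, 20 words, 2*hidden_size features),
# filter_sizes=[1, 3, 5], hidden_size=150, padding='SAME'
#   each conv1d branch  -> [5 x 20 x 150]
#   max-pool over words -> [5 x 150]
#   con (3 branches concatenated)       -> [5 x 450]
#   attention_vis (mean over branches)  -> [5 x 20 x 150]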
def lstm(inputs, hidden_size, mode='lstm'):
"""
RNN part of the aggregation function
:param inputs:
:param hidden_size:
:param mode: [lstm: unidirectional lstm, gru: unidirectional gru, bi: bidirectional lstm]
in the paper, we only report results with the unidirectional lstm (default setting here)
:return:
"""
# unidirectional lstm or gru
if mode == 'lstm' or mode == 'gru':
cell = get_cell(mode, hidden_size)
output, _ = tf.compat.v1.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
output = tf.reduce_max(input_tensor=output, axis=1)
print("MODE: Reduce unidirectional " + mode)
# bidirectional lstm
else:
cell1 = get_cell('lstm', hidden_size)
cell2 = get_cell('lstm', hidden_size)
output, _ = tf.compat.v1.nn.bidirectional_dynamic_rnn(cell1, cell2, inputs, dtype=tf.float32)
output_fw, output_bw = output
if mode == "bi":
output = tf.concat([output_fw, output_bw], 2)
output = tf.reduce_max(input_tensor=output, axis=1)
print("MODE: Reduce Bidirectional " + mode)
return output
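# Shape sketch for lstm() with hypothetical sizes:
# inputs: [5 x 20 x 300], hidden_size=150
#   mode 'lstm'/'gru': dynamic_rnn output [5 x 20 x 150] -> max over words -> [5 x 150]
#   mode 'bi': forward/backward outputs concatenated     -> [5 x 20 x 300] -> max -> [5 x 300]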
# Prediction layer, computes final scores for answer candidates
def softmax_prep(x, hidden_size):
# [num_answers x Y] -> [num_answers x hidden_size]
inner = tf.compat.v1.layers.dense(x, hidden_size, activation=tf.nn.tanh, kernel_initializer=get_initializer(initializer),
bias_initializer=tf.compat.v1.constant_initializer(0))
# [num_answers x Y] -> [num_answers x 1]
lin = tf.compat.v1.layers.dense(inner, 1, kernel_initializer=get_initializer(initializer),
bias_initializer=tf.compat.v1.constant_initializer(0))
return lin
# return global step for model savers
def get_global_step():
with tf.device('/cpu:0'):
gs = tf.compat.v1.get_variable('global_step', initializer=tf.constant(0), dtype=tf.int32)
return gs
# Parameter update operation with TensorBoard logging
def update_params(total_loss, global_step, opt_name, learning_rate):
optimizer = get_optimizer(opt_name, learning_rate)
grads = optimizer.compute_gradients(total_loss)
update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
apply_gradient_op = optimizer.apply_gradients(grads, global_step=global_step)
for var in tf.compat.v1.trainable_variables():
tf.compat.v1.summary.histogram(var.op.name, var)
for grad, var in grads:
if grad is not None:
tf.compat.v1.summary.histogram(var.op.name + '/gradients', grad)
with tf.control_dependencies([apply_gradient_op]):
train_op = tf.no_op(name='train')
return train_op
# loss function for hinge loss
def compute_hinge_loss_sample(data):
logits, labels = data
H = tf.reduce_max(input_tensor=logits * (1 - labels), axis=0)
L = tf.nn.relu((1 - logits + H) * labels)
final_loss = tf.reduce_mean(input_tensor=tf.reduce_max(input_tensor=L, axis=0))
return final_loss
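# Hedged worked example for the hinge loss above (made-up numbers):
# logits = [1.2, 1.0, 0.3], labels = [1, 0, 0]
#   H = max(logits * (1 - labels)) = max(0.0, 1.0, 0.3) = 1.0   (best wrong-answer score)
#   L = relu((1 - logits + H) * labels) = relu([0.8, 0.0, 0.0])
#   final_loss = 0.8, i.e. the correct answer misses the margin of 1 over the best wrong
#   answer by 0.8; with logits [2.5, 1.0, 0.3] the loss would be 0.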
# loss function for cross entropy loss
def compute_entropy_loss_sample(data):
logits, labels = data
final_loss = tf.nn.softmax_cross_entropy_with_logits(labels=tf.stop_gradient(labels), logits=logits)
return final_loss
def compute_batch_mean_loss(logits, labels, loss_func):
if loss_func == "hinge":
print("Loss: HINGE")
loss = tf.map_fn(compute_hinge_loss_sample, elems=[logits, labels], dtype=tf.float32)
elif loss_func == "entropy":
print("Loss: ENTROPY")
loss = tf.map_fn(compute_entropy_loss_sample, elems=[logits, labels], dtype=tf.float32)
else:
print("Loss: ENTROPY")
loss = tf.map_fn(compute_entropy_loss_sample, elems=[logits, labels], dtype=tf.float32)
# apply L2 regularization (only for weights, not for bias)
vars = tf.compat.v1.trainable_variables()
vars_f = [v for v in vars if 'embedding' not in v.name]
lossL2 = tf.add_n([tf.nn.l2_loss(v) for v in vars_f
if 'bias' not in v.name]) * 0.0001
loss_mean = tf.reduce_mean(input_tensor=loss + lossL2, name='batch_loss')
return loss_mean
# accuracy computation op
def compute_accuracies(logits, labels, dim):
probabs = tf.nn.softmax(logits)
l_cast = tf.cast(labels, dtype=tf.int64)
correct_prediction = tf.equal(tf.argmax(input=probabs, axis=dim), tf.argmax(input=l_cast, axis=dim))
accuracy = tf.cast(correct_prediction, tf.float32)
return accuracy
# probability computation op with softmax
def compute_probabilities(logits):
# print("logits %s" % str(logits))
# logits = tf.Print(logits, [logits], message="logits to compute softmax")
probs = tf.nn.softmax(logits)
return probs
# casting all labels > 0 to 1 (needed only for Wikiqa with multiple correct answers)
def cast_labels(labels):
zero = tf.cast(0.0, dtype=tf.float32)
l_cast = tf.cast(labels, dtype=tf.float32)
zeros = tf.zeros_like(labels)
condition = tf.greater(l_cast, zero)
res = tf.compat.v1.where(condition, tf.ones_like(labels), zeros)
return res
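# Hedged example for cast_labels (made-up numbers): labels [0., 2., 0., 1.] -> [0., 1., 0., 1.],
# i.e. every strictly positive label is mapped to 1 so that multi-label WikiQA targets become binary.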
# get weight initializer op
def get_initializer(name):
if (name == "variance"):
return tf.compat.v1.variance_scaling_initializer()
elif (name == "normal"):
return tf.compat.v1.random_normal_initializer(stddev=0.1)
elif (name == "uniform"):
return tf.compat.v1.random_uniform_initializer(minval=-0.1, maxval=0.1)
elif (name == "truncated"):
return tf.compat.v1.truncated_normal_initializer(stddev=0.1)
elif (name == "xavier"):
return tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg", distribution="uniform")
else:
return tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg", distribution="uniform")
# get optimizer op
# (Adamax support was removed after optimizer testing since its implementation was not compatible with newer TensorFlow versions)
def get_optimizer(name, lr):
if (name == "adam"):
print("optimizer: ADAM")
return tf.compat.v1.train.AdamOptimizer(learning_rate=lr)
elif (name == "sgd"):
print("optimizer: SGD")
return tf.compat.v1.train.GradientDescentOptimizer(learning_rate=(lr))
else:
return tf.compat.v1.train.AdamOptimizer(learning_rate=lr)
def get_cell(mode, hidden_size):
if (mode == "lstm"):
return tf.compat.v1.nn.rnn_cell.LSTMCell(hidden_size)
elif (mode == "gru"):
return tf.compat.v1.nn.rnn_cell.GRUCell(hidden_size)
else:
return tf.compat.v1.nn.rnn_cell.LSTMCell(hidden_size)
| 10,768 | 38.16 | 127 |
py
|
fat-albert
|
fat-albert-master/abmn/src/core/util.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# contains utility functions, also takes care of creation of vocabulary and word embedding files
import numpy as np
import tensorflow as tf
import re
import os
import time
#from tensorflow.contrib.tensorboard.plugins import projector
from distutils.dir_util import copy_tree
from urllib.request import urlretrieve
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import zipfile
import _pickle as pickle
vocab = {}
vectors = {}
# logging of question types, but no further usage in this work
question_types = {
"what": 0,
"who": 1,
"why": 2,
"how": 3,
"where": 4
}
def get_question_keys():
keys = ["other", "what", "who", "why", "how", "where"]
return keys
# first entry of vocab reserved for empty word (zero vector for padding)
def init_vocab(dim):
vocab[''] = 0
vectors[0] = np.zeros(dim)
def restore_vocab(embedding_dir):
if (os.path.exists(embedding_dir + "/vocab.pickle") and os.path.exists(
embedding_dir + "/vectors.pickle")):
global vocab, vectors
vectors_array, rev_vocab = load_embeddings(embedding_dir)
vectors = {k: v for k, v in enumerate(vectors_array)}
vocab = dict(zip(rev_vocab.values(), rev_vocab.keys()))
# load embeddings from files
def load_embeddings(embedding_dir):
print("Loading vectors.pickle and vocab.pickle from %s" % embedding_dir)
with open(embedding_dir + "/vectors.pickle", 'rb') as handle:
loaded_vectors = pickle.load(handle)
with open(embedding_dir + "/vocab.pickle", 'rb') as handle2:
loaded_vocab = pickle.load(handle2)
print("Loaded vocab of length %d" % len(loaded_vocab))
return loaded_vectors, loaded_vocab
# load glove model into dictionary
def loadGloveModel(gloveFile):
print("Loading pretrained GloVe embeddings from %s" % gloveFile)
word2vec = {}
fin = open(gloveFile, encoding="utf8")
for i, line in enumerate(fin):
items = line.replace('\r', '').replace('\n', '').split(' ')
if len(items) < 10:
continue
word = items[0]
vect = np.array([float(i) for i in items[1:] if len(i) >= 1])
word2vec[word] = vect.tolist()
if i % 10000 == 0:
print("Loaded %d vectors already" %i)
return word2vec
# remove special characters and lowercase words for finding them in GloVe
def normalize_text(s):
special_chars = ',`´&.:!?;()$\"\''
norm = ''.join(re.findall('[^' + special_chars + ']', s)).strip().lower()
norm = list(filter(None, norm.split()))
# print(norm)
return norm
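# Hedged example: normalize_text("Who's  there?") -> ['whos', 'there']
# (special characters are stripped, text is lower-cased and split on whitespace).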
def get_word_vector(embeddings, word, size, warn_no_embeddings=False):
"""
gets index of the word in the stored vocabulary, or updates the vocabulary if the word is not stored yet
:param embeddings:
:param word:
:param size:
:param warn_no_embeddings: prints warning if word is not in vocabulary yet and no pretrained embeddings are provided
:return:
"""
# print("vocab with %d entries" % len(vocab))
if word in vocab:
index = vocab[word]
else:
index = len(vocab)
if not embeddings and warn_no_embeddings:
print("New word %s in vocab recognized, please provide pretrained embeddings to look for this word's vector"
% word)
return False
elif word in embeddings:
vec = embeddings[word]
# TODO unknown words during evaluation are each assigned their own random vector
# TODO could implement a vocabulary entry for unknown words
else:
vec = np.random.uniform(-1.3, 1.3, size)
vocab[word] = index
vectors[index] = vec
return index
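# Hedged usage note (illustrative, the glove_dict name is hypothetical): during preprocessing,
# get_word_vector(glove_dict, 'movie', 300) returns the existing index of 'movie' or appends a new
# vocab entry, taking the vector from the pretrained embeddings when available and otherwise
# falling back to a random uniform vector in [-1.3, 1.3].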
# save vocab and word embedding representations
def save_embeddings(embedding_dir, dim):
if not os.path.exists(embedding_dir):
os.makedirs(embedding_dir)
embedding = np.empty((len(vocab), dim), dtype=np.float32)
for key, value in vocab.items():
embedding[value] = vectors[value]
rev_vocab = dict(zip(vocab.values(), vocab.keys()))
with open(embedding_dir + '/vocab.pickle', 'wb') as handle:
pickle.dump(rev_vocab, handle)
with open(embedding_dir + '/vectors.pickle', 'wb') as handle:
pickle.dump(embedding, handle)
print("Saved embeddings to %s with vocab size %d" % (embedding_dir, len(vocab)))
return len(rev_vocab)
# create backup copy of model after each training epoch
def copy_model(src, gs):
dest = src + "_" + str(gs)
copy_tree(src, dest)
# save validation / testing results to score file
def save_eval_score(entry):
with open('outputs/scores.txt', 'a') as f:
f.write(entry + '\n')
# save a copy of the config file's settings to the model folder to save the used hyperparams
def save_config_values(module, target):
target += "_config.txt"
if not tf.io.gfile.exists(target):
vars = {}
if module:
vars = {key: value for key, value in module.__dict__.items() if
not (key.startswith('__') or key.startswith('_'))}
file = open(target, "a")
for key, value in vars.items():
file.write(str(key) + " : " + str(value) + "\n")
# download data set from url and save to filesystem if not present yet
def download_data(url, target):
urlretrieve(url, target + "/data.zip")
with zipfile.ZipFile(target + "/data.zip", "r") as zip_ref:
zip_ref.extractall(target)
def _to_valid_filename(str_):
    str_ = re.sub(r'[^\w\s-]', '', str_).strip().lower()
    str_ = re.sub(r'[-\s]+', '-', str_)
if len(str_) > 200:
str_ = str_[:100] + str_[len(str_) - 100:]
return str_
# create attention visualization and save as plot image
def plot_attention(value, a_words, title_text, y_lab, savepath, filename):
value, a_words = words2chars(value, a_words)
value = value[::-1]
a_words = a_words[::-1]
y_lab = y_lab[::-1]
filename = savepath + "/" + _to_valid_filename(filename) + ".png"
if not os.path.isfile(filename):
if not os.path.exists(savepath):
os.makedirs(savepath)
w_count = min(200, value.shape[1])
plt.clf()
a_words = a_words[:, :w_count]
value = value[:, :w_count]
x = []
for i in range(0, w_count):
x.append(i)
plt.figure(figsize=(19, 8))
heatmap = plt.pcolor(value, cmap="YlOrRd")
for y in range(value.shape[0]):
for z in range(value.shape[1]):
plt.text(z + 0.5, y + 0.5, a_words[y, z],
horizontalalignment='center',
verticalalignment='center', )
plt.colorbar(heatmap)
plt.ylabel('sentence')
plt.xlabel('word')
plt.title(title_text)
plt.autoscale(True)
plt.xticks(x, a_words[0], rotation='vertical')
plt.yticks(range(0, value.shape[0]), y_lab)
plt.axes().get_xaxis().set_visible(False)
# plt.show()
plt.savefig(filename)
plt.close()
# helper function for visualization, convert words to chars with values for more fine-grained grid in heatmap
def words2chars(values, words):
sents = []
char_vals = []
for i, sent in enumerate(words):
chars = []
char_val = []
val = values[i]
for j, word in enumerate(sent):
value = val[j]
for k, ch in enumerate(word):
chars.append(ch)
char_val.append(value)
chars.append(" ")
char_val.append(value)
char_vals.append(char_val)
sents.append(chars)
chars_pad = np.empty([len(sents), len(max(sents, key=lambda x: len(x)))], dtype=np.chararray)
chars_pad[:] = ''
vals_pad = np.zeros_like(chars_pad, dtype=np.float32)
for i, j in enumerate(sents):
chars_pad[i][0:len(j)] = j
for i, j in enumerate(char_vals):
vals_pad[i][0:len(j)] = j
return vals_pad, chars_pad
# mean average precision for batch
def average_precision(probabs, labels, a_counts):
m_ap = 0.0
for i, lab in enumerate(labels):
ap = example_precision(logits=probabs[i], labels=labels[i], a_count=a_counts[i])
m_ap += ap
m_ap = m_ap / len(labels)
return np.float32(m_ap)
# mean reciprocal rank for batch
def average_rank(probabs, labels, a_counts):
m_ar = 0.0
for i, lab in enumerate(labels):
ar = example_rank(logits=probabs[i], labels=labels[i], a_count=a_counts[i])
# print(ap)
m_ar += ar
m_ar = m_ar / len(labels)
return np.float32(m_ar)
# mean reciprocal rank for single sample
def example_rank(logits, labels, a_count):
labels = labels[:a_count]
logits = logits[:a_count]
mrr = 0
extracted = {}
for i, label in enumerate(labels):
if label > 0.0:
extracted[i] = 1
indices = np.argsort(logits)[::-1]
for j, index in enumerate(indices):
if index in extracted:
mrr = 1 / (j + 1)
break
if (mrr > 0):
return mrr
else:
return 0.0
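# Hedged sanity check for example_rank (illustrative numbers):
# logits=[0.1, 0.7, 0.2], labels=[0, 0, 1], a_count=3 -> ranking by score is [1, 2, 0],
# the correct answer (index 2) sits at rank 2, so the returned reciprocal rank is 1/2.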
# mean average precision for single sample
def example_precision(logits, labels, a_count):
labels = labels[:a_count]
logits = logits[:a_count]
map_idx = 0
map_val = 0
extracted = {}
for i, label in enumerate(labels):
if label > 0.0:
extracted[i] = 1
indices = np.argsort(logits)[::-1]
for j, index in enumerate(indices):
if index in extracted:
map_idx = map_idx + 1
map_val = map_val + (map_idx / (j + 1))
if (map_idx > 0):
map_val = map_val / map_idx
return map_val
else:
return 0.0
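# Hedged sanity check for example_precision (illustrative numbers):
# logits=[0.9, 0.8, 0.1, 0.05], labels=[1, 0, 1, 0], a_count=4
# -> relevant answers are found at ranks 1 and 3, so AP = (1/1 + 2/3) / 2 ~ 0.83.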
def print_predictions(outfolder, step, gold, predicted_probabilities, split):
prediction = np.argmax(predicted_probabilities)
correct = int(prediction == gold)
if split == "val":
line = "question %d\t%d\t%d\t%d\t%s" % (step, gold, prediction, correct, str(predicted_probabilities))
else:
line = "question %d\t%d\t%s" % (step, prediction, str(predicted_probabilities))
# header, overwrite old file
if step == 0:
mode = "w"
if split == "val":
header = "question\tgold\tpredicted\tcorrect\tpredicted probabilities"
else:
header = "question\tpredicted\tpredicted probabilities"
line = "%s\n%s" % (header, line)
# append info to file
else:
mode = "a"
with open(outfolder + "/probabilities.txt", mode) as file:
file.write(line + "\n")
def print_sentence_attentions(outfolder, step, attention):
# overwrite old file
if step == 0:
mode = "w"
# append to file
else:
mode = "a"
with open(outfolder + "/attentions.txt", mode) as file:
# write header
if step == 0:
file.write("question\tanswer\tsentence attention distribution\n")
for i, att in enumerate(attention):
file.write("question %d\t%d\t%s\n" % (step, i, str(attention[i])))
| 11,079 | 30.036415 | 120 |
py
|
fat-albert
|
fat-albert-master/abmn/src/core/__init__.py
| 1 | 0 | 0 |
py
|
|
fat-albert
|
fat-albert-master/abmn/src/movieqa/conf_cnn.py
|
# hierarchical CNN training config
# model config
FILTER_SIZES = [1, 3, 5]
HIDDEN_SIZE = 150
# train config
INITIAL_LEARNING_RATE = 0.001
BATCH_SIZE = 30
MAX_STEPS = 50000
DROPOUT_RATE = 0.0
LOSS_FUNC = 'entropy'
OPTIMIZER = "adam"
NUM_EPOCHS = 1
| 249 | 15.666667 | 34 |
py
|
fat-albert
|
fat-albert-master/abmn/src/movieqa/conf_cnn_word_level.py
|
# word-level-only CNN training config
# model config
FILTER_SIZES = [1, 3, 5]
HIDDEN_SIZE = 150
# train config
INITIAL_LEARNING_RATE = 0.002
BATCH_SIZE = 30
MAX_STEPS = 50000
DROPOUT_RATE = 0.0
LOSS_FUNC = 'entropy'
OPTIMIZER = "adam"
NUM_EPOCHS = 1
| 249 | 16.857143 | 37 |
py
|
fat-albert
|
fat-albert-master/abmn/src/movieqa/run_lstm.py
|
# train and evaluation functions for hierarchical LSTM model
import glob
import os
import tensorflow as tf
tf.compat.v1.disable_v2_behavior()
import time
import core.model as model
import core.util as util
import movieqa.data_conf as data_conf
import movieqa.conf_lstm as model_conf
import numpy as np
from tensorflow.python import pywrap_tensorflow
if not os.path.exists(data_conf.RECORD_DIR):
os.makedirs(data_conf.RECORD_DIR)
import movieqa.preprocess as pp
pp.create_complete_dataset()
def load_embeddings():
global vectors, vocab, embeddings, place, set_embeddings_op
vectors, vocab = util.load_embeddings(data_conf.EMBEDDING_DIR)
data_conf.VOCAB_SIZE = len(vectors)
# init word embeddings
embeddings = tf.Variable(
tf.random.uniform([data_conf.VOCAB_SIZE, data_conf.EMBEDDING_SIZE], -1.3, 1.3), name="embeddings",
trainable=False)
place = tf.compat.v1.placeholder(tf.float32, shape=embeddings.shape)
set_embeddings_op = tf.compat.v1.assign(embeddings, place, validate_shape=True)
load_embeddings()
ANSWER_COUNT = 5
dropout_op = tf.compat.v1.make_template(name_='dropout', func_=model.dropout)
prepare_op = tf.compat.v1.make_template(name_='prepare_embedding_a', func_=model.prep_embedding)
attention_op = tf.compat.v1.make_template(name_='prepare_attention', func_=model.prep_attention)
compare_op = tf.compat.v1.make_template(name_='compare', func_=model.compare_submult)
compare_op_2 = tf.compat.v1.make_template(name_='compare_2', func_=model.compare_submult)
convolution_op = tf.compat.v1.make_template(name_='convolution', func_=model.cnn)
convolution_op_2 = tf.compat.v1.make_template(name_='convolution1', func_=model.cnn)
soft_prep_op = tf.compat.v1.make_template(name_='softmax', func_=model.softmax_prep)
update_op = tf.compat.v1.make_template(name_='update', func_=model.update_params)
lstm_op = tf.compat.v1.make_template(name_='lstm1', func_=model.lstm)
lstm_op_2 = tf.compat.v1.make_template(name_='lstm2', func_=model.lstm)
# load word embedding representation for vocab indices
def get_emb(indices):
zero = tf.cast(0, dtype=tf.int64)
zeros = tf.zeros(shape=(tf.shape(input=indices)[0], data_conf.EMBEDDING_SIZE))
condition = tf.greater(indices, zero)
res = tf.compat.v1.where(condition, tf.nn.embedding_lookup(params=embeddings, ids=indices), zeros)
return res
# main predict op for batch
def predict_batch(data, training):
def predict_step(data):
sample = data
with tf.compat.v1.name_scope("question"):
q = sample[0]
q = get_emb(q)
with tf.compat.v1.name_scope("answers"):
aws = sample[1]
aws = tf.map_fn(get_emb, aws, dtype=tf.float32)
with tf.compat.v1.name_scope("plot"):
p = sample[2]
# Keep the word ids for the plot for printing the original words
# (needed in run_adversarial because plot words are changed in the graph)
p_word_indices = p
p = tf.map_fn(get_emb, p, dtype=tf.float32)
p_drop = dropout_op(p, training, model_conf.DROPOUT_RATE)
p_drop = tf.reshape(p_drop, shape=(tf.shape(input=p)[0], -1, data_conf.EMBEDDING_SIZE))
p_prep = prepare_op(p_drop, model_conf.HIDDEN_SIZE)
q_drop = dropout_op(q, training, model_conf.DROPOUT_RATE)
q_prep = prepare_op(q_drop, model_conf.HIDDEN_SIZE)
aws_drop = dropout_op(aws, training, model_conf.DROPOUT_RATE)
aws_drop = tf.reshape(aws_drop, shape=(ANSWER_COUNT, -1, data_conf.EMBEDDING_SIZE))
aws_prep = prepare_op(aws_drop, model_conf.HIDDEN_SIZE)
# stage one
with tf.compat.v1.name_scope("plot_loop"):
def p_sent_step(p_sent):
h = attention_op(q_prep, p_sent, model_conf.HIDDEN_SIZE)
t = compare_op(p_sent, h, model_conf.HIDDEN_SIZE)
with tf.compat.v1.name_scope("answer_loop"):
def a_step(a):
h2 = attention_op(a, p_sent, model_conf.HIDDEN_SIZE)
t2 = compare_op(p_sent, h2, model_conf.HIDDEN_SIZE)
return t2
a_feats = tf.map_fn(a_step, elems=aws_prep, name="answer_step")
return t, a_feats
tqs, tas = tf.map_fn(p_sent_step, elems=p_prep, dtype=(tf.float32, tf.float32), name="plot_step")
with tf.compat.v1.name_scope("prepare_convolution_stage_1"):
tas = tf.einsum('ijkl->jikl', tas)
q_prep = tf.expand_dims(q_prep, 0)
pq_con = tf.concat([q_prep, q_prep], axis=2)
pa_con = tf.concat([aws_prep, aws_prep], axis=2)
q_sent_feats = lstm_op(pq_con, model_conf.HIDDEN_SIZE)
a_sent_feats = lstm_op(pa_con, model_conf.HIDDEN_SIZE)
with tf.compat.v1.name_scope("answer_loop"):
def a_conv(elems):
ta, a_sent = elems
con = tf.concat([tqs, ta], axis=2)
word_att = tf.reduce_mean(input_tensor=con, axis=2)
pqa_sent_feats = lstm_op(con, model_conf.HIDDEN_SIZE)
# stage two from here on sentence level
hq_sent = attention_op(q_sent_feats, pqa_sent_feats, model_conf.HIDDEN_SIZE)
a_sent = tf.expand_dims(a_sent, 0)
ha_sent = attention_op(a_sent, pqa_sent_feats, model_conf.HIDDEN_SIZE)
tq_sent = compare_op_2(pqa_sent_feats, hq_sent, model_conf.HIDDEN_SIZE)
ta_sent = compare_op_2(pqa_sent_feats, ha_sent, model_conf.HIDDEN_SIZE)
sent_feats = tf.concat([tq_sent, ta_sent], 1)
return sent_feats, word_att
t_sent, word_atts = tf.map_fn(a_conv, elems=[tas, a_sent_feats], dtype=(tf.float32, tf.float32))
r_final_feats = lstm_op_2(t_sent, model_conf.HIDDEN_SIZE)
result = soft_prep_op(r_final_feats, model_conf.HIDDEN_SIZE)
result = tf.reshape(result, [-1])
sent_atts = tf.reduce_mean(input_tensor=t_sent, axis=2)
sent_soft = tf.nn.softmax(sent_atts)
return result, word_atts, sent_soft, p_word_indices
predict_step_op = tf.compat.v1.make_template(name_='predict_step', func_=predict_step)
batch_predictions, batch_w_atts, batch_s_atts, batch_p_word_indices = tf.map_fn(fn=predict_step_op,
elems=data, infer_shape=False,
dtype=(
tf.float32, tf.float32,
tf.float32,
tf.int64))
return batch_predictions, batch_w_atts, batch_s_atts, batch_p_word_indices
# get single example from set's record files
def get_single_sample(sample):
context_features = {
"question_size": tf.io.FixedLenFeature([], dtype=tf.int64),
"question_type": tf.io.FixedLenFeature([], dtype=tf.int64),
"movie_id": tf.io.FixedLenFeature([], dtype=tf.string),
}
sequence_features = {
"question": tf.io.VarLenFeature(dtype=tf.int64),
"labels": tf.io.VarLenFeature(dtype=tf.float32),
"answers": tf.io.VarLenFeature(dtype=tf.int64),
"plot": tf.io.VarLenFeature(dtype=tf.int64)
}
context_parsed, sequence_parsed = tf.io.parse_single_sequence_example(
serialized=sample,
context_features=context_features,
sequence_features=sequence_features
)
label = sequence_parsed['labels']
answers = sequence_parsed['answers']
plot = sequence_parsed['plot']
question = sequence_parsed['question']
question_type = context_parsed['question_type']
movie_id = context_parsed['movie_id']
plot = tf.sparse.to_dense(plot)
answers = tf.sparse.to_dense(answers)
question = tf.sparse.to_dense(question)
label = tf.sparse.to_dense(label)
answers = tf.reshape(answers, shape=[ANSWER_COUNT, -1])
label = tf.reshape(label, shape=[ANSWER_COUNT])
question = tf.reshape(question, shape=[-1])
return question, answers, label, movie_id, plot, question_type
# main training function for one epoch
def train_model():
print("train RNN-LSTM model")
global_step = tf.compat.v1.train.get_or_create_global_step()
if not tf.io.gfile.exists(data_conf.TRAIN_DIR):
print("RESTORING WEIGHTS")
tf.io.gfile.makedirs(data_conf.TRAIN_DIR)
util.save_config_values(data_conf, data_conf.TRAIN_DIR + "/data")
util.save_config_values(model_conf, data_conf.TRAIN_DIR + "/model")
filenames = glob.glob(data_conf.TRAIN_RECORD_PATH + '/*')
print("Reading training dataset from %s" % filenames)
dataset = tf.data.TFRecordDataset(filenames)
dataset = dataset.map(get_single_sample)
dataset = dataset.shuffle(buffer_size=9000)
dataset = dataset.repeat(model_conf.NUM_EPOCHS)
batch_size = model_conf.BATCH_SIZE
dataset = dataset.padded_batch(model_conf.BATCH_SIZE, padded_shapes=(
[None], [ANSWER_COUNT, None], [None], (), [None, None], ()))
iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
next_q, next_a, next_l, next_plot_ids, next_plots, next_q_types = iterator.get_next()
logits, _, _, _ = predict_batch([next_q, next_a, next_plots], training=True)
probabs = model.compute_probabilities(logits=logits)
loss_batch = model.compute_batch_mean_loss(logits, next_l, model_conf.LOSS_FUNC)
accuracy = model.compute_accuracies(logits=logits, labels=next_l, dim=1)
accuracy_batch = tf.reduce_mean(input_tensor=accuracy)
tf.compat.v1.summary.scalar("train_accuracy", accuracy_batch)
tf.compat.v1.summary.scalar("train_loss", loss_batch)
training_op = update_op(loss_batch, global_step, model_conf.OPTIMIZER, model_conf.INITIAL_LEARNING_RATE)
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
config.graph_options.optimizer_options.global_jit_level = tf.compat.v1.OptimizerOptions.OFF
with tf.compat.v1.train.MonitoredTrainingSession(
checkpoint_dir=data_conf.TRAIN_DIR,
save_checkpoint_secs=60,
save_summaries_steps=5,
hooks=[tf.estimator.StopAtStepHook(last_step=model_conf.MAX_STEPS),
], config=config) as sess:
step = 0
total_acc = 0.0
# print("Feeding embeddings %s of size %s" % (str(vectors), len(vectors)))
sess.run(set_embeddings_op, feed_dict={place: vectors})
while not sess.should_stop():
_, loss_val, acc_val, probs_val, lab_val, gs_val = sess.run(
[training_op, loss_batch, accuracy_batch, probabs, next_l, global_step])
print(probs_val)
print(lab_val)
print("Batch loss: " + str(loss_val))
print("Batch acc: " + str(acc_val))
step += 1
total_acc += acc_val
print("Total acc: " + str(total_acc / step))
print("Local_step: " + str(step * batch_size))
print("Global_step: " + str(gs_val))
print("===========================================")
util.copy_model(data_conf.TRAIN_DIR, gs_val)
# main eval / testing function
def eval_model():
if not tf.io.gfile.exists(data_conf.EVAL_DIR):
tf.io.gfile.makedirs(data_conf.EVAL_DIR)
util.save_config_values(data_conf, data_conf.EVAL_DIR + "/data_")
util.save_config_values(model_conf, data_conf.EVAL_DIR + "/model_")
filepath = data_conf.EVAL_RECORD_PATH + '/*'
filenames = glob.glob(filepath)
global_step = tf.compat.v1.train.get_or_create_global_step()
dataset = tf.data.TFRecordDataset(filenames)
dataset = dataset.map(get_single_sample)
batch_size = 1
dataset = dataset.padded_batch(batch_size, padded_shapes=(
[None], [ANSWER_COUNT, None], [None], (), [None, None], ()))
iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
next_q, next_a, next_l, next_plot_ids, next_plots, next_q_types = iterator.get_next()
logits, word_atts, sent_atts, pl_d = predict_batch([next_q, next_a, next_plots], training=False)
next_q_types = tf.reshape(next_q_types, ())
probabs = model.compute_probabilities(logits=logits)
loss_example = model.compute_batch_mean_loss(logits, next_l, model_conf.LOSS_FUNC)
accuracy_example = tf.reduce_mean(input_tensor=model.compute_accuracies(logits=logits, labels=next_l, dim=1))
# do not restore embeddings in case the vocabulary size has changed
#to_restore = tf.contrib.slim.get_variables_to_restore(exclude=["embeddings"])
saver = tf.compat.v1.train.Saver()
summary_writer = tf.compat.v1.summary.FileWriter(data_conf.TRAIN_DIR)
step = 0
total_acc = 0.0
total_loss = 0.0
p_counts = 0
last_p = ''
type_counts = np.zeros(6, dtype=np.int32)
type_accs = np.zeros(6)
with tf.compat.v1.Session() as sess:
init_op = tf.group(tf.compat.v1.global_variables_initializer(), tf.compat.v1.local_variables_initializer())
sess.run(init_op)
ckpt = tf.train.get_checkpoint_state(data_conf.TRAIN_DIR)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
else:
print('No checkpoint file found')
coord = tf.train.Coordinator()
threads = tf.compat.v1.train.start_queue_runners(sess=sess, coord=coord)
sess.run(set_embeddings_op, feed_dict={place: vectors})
try:
while not coord.should_stop():
loss_val, acc_val, probs_val, gs_val, q_type_val, q_val, labels_val, p_val, a_val, p_id_val, atts_val, \
sent_atts_val = sess.run([loss_example, accuracy_example, probabs, global_step, next_q_types, next_q,
next_l, next_plots, next_a, next_plot_ids, word_atts, sent_atts])
                type_accs[q_type_val + 1] += acc_val
                type_counts[q_type_val + 1] += 1
                total_loss += loss_val
                total_acc += acc_val
predicted_probabilities = probs_val[0]
sentence_attentions = sent_atts_val[0]
pred_index = np.argmax(probs_val[0])
labels = labels_val[0]
gold = np.argmax(labels)
filename = ''
q_s = ''
for index in q_val[0]:
word = (vocab[index])
q_s += (word + ' ')
filename += (word + '_')
filename += "?"
p_id = str(p_id_val[0].decode("utf-8"))
path = data_conf.EVAL_DIR + "/plots/" + p_id + "_" + str(step) + "/" # + filename
# write attention heat-map
if (p_id != last_p and p_counts < data_conf.PLOT_SAMPLES_NUM):
# if True:
for i, a_att in enumerate(atts_val[0]):
# a_att = np.mean(a_att, 2)
qa_s = q_s + "? (acc: " + str(acc_val) + ")\n "
for index in a_val[0][i]:
word = vocab[index]
qa_s += (word + ' ')
filename += word + "_"
lv = " (label: " + str(int(labels[i])) + " - prediction: " + (
str("%.2f" % (predicted_probabilities[i] * 100))) + "%)"
qa_s += lv
a_sents = []
y_labels = []
for j, att in enumerate(a_att):
a_s = []
y_labels.append(str("%.2f" % (sent_atts_val[0][i][j] * 100)) + "%")
for index in p_val[0][j]:
a_s.append(vocab[index])
a_sents.append(a_s)
util.plot_attention(np.array(a_att), np.array(a_sents), qa_s, y_labels, path, filename)
last_p = p_id
p_counts += 1
print("Sample loss: " + str(loss_val))
print("Sample labels: " + str(labels))
print("Sample probabilities: " + str(predicted_probabilities))
print("Sample acc: " + str(acc_val))
util.print_predictions(data_conf.EVAL_DIR, step, gold, predicted_probabilities, data_conf.MODE)
util.print_sentence_attentions(data_conf.EVAL_DIR, step, sentence_attentions)
step += 1
print("Total acc: " + str(total_acc / step))
print("Local_step: " + str(step * batch_size))
print("Global_step: " + str(gs_val))
print("===========================================")
except tf.errors.OutOfRangeError:
summary = tf.compat.v1.Summary()
summary.value.add(tag='validation_loss', simple_value=total_loss / step)
summary.value.add(tag='validation_accuracy', simple_value=(total_acc / step))
summary_writer.add_summary(summary, gs_val)
keys = util.get_question_keys()
if data_conf.MODE == "val":
with open(data_conf.EVAL_DIR + "/val_accuracy.txt", "a") as file:
file.write("global step: " + str(gs_val) + " - total accuracy: " + str(
total_acc / step) + "- total loss: " + str(total_loss / step) + "\n")
file.write("Types (name / count / correct / accuracy):\n")
for entry in zip(keys, type_counts, type_accs, (type_accs / type_counts)):
file.write(str(entry) + "\n")
file.write("===================================================================" + "\n")
util.save_eval_score(
"global step: " + str(gs_val) + " - acc : " + str(
total_acc / step) + " - total loss: " + str(
total_loss / step) + " - " + data_conf.TRAIN_DIR + "_" + str(gs_val))
finally:
coord.request_stop()
coord.join(threads)
if __name__ == '__main__':
train_model()
eval_model()
| 18,358 | 43.132212 | 120 |
py
|
fat-albert
|
fat-albert-master/abmn/src/movieqa/data_conf.py
|
# global initial configuration for the dataset paths for the models
# preprocessing options
P_MAX_WORD_PER_SENT_COUNT = 100
Q_MAX_WORD_PER_SENT_COUNT = 50
P_MAX_SENT_COUNT = 101
# model config
EMBEDDING_SIZE = 300
MODEL_NAME = 'model'
# path settings
DATA_PATH = "data"
RECORD_DIR = "records_new"
TRAIN_RECORD_PATH = RECORD_DIR + "/train"
TEST_RECORD_PATH = RECORD_DIR + "/test"
EMBEDDING_DIR = RECORD_DIR + '/embeddings_' + str(EMBEDDING_SIZE) + "d/"
OUTPUT_DIR = "outputs"
TRAIN_DIR = OUTPUT_DIR + '/train_' + MODEL_NAME + '/'
# vocab settings
PRETRAINED_EMBEDDINGS_PATH = '../../glove/glove.840B.' + str(EMBEDDING_SIZE) + 'd.txt'
VOCAB_SIZE = 27633
# eval config
MODE = 'val'
EVAL_FILE_VERSION = '' # for original, unmodified val and test files
EVAL_FILE = DATA_PATH + '/data/qa.json'
MODE_MODEL_NAME_EVAL_FILE_VERSION = MODE + '_' + MODEL_NAME + EVAL_FILE_VERSION
EVAL_DIR = OUTPUT_DIR + '/' + MODE_MODEL_NAME_EVAL_FILE_VERSION
EVAL_RECORD_PATH = RECORD_DIR + '/' + MODE + EVAL_FILE_VERSION
PLOT_SAMPLES_NUM = 0
| 1,023 | 29.117647 | 86 |
py
|
fat-albert
|
fat-albert-master/abmn/src/movieqa/conf_lstm.py
|
# hierarchical LSTM training config
# model config
HIDDEN_SIZE = 150
# train config
INITIAL_LEARNING_RATE = 0.0025
BATCH_SIZE = 30
MAX_STEPS = 50000
DROPOUT_RATE = 0.0
LOSS_FUNC = 'entropy'
OPTIMIZER = "adam"
NUM_EPOCHS = 1
| 227 | 15.285714 | 36 |
py
|
fat-albert
|
fat-albert-master/abmn/src/movieqa/get_validation_labels.py
|
import sys
from modify_movieqa import read_qa_json
qas = read_qa_json('data/data/qa.json', split='val')
labels = [qa.correct_index for qa in qas]
f = open("evaluation_file.txt", "x")
counter = 0
for l in labels:
f.write("test:" + str(counter) + " " + int(l))
counter = counter + 1
f.close()
| 300 | 24.083333 | 52 |
py
|
fat-albert
|
fat-albert-master/abmn/src/movieqa/run_cnn.py
|
# train and evaluation functions for hierarchical CNN model
import glob
import os
import tensorflow as tf
tf.compat.v1.disable_v2_behavior()
import time
import core.model as model
import core.util as util
import movieqa.data_conf as data_conf
import movieqa.conf_cnn as model_conf
from random import randrange
from random import shuffle
import numpy as np
if not os.path.exists(data_conf.RECORD_DIR):
os.makedirs(data_conf.RECORD_DIR)
import movieqa.preprocess as pp
pp.create_complete_dataset()
def load_embeddings():
global vectors, vocab, embeddings, place, set_embeddings_op
vectors, vocab = util.load_embeddings(data_conf.EMBEDDING_DIR)
data_conf.VOCAB_SIZE = len(vectors)
# init word embeddings
embeddings = tf.Variable(
tf.random.uniform([data_conf.VOCAB_SIZE, data_conf.EMBEDDING_SIZE], -1.3, 1.3), name="embeddings",
trainable=False)
place = tf.compat.v1.placeholder(tf.float32, shape=embeddings.shape)
set_embeddings_op = tf.compat.v1.assign(embeddings, place, validate_shape=True)
load_embeddings()
ANSWER_COUNT = 5
dropout_op = tf.compat.v1.make_template(name_='dropout', func_=model.dropout)
prepare_op = tf.compat.v1.make_template(name_='prepare_embedding_a', func_=model.prep_embedding)
attention_op = tf.compat.v1.make_template(name_='prepare_attention', func_=model.prep_attention)
compare_op = tf.compat.v1.make_template(name_='compare', func_=model.compare_submult)
compare_op_2 = tf.compat.v1.make_template(name_='compare_2', func_=model.compare_submult)
convolution_op = tf.compat.v1.make_template(name_='convolution', func_=model.cnn)
convolution_op_2 = tf.compat.v1.make_template(name_='convolution1', func_=model.cnn)
soft_prep_op = tf.compat.v1.make_template(name_='softmax', func_=model.softmax_prep)
update_op = tf.compat.v1.make_template(name_='update', func_=model.update_params)
# load embedding representations for vocab indices
def get_emb(indices):
zero = tf.cast(0, dtype=tf.int64)
zeros = tf.zeros(shape=(tf.shape(input=indices)[0], data_conf.EMBEDDING_SIZE))
condition = tf.greater(indices, zero)
res = tf.compat.v1.where(condition, tf.nn.embedding_lookup(params=embeddings, ids=indices), zeros)
return res
# main batch prediction op
def predict_batch(data, training):
def predict_step(data):
sample = data
q = sample[0]
q = get_emb(q)
answers = sample[1]
# [num_answers x num_words] -> [num_answers x num_words x emb_size]
answers = tf.map_fn(get_emb, answers, dtype=tf.float32)
# [num_sentences x num_words] -> [num_sentences x num_words x emb_size]
p = sample[2]
# Keep the word ids for the plot for printing the original words
# (needed in run_adversarial because plot words are changed in the graph)
p_word_indices = p
p = tf.map_fn(get_emb, p, dtype=tf.float32)
p_drop = dropout_op(p, training, model_conf.DROPOUT_RATE)
# [num_sentences x num_words x emb_size] -> [num_sentences x num_words x hidden_size]
p_prep = prepare_op(p_drop, model_conf.HIDDEN_SIZE)
q_drop = dropout_op(q, training, model_conf.DROPOUT_RATE)
# [num_words x hidden_size]
q_prep = prepare_op(q_drop, model_conf.HIDDEN_SIZE)
answers_drop = dropout_op(answers, training, model_conf.DROPOUT_RATE)
answers_drop = tf.reshape(answers_drop, shape=(ANSWER_COUNT, -1, data_conf.EMBEDDING_SIZE))
answers_prep = prepare_op(answers_drop, model_conf.HIDDEN_SIZE)
# stage one: compare each plot sentence to the question and each answer
def p_sent_step(p_sent):
# compare a plot sentence to the question
hq = attention_op(q_prep, p_sent, model_conf.HIDDEN_SIZE)
tq = compare_op(p_sent, hq, model_conf.HIDDEN_SIZE)
# compare a plot sentence to an answer
def a_step(a):
ha = attention_op(a, p_sent, model_conf.HIDDEN_SIZE)
ta = compare_op(p_sent, ha, model_conf.HIDDEN_SIZE)
return ta
# compare plot sentence to each answer
tanswers = tf.map_fn(a_step, elems=answers_prep)
return tq, tanswers
# tqs: [num_sentences x num_words x hidden_size]
# tas: [num_sentences x num_answers x num_words_in_sentence x hidden_size]
tqs, tas = tf.map_fn(p_sent_step, elems=p_prep, dtype=(tf.float32, tf.float32))
# tas: [num_answers x num_sentences x num_words_in_sentence x hidden_size]
tas = tf.einsum('ijkl->jikl', tas)
q_prep = tf.expand_dims(q_prep, 0)
# [1 x num_words x 2* hidden_size]
q_con = tf.concat([q_prep, q_prep], axis=2)
# [num_answers x num_words x 2* hidden_size]
a_con = tf.concat([answers_prep, answers_prep], axis=2)
# [1 x hidden_size * num_filter_sizes]
rq_sent_feats, _ = convolution_op(q_con, model_conf.FILTER_SIZES, model_conf.HIDDEN_SIZE)
# [num_answers x hidden_size * num_filter_sizes]
ra_sent_feats, _ = convolution_op(a_con, model_conf.FILTER_SIZES, model_conf.HIDDEN_SIZE)
# convolution over weighted plot representation [tq|ta] for one answer
# num_sentences is first dimension of elems
def a_conv(elems):
# ta: [num_sentences x num_words x hidden_size]
# a_sent: [hidden_size * num_filter_sizes]
ta, a_sent = elems
# [num_sentences x num_words x 2* hidden_size]
tqa_con = tf.concat([tqs, ta], axis=2)
# rpqa_sent_feats: [num_sentences x hidden_size * num_filter_sizes]
# att_vis: []?
# TODO two options for word attention visualization:
# 1) after the CNN layer: coloring of words with their context within the sentence
# -> use att_vis
# 2) before the CNN layer (only word attention + compare, analogous to how sentence attention is extracted):
# each word has always the same color within a plot
# -> use word_atts below
rpqa_sent_feats, att_vis = convolution_op(tqa_con, model_conf.FILTER_SIZES, model_conf.HIDDEN_SIZE, 3,
'SAME')
# [num_sentences x num_words]
word_atts = tf.reduce_mean(input_tensor=tqa_con, axis=2)
# we dont need softmax here as we only need values represented as strong and weak coloring of the words
# and no real distribution among all words in the sentence
# looks strange with softmax
# word_atts = tf.nn.softmax(word_atts)
# stage two from here, all sentence features are computed
# [num_sentences x hidden_size * num_filter_sizes]
hq_sent = attention_op(rq_sent_feats, rpqa_sent_feats, model_conf.HIDDEN_SIZE)
# [1 x hidden_size * num_filter_sizes]
a_sent = tf.expand_dims(a_sent, 0)
# [num_sentences x hidden_size * num_filter_sizes]
ha_sent = attention_op(a_sent, rpqa_sent_feats, model_conf.HIDDEN_SIZE)
# compare is element-wise, so dimension of output does not change
tq_sent = compare_op_2(rpqa_sent_feats, hq_sent, model_conf.HIDDEN_SIZE)
ta_sent = compare_op_2(rpqa_sent_feats, ha_sent, model_conf.HIDDEN_SIZE)
# [num_sentences x 2 * hidden_size * num_filter_sizes]
tqa_sent = tf.concat([tq_sent, ta_sent], 1)
return tqa_sent, word_atts
# first dimension of tas and ra_sent_feats is the number of answers
# t_sent: [num_answers x num_sentences x 2 * hidden_size * num_filter_sizes]
# word_atts: [num_answers x num_sentences x num_words]
t_sent, word_atts = tf.map_fn(a_conv, elems=[tas, ra_sent_feats], dtype=(tf.float32, tf.float32))
# [num_answers x hidden_size * num_filter_sizes]
r_final_feats, _ = convolution_op_2(t_sent, model_conf.FILTER_SIZES, model_conf.HIDDEN_SIZE, 1, 'SAME')
# [num_answers]
result = soft_prep_op(r_final_feats, model_conf.HIDDEN_SIZE)
result = tf.reshape(result, shape=[-1])
# [num_answers x num_sentences]
sent_atts = tf.reduce_mean(input_tensor=t_sent, axis=2)
sent_soft = tf.nn.softmax(sent_atts)
return result, word_atts, sent_soft, p_word_indices
predict_step_op = tf.compat.v1.make_template(name_='predict_step', func_=predict_step)
# first dimension of data is batch size
batch_predictions = tf.map_fn(fn=predict_step_op, parallel_iterations=1,
elems=data, infer_shape=False,
dtype=(tf.float32, tf.float32, tf.float32, tf.int64))
return batch_predictions
# get single record sample for set
def get_single_sample(sample):
context_features = {
"question_size": tf.io.FixedLenFeature([], dtype=tf.int64),
"question_type": tf.io.FixedLenFeature([], dtype=tf.int64),
"movie_id": tf.io.FixedLenFeature([], dtype=tf.string),
}
sequence_features = {
"question": tf.io.VarLenFeature(dtype=tf.int64),
"labels": tf.io.VarLenFeature(dtype=tf.float32),
"answers": tf.io.VarLenFeature(dtype=tf.int64),
"plot": tf.io.VarLenFeature(dtype=tf.int64)
}
context_parsed, sequence_parsed = tf.io.parse_single_sequence_example(
serialized=sample,
context_features=context_features,
sequence_features=sequence_features
)
label = sequence_parsed['labels']
answers = sequence_parsed['answers']
plot = sequence_parsed['plot']
question = sequence_parsed['question']
question_type = context_parsed['question_type']
movie_id = context_parsed['movie_id']
plot = tf.sparse.to_dense(plot)
answers = tf.sparse.to_dense(answers)
question = tf.sparse.to_dense(question)
label = tf.sparse.to_dense(label)
answers = tf.reshape(answers, shape=[ANSWER_COUNT, -1])
label = tf.reshape(label, shape=[ANSWER_COUNT])
question = tf.reshape(question, shape=[-1])
return question, answers, label, movie_id, plot, question_type
# main eval function for one epoch
def eval_model():
if not tf.io.gfile.exists(data_conf.EVAL_DIR):
tf.io.gfile.makedirs(data_conf.EVAL_DIR)
util.save_config_values(data_conf, data_conf.EVAL_DIR + "/data")
util.save_config_values(model_conf, data_conf.EVAL_DIR + "/model")
filepath = data_conf.EVAL_RECORD_PATH + '/*'
filenames = glob.glob(filepath)
print("Evaluate model on records stored in %s" % str(filenames))
global_step = tf.compat.v1.train.get_or_create_global_step()
dataset = tf.data.TFRecordDataset(filenames)
dataset = dataset.map(get_single_sample)
batch_size = 1
dataset = dataset.padded_batch(batch_size, padded_shapes=(
[None], [ANSWER_COUNT, None], [None], (), [None, None], ()))
iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
next_q, next_a, next_l, next_plot_ids, next_plots, next_q_types = iterator.get_next()
logits, atts, sent_atts, pl_d = predict_batch([next_q, next_a, next_plots], training=False)
next_q_types = tf.reshape(next_q_types, ())
probabs = model.compute_probabilities(logits=logits)
loss_example = model.compute_batch_mean_loss(logits, next_l, model_conf.LOSS_FUNC)
accuracy_example = tf.reduce_mean(input_tensor=model.compute_accuracies(logits=logits, labels=next_l, dim=1))
# do not restore the saved embeddings from the model graph in case the vocab size has changed
#to_restore = tf.contrib.slim.get_variables_to_restore(exclude=["embeddings"])
saver = tf.compat.v1.train.Saver()
summary_writer = tf.compat.v1.summary.FileWriter(data_conf.TRAIN_DIR)
step = 0
total_acc = 0.0
total_loss = 0.0
type_counts = np.zeros(6, dtype=np.int32)
type_accs = np.zeros(6)
p_counts = 0
last_p = ''
with tf.compat.v1.Session() as sess:
init_op = tf.group(tf.compat.v1.global_variables_initializer(), tf.compat.v1.local_variables_initializer())
sess.run(init_op)
ckpt = tf.train.get_checkpoint_state(data_conf.TRAIN_DIR)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
else:
print('No checkpoint file found')
# print("Feeding embeddings %s " % str(vectors))
_ = sess.run(set_embeddings_op, feed_dict={place: vectors})
coord = tf.train.Coordinator()
threads = tf.compat.v1.train.start_queue_runners(sess=sess, coord=coord)
try:
while not coord.should_stop():
loss_val, acc_val, probs_val, gs_val, q_type_val, q_val, atts_val, sent_atts_val, labels_val, p_val, a_val, p_id_val = sess.run(
[loss_example, accuracy_example, probabs, global_step, next_q_types, next_q, atts, sent_atts,
next_l,
pl_d, next_a, next_plot_ids])
type_accs[q_type_val + 1] += acc_val
type_counts[q_type_val + 1] += 1
total_loss += loss_val
total_acc += acc_val
predicted_probabilities = probs_val[0]
sentence_attentions = sent_atts_val[0]
pred_index = np.argmax(predicted_probabilities)
labels = labels_val[0]
gold = np.argmax(labels)
filename = ''
q_s = ''
for index in q_val[0]:
word = (vocab[index])
q_s += (word + ' ')
filename += (word + '_')
filename += "?"
p_id = str(p_id_val[0].decode("utf-8"))
path = data_conf.EVAL_DIR + "/plots/" + p_id + "_" + str(step) + "/" # + filename
# write attention heat-map
if (p_id != last_p and p_counts < data_conf.PLOT_SAMPLES_NUM):
# if True:
for i, a_att in enumerate(atts_val[0]):
qa_s = q_s + "? (acc: " + str(acc_val) + ")\n "
for index in a_val[0][i]:
word = vocab[index]
qa_s += (word + ' ')
filename += word + "_"
lv = " (label: " + str(int(labels[i])) + " - prediction: " + (
str("%.2f" % (predicted_probabilities[i] * 100))) + "%)"
qa_s += lv
a_sents = []
y_labels = []
for j, att in enumerate(a_att):
a_s = []
y_labels.append(str("%.2f" % (sent_atts_val[0][i][j] * 100)) + "%")
for index in p_val[0][j]:
a_s.append(vocab[index])
a_sents.append(a_s)
util.plot_attention(np.array(a_att), np.array(a_sents), qa_s, y_labels, path, filename)
last_p = p_id
p_counts += 1
print("Sample loss: " + str(loss_val))
print("Sample labels: " + str(labels))
print("Sample probabilities: " + str(predicted_probabilities))
print("Sample acc: " + str(acc_val))
util.print_predictions(data_conf.EVAL_DIR, step, gold, predicted_probabilities, data_conf.MODE)
util.print_sentence_attentions(data_conf.EVAL_DIR, step, sentence_attentions)
step += 1
print("Total acc: " + str(total_acc / step))
print("Local_step: " + str(step * batch_size))
print("Global_step: " + str(gs_val))
print("===========================================")
except tf.errors.OutOfRangeError:
summary = tf.compat.v1.Summary()
summary.value.add(tag='validation_loss', simple_value=total_loss / step)
summary.value.add(tag='validation_accuracy', simple_value=(total_acc / step))
summary_writer.add_summary(summary, gs_val)
keys = util.get_question_keys()
if data_conf.MODE == "val":
with open(data_conf.EVAL_DIR + "/val_accuracy.txt", "a") as file:
file.write("global step: " + str(gs_val) + " - total accuracy: " + str(
total_acc / step) + "- total loss: " + str(total_loss / step) + "\n")
file.write("===================================================================" + "\n")
util.save_eval_score(
"global step: " + str(gs_val) + " - acc : " + str(
total_acc / step) + " - total loss: " + str(
total_loss / step) + " - " + data_conf.TRAIN_DIR + "_" + str(gs_val))
finally:
coord.request_stop()
coord.join(threads)
# main training function for one epoch
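# Builds the shuffled, repeated, padded input pipeline, runs predict_batch in
# training mode and optimizes the batch mean loss inside a
# MonitoredTrainingSession; the pretrained embeddings are fed once via
# set_embeddings_op when the train directory is created for the first time.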
def train_model():
print("train")
global_step = tf.compat.v1.train.get_or_create_global_step()
init = False
if not tf.io.gfile.exists(data_conf.TRAIN_DIR):
init = True
print("RESTORING WEIGHTS")
tf.io.gfile.makedirs(data_conf.TRAIN_DIR)
util.save_config_values(data_conf, data_conf.TRAIN_DIR + "/data")
util.save_config_values(model_conf, data_conf.TRAIN_DIR + "/model")
filenames = glob.glob(data_conf.TRAIN_RECORD_PATH + '/*')
print("Reading training dataset from %s" % filenames)
dataset = tf.data.TFRecordDataset(filenames)
dataset = dataset.map(get_single_sample)
dataset = dataset.shuffle(buffer_size=9000)
dataset = dataset.repeat(model_conf.NUM_EPOCHS)
batch_size = model_conf.BATCH_SIZE
dataset = dataset.padded_batch(model_conf.BATCH_SIZE, padded_shapes=(
[None], [ANSWER_COUNT, None], [None], (), [None, None], ()))
iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
next_q, next_a, next_l, next_plot_ids, next_plots, next_q_types = iterator.get_next()
logits, _, _, _ = predict_batch([next_q, next_a, next_plots], training=True)
probabs = model.compute_probabilities(logits=logits)
loss_batch = model.compute_batch_mean_loss(logits, next_l, model_conf.LOSS_FUNC)
accuracy = model.compute_accuracies(logits=logits, labels=next_l, dim=1)
accuracy_batch = tf.reduce_mean(input_tensor=accuracy)
tf.compat.v1.summary.scalar("train_accuracy", accuracy_batch)
tf.compat.v1.summary.scalar("train_loss", loss_batch)
training_op = update_op(loss_batch, global_step, model_conf.OPTIMIZER, model_conf.INITIAL_LEARNING_RATE)
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
with tf.compat.v1.train.MonitoredTrainingSession(
checkpoint_dir=data_conf.TRAIN_DIR,
save_checkpoint_secs=60,
save_summaries_steps=5,
hooks=[tf.estimator.StopAtStepHook(last_step=model_conf.MAX_STEPS),
], config=config) as sess:
step = 0
total_acc = 0.0
if init:
_ = sess.run(set_embeddings_op, feed_dict={place: vectors})
while not sess.should_stop():
_, loss_val, acc_val, probs_val, lab_val, gs_val = sess.run(
[training_op, loss_batch, accuracy_batch, probabs, next_l, global_step])
print(probs_val)
print(lab_val)
print("Batch loss: " + str(loss_val))
print("Batch acc: " + str(acc_val))
step += 1
total_acc += acc_val
print("Total acc: " + str(total_acc / step))
print("Local_step: " + str(step * batch_size))
print("Global_step: " + str(gs_val))
print("===========================================")
util.copy_model(data_conf.TRAIN_DIR, gs_val)
if __name__ == '__main__':
eval_model()
| 20,000 | 43.25 | 144 |
py
|
fat-albert
|
fat-albert-master/abmn/src/movieqa/run_adversarial_white_box.py
|
# word-level and sentence-level white-box adversarial attack for hierarchical CNN and RNN-LSTM model
import glob
import os
import sys
import tensorflow as tf
import core.model as model
import core.util as util
import movieqa.data_conf as data_conf
from random import randrange
from random import shuffle
import numpy as np
model_conf = None
if not os.path.exists(data_conf.RECORD_DIR):
os.makedirs(data_conf.RECORD_DIR)
import movieqa.preprocess as pp
pp.create_complete_dataset()
vectors, vocab = util.load_embeddings(data_conf.EMBEDDING_DIR)
data_conf.VOCAB_SIZE = len(vectors)
ANSWER_COUNT = 5
dropout_op = tf.compat.v1.make_template(name_='dropout', func_=model.dropout)
prepare_op = tf.compat.v1.make_template(name_='prepare_embedding_a', func_=model.prep_embedding)
attention_op = tf.compat.v1.make_template(name_='prepare_attention', func_=model.prep_attention)
compare_op = tf.compat.v1.make_template(name_='compare', func_=model.compare_submult)
compare_op_2 = tf.compat.v1.make_template(name_='compare_2', func_=model.compare_submult)
convolution_op = tf.compat.v1.make_template(name_='convolution', func_=model.cnn)
convolution_op_2 = tf.compat.v1.make_template(name_='convolution1', func_=model.cnn)
soft_prep_op = tf.compat.v1.make_template(name_='softmax', func_=model.softmax_prep)
update_op = tf.compat.v1.make_template(name_='update', func_=model.update_params)
lstm_op = tf.compat.v1.make_template(name_='lstm1', func_=model.lstm)
lstm_op_2 = tf.compat.v1.make_template(name_='lstm2', func_=model.lstm)
embeddings = tf.Variable(
tf.random.uniform([data_conf.VOCAB_SIZE, data_conf.EMBEDDING_SIZE], -1.3, 1.3), name="embeddings", trainable=False)
place = tf.compat.v1.placeholder(tf.float32, shape=embeddings.shape)
set_embeddings_op = tf.compat.v1.assign(embeddings, place, validate_shape=True)
# word-level attack: modify words in the most attended sentence
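# With probability percentage_attacked_samples the batch is attacked: for each
# sample the plot sentence with the highest sentence-level attention for the
# correct answer is located, its words are ranked by word-level attention, and
# up to num_modified_words of the most attended positions are overwritten with
# random vocabulary ids.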
def modify_plot_sentence(p, w_att, s_att, labels, num_modified_words, percentage_attacked_samples):
prob = randrange(0, 100)
if prob <= percentage_attacked_samples:
for i, p_samp in enumerate(p):
w_samp = w_att[i]
s_samp = s_att[i]
l_samp = labels[i]
corr_ind = np.argmax(l_samp)
w_corr = w_samp[corr_ind]
w_corr = np.mean(w_corr, 2)
s_corr = s_samp[corr_ind]
s_max_ind = np.argmax(s_corr)
p_sent = p_samp[s_max_ind]
w_red = w_corr[s_max_ind]
w_order = np.argsort(w_red).tolist()
valid_count = 0
for id in p_sent:
if id == 0:
break
else:
valid_count += 1
s_len = valid_count
if valid_count > 0:
rand_inds = [z for z in range(valid_count)]
shuffle(rand_inds)
else:
rand_inds = []
for k in range(0, num_modified_words):
replace_most_attended_words = True
if replace_most_attended_words:
if k >= s_len:
break
m_ind = int(w_order.pop())
r_ind = m_ind
# replace random words, not used in the experiments reported in the paper
else:
if valid_count > 0 and len(rand_inds) > 0:
r_ind = rand_inds.pop()
else:
r_ind = 0
r_word = randrange(0, data_conf.VOCAB_SIZE)
p[i][s_max_ind][r_ind] = r_word
return p
# sentence-level attack: remove the most attended sentence
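# Drops the plot sentence that receives the highest attention for the correct
# answer and returns a plot tensor with one sentence fewer; the output buffer
# m_p has a fixed leading dimension of 1, so this is intended for batch size 1.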
def remove_plot_sentence(p, s_att, labels):
m_p = np.zeros(shape=(1, len(p[0]) - 1, len(p[0][0])), dtype=np.int64)
for i, p_samp in enumerate(p):
s_samp = s_att[i]
l_samp = labels[i]
corr_ind = np.argmax(l_samp)
s_corr = s_samp[corr_ind]
s_max_ind = np.argmax(s_corr)
sl1 = p[i][:s_max_ind]
if s_max_ind < (len(p[i]) - 1):
sl2 = p[i][s_max_ind + 1:]
conc = np.concatenate([sl1, sl2])
m_p[i] = conc
else:
m_p[i] = p[i][:len(p[i]) - 1]
return m_p
# load embeddings representation for vocab indices
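# Index 0 is the padding id, so padded positions are mapped to zero vectors
# instead of the embedding stored at row 0 of the embedding matrix.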
def get_emb(indices):
zero = tf.cast(0, dtype=tf.int64)
zeros = tf.zeros(shape=(tf.shape(input=indices)[0], data_conf.EMBEDDING_SIZE))
condition = tf.greater(indices, zero)
res = tf.compat.v1.where(condition, tf.nn.embedding_lookup(params=embeddings, ids=indices), zeros)
return res
# main batch prediction op
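# Two-stage compare-aggregate model: stage one attends every plot sentence to
# the question and to each candidate answer and compares the word
# representations; stage two aggregates the per-sentence features with either a
# CNN or an LSTM (selected by model_type) and attends over sentences before the
# final scoring layer.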
def predict_batch(model_type, data, training):
def predict_step(data):
sample = data
q = sample[0]
q = get_emb(q)
aws = sample[1]
aws = tf.map_fn(get_emb, aws, dtype=tf.float32)
p = sample[2]
with tf.device('/cpu:0'):
p_d = p
p = tf.map_fn(get_emb, p_d, dtype=tf.float32)
p_drop = dropout_op(p, training, model_conf.DROPOUT_RATE)
p_drop = tf.reshape(p_drop, shape=(tf.shape(input=p)[0], -1, data_conf.EMBEDDING_SIZE))
p_prep = prepare_op(p_drop, model_conf.HIDDEN_SIZE)
q_drop = dropout_op(q, training, model_conf.DROPOUT_RATE)
q_prep = prepare_op(q_drop, model_conf.HIDDEN_SIZE)
aws_drop = dropout_op(aws, training, model_conf.DROPOUT_RATE)
aws_drop = tf.reshape(aws_drop, shape=(ANSWER_COUNT, -1, data_conf.EMBEDDING_SIZE))
aws_prep = prepare_op(aws_drop, model_conf.HIDDEN_SIZE)
# stage one
def p_sent_step(p_sent):
h = attention_op(q_prep, p_sent, model_conf.HIDDEN_SIZE)
t = compare_op(p_sent, h, model_conf.HIDDEN_SIZE)
def a_step(a):
ha = attention_op(a, p_sent, model_conf.HIDDEN_SIZE)
ta = compare_op(p_sent, ha, model_conf.HIDDEN_SIZE)
return ta
a_feats = tf.map_fn(a_step, elems=aws_prep)
return t, a_feats
tqs, tas = tf.map_fn(p_sent_step, elems=p_prep, dtype=(tf.float32, tf.float32))
tas = tf.einsum('ijkl->jikl', tas)
q_prep = tf.expand_dims(q_prep, 0)
q_con = tf.concat([q_prep, q_prep], axis=2)
a_con = tf.concat([aws_prep, aws_prep], axis=2)
if model_type == "lstm":
q_sent_feats = lstm_op(q_con, model_conf.HIDDEN_SIZE)
a_sent_feats = lstm_op(a_con, model_conf.HIDDEN_SIZE)
else:
q_sent_feats, _ = convolution_op(q_con, model_conf.FILTER_SIZES, model_conf.HIDDEN_SIZE)
a_sent_feats, _ = convolution_op(a_con, model_conf.FILTER_SIZES, model_conf.HIDDEN_SIZE)
def a_conv(elems):
ta, a_sent = elems
con = tf.concat([tqs, ta], axis=2)
# att_vis = tf.reduce_mean(con, axis=2)
if model_type == "lstm":
pqa_sent_feats = lstm_op(con, model_conf.HIDDEN_SIZE)
else:
pqa_sent_feats, _ = convolution_op(con, model_conf.FILTER_SIZES, model_conf.HIDDEN_SIZE, 3, 'SAME')
# stage two from here, all sentence features are computed
hq_sent = attention_op(q_sent_feats, pqa_sent_feats, model_conf.HIDDEN_SIZE)
a_sent = tf.expand_dims(a_sent, 0)
ha_sent = attention_op(a_sent, pqa_sent_feats, model_conf.HIDDEN_SIZE)
tq_sent = compare_op_2(pqa_sent_feats, hq_sent, model_conf.HIDDEN_SIZE)
ta_sent = compare_op_2(pqa_sent_feats, ha_sent, model_conf.HIDDEN_SIZE)
sent_feats = tf.concat([tq_sent, ta_sent], 1)
return sent_feats, con
t_sent, pqa_atts = tf.map_fn(a_conv, elems=[tas, a_sent_feats], dtype=(tf.float32, tf.float32))
if model_type == "lstm":
r_final_feats = lstm_op_2(t_sent, model_conf.HIDDEN_SIZE)
else:
r_final_feats, _ = convolution_op_2(t_sent, model_conf.FILTER_SIZES, model_conf.HIDDEN_SIZE, 1)
sent_atts = tf.reduce_mean(input_tensor=t_sent, axis=2)
sent_soft = tf.nn.softmax(sent_atts)
result = soft_prep_op(r_final_feats, model_conf.HIDDEN_SIZE)
result = tf.reshape(result, shape=[-1])
return result, pqa_atts, sent_soft, p_d
predict_step_op = tf.compat.v1.make_template(name_='predict_step', func_=predict_step)
batch_predictions = tf.map_fn(fn=predict_step_op, parallel_iterations=1,
elems=data, infer_shape=False,
dtype=(tf.float32, tf.float32, tf.float32, tf.int64))
return batch_predictions
# get single record sample for set
def get_single_sample(sample):
context_features = {
"question_size": tf.io.FixedLenFeature([], dtype=tf.int64),
"question_type": tf.io.FixedLenFeature([], dtype=tf.int64),
"movie_id": tf.io.FixedLenFeature([], dtype=tf.string),
}
sequence_features = {
"question": tf.io.VarLenFeature(dtype=tf.int64),
"labels": tf.io.VarLenFeature(dtype=tf.float32),
"answers": tf.io.VarLenFeature(dtype=tf.int64),
"plot": tf.io.VarLenFeature(dtype=tf.int64)
}
context_parsed, sequence_parsed = tf.io.parse_single_sequence_example(
serialized=sample,
context_features=context_features,
sequence_features=sequence_features
)
label = sequence_parsed['labels']
answers = sequence_parsed['answers']
plot = sequence_parsed['plot']
question = sequence_parsed['question']
question_type = context_parsed['question_type']
movie_id = context_parsed['movie_id']
plot = tf.sparse.to_dense(plot)
answers = tf.sparse.to_dense(answers)
question = tf.sparse.to_dense(question)
label = tf.sparse.to_dense(label)
answers = tf.reshape(answers, shape=[ANSWER_COUNT, -1])
label = tf.reshape(label, shape=[ANSWER_COUNT])
question = tf.reshape(question, shape=[-1])
return question, answers, label, movie_id, plot, question_type
# main train function for one epoch
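# Adversarial training: a first forward pass produces word- and sentence-level
# attentions, the selected white-box attack perturbs the plot through
# tf.py_func, and a second forward pass on the modified plot yields the logits
# that are actually optimized.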
def train_model(model_type, attack_level, num_modified_words, percentage_attacked_samples):
print("train")
print("%s white-box adversarial attack modifies %d words of %d%% of the instances: " % (
attack_level, num_modified_words, percentage_attacked_samples))
global model_conf
if model_type == "lstm":
import movieqa.conf_lstm as model_conf
else:
import movieqa.conf_cnn as model_conf
    global_step = tf.compat.v1.train.get_or_create_global_step()
init = False
if not tf.io.gfile.exists(data_conf.TRAIN_DIR):
init = True
print("RESTORING WEIGHTS")
tf.io.gfile.makedirs(data_conf.TRAIN_DIR)
util.save_config_values(data_conf, data_conf.TRAIN_DIR + "/data")
util.save_config_values(model_conf, data_conf.TRAIN_DIR + "/model")
filenames = glob.glob(data_conf.TRAIN_RECORD_PATH + '/*')
print("Reading training dataset from %s" % filenames)
    dataset = tf.data.TFRecordDataset(filenames)
dataset = dataset.map(get_single_sample)
dataset = dataset.shuffle(buffer_size=9000)
dataset = dataset.repeat(data_conf.NUM_EPOCHS)
batch_size = data_conf.BATCH_SIZE
dataset = dataset.padded_batch(data_conf.BATCH_SIZE, padded_shapes=(
[None], [ANSWER_COUNT, None], [None], (), [None, None], ()))
iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
next_q, next_a, next_l, next_plot_ids, next_plots, next_q_types = iterator.get_next()
_, w_atts, s_atts, _ = predict_batch(model_type, [next_q, next_a, next_plots], training=True)
if attack_level == "sentence":
m_p = tf.compat.v1.py_func(remove_plot_sentence, [next_plots, s_atts, next_l], [tf.int64])[0]
elif attack_level == "word":
        m_p = tf.compat.v1.py_func(modify_plot_sentence,
                                   [next_plots, w_atts, s_atts, next_l, num_modified_words,
                                    percentage_attacked_samples],
                                   [tf.int64])[0]
logits, _, _, _ = predict_batch(model_type, [next_q, next_a, m_p], training=True)
probabs = model.compute_probabilities(logits=logits)
loss_batch = model.compute_batch_mean_loss(logits, next_l, model_conf.LOSS_FUNC)
accuracy = model.compute_accuracies(logits=logits, labels=next_l, dim=1)
accuracy_batch = tf.reduce_mean(input_tensor=accuracy)
tf.compat.v1.summary.scalar("train_accuracy", accuracy_batch)
tf.compat.v1.summary.scalar("train_loss", loss_batch)
training_op = update_op(loss_batch, global_step, model_conf.OPTIMIZER, model_conf.INITIAL_LEARNING_RATE)
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
with tf.compat.v1.train.MonitoredTrainingSession(
checkpoint_dir=data_conf.TRAIN_DIR,
save_checkpoint_secs=60,
save_summaries_steps=5,
hooks=[tf.estimator.StopAtStepHook(last_step=model_conf.MAX_STEPS),
], config=config) as sess:
step = 0
total_acc = 0.0
if init:
_ = sess.run(set_embeddings_op, feed_dict={place: vectors})
while not sess.should_stop():
_, loss_val, acc_val, probs_val, lab_val, gs_val = sess.run(
[training_op, loss_batch, accuracy_batch, probabs, next_l, global_step])
print(probs_val)
print(lab_val)
print("Batch loss: " + str(loss_val))
print("Batch acc: " + str(acc_val))
step += 1
total_acc += acc_val
print("Total acc: " + str(total_acc / step))
print("Local_step: " + str(step * batch_size))
print("Global_step: " + str(gs_val))
print("===========================================")
util.copy_model(data_conf.TRAIN_DIR, gs_val)
# main eval function for one epoch
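# Same attack pipeline as train_model but in inference mode with batch size 1:
# attentions from a clean forward pass drive the attack, and accuracy,
# precision, rank and per-question-type statistics are computed on the
# perturbed input.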
def eval_model(model_type, attack_level, num_modified_words, percentage_attacked_samples):
print("evaluate")
print("%s white-box adversarial attack modifies %d words of %d%% of the instances: " % (
attack_level, num_modified_words, percentage_attacked_samples))
global model_conf
if model_type == "lstm":
import movieqa.conf_lstm as model_conf
else:
import movieqa.conf_cnn as model_conf
if not tf.io.gfile.exists(data_conf.EVAL_DIR):
tf.io.gfile.makedirs(data_conf.EVAL_DIR)
util.save_config_values(data_conf, data_conf.TRAIN_DIR + "/data")
util.save_config_values(model_conf, data_conf.TRAIN_DIR + "/model")
filepath = data_conf.EVAL_RECORD_PATH + '/*'
filenames = glob.glob(filepath)
print("Evaluating adversarial attack on %s" % filenames)
    global_step = tf.compat.v1.train.get_or_create_global_step()
    dataset = tf.data.TFRecordDataset(filenames)
dataset = dataset.map(get_single_sample)
batch_size = 1
dataset = dataset.padded_batch(batch_size, padded_shapes=(
[None], [ANSWER_COUNT, None], [None], (), [None, None], ()))
iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
next_q, next_a, next_l, next_plot_ids, next_plots, next_q_types = iterator.get_next()
_, w_atts, s_atts, _ = predict_batch(model_type, [next_q, next_a, next_plots], training=False)
if attack_level == "sentence":
m_p = tf.compat.v1.py_func(remove_plot_sentence, [next_plots, s_atts, next_l], [tf.int64])[0]
elif attack_level == "word":
m_p = tf.compat.v1.py_func(modify_plot_sentence,
[next_plots, w_atts, s_atts, next_l, num_modified_words, percentage_attacked_samples],
[tf.int64])[0]
logits, atts, sent_atts, pl_d = predict_batch(model_type, [next_q, next_a, m_p], training=False)
next_q_types = tf.reshape(next_q_types, ())
probabs = model.compute_probabilities(logits=logits)
loss_example = model.compute_batch_mean_loss(logits, next_l, model_conf.LOSS_FUNC)
accuracy_example = tf.reduce_mean(input_tensor=model.compute_accuracies(logits=logits, labels=next_l, dim=1))
    # exclude the saved embeddings from restoring in case the vocab size has changed
    # (non-contrib replacement for tf.contrib.slim.get_variables_to_restore(exclude=["embeddings"]))
    to_restore = [v for v in tf.compat.v1.global_variables() if not v.name.startswith("embeddings")]
saver = tf.compat.v1.train.Saver(to_restore)
summary_writer = tf.compat.v1.summary.FileWriter(data_conf.TRAIN_DIR)
step = 0
total_acc = 0.0
total_prec = 0.0
total_rank = 0.0
total_loss = 0.0
type_counts = np.zeros(6, dtype=np.int32)
type_accs = np.zeros(6)
max_sent_atts = {}
max_atts = {}
p_counts = 0
last_p = ''
with tf.compat.v1.Session() as sess:
init_op = tf.group(tf.compat.v1.global_variables_initializer(), tf.compat.v1.local_variables_initializer())
sess.run(init_op)
ckpt = tf.train.get_checkpoint_state(data_conf.TRAIN_DIR)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
else:
print('No checkpoint file found')
_ = sess.run(set_embeddings_op, feed_dict={place: vectors})
coord = tf.train.Coordinator()
threads = tf.compat.v1.train.start_queue_runners(sess=sess, coord=coord)
try:
while not coord.should_stop():
loss_val, acc_val, probs_val, gs_val, q_type_val, q_val, atts_val, sent_atts_val, labels_val, p_val, a_val, p_id_val = sess.run(
[loss_example, accuracy_example, probabs, global_step, next_q_types, next_q, atts, sent_atts,
next_l,
pl_d, next_a, next_plot_ids])
type_accs[q_type_val + 1] += acc_val
type_counts[q_type_val + 1] += 1
predicted_probabilities = probs_val[0]
sentence_attentions = sent_atts_val[0]
total_loss += loss_val
total_acc += acc_val
pred_index = np.argmax(probs_val[0])
labels = labels_val[0]
gold = np.argmax(labels)
filename = ''
q_s = ''
for index in q_val[0]:
word = (vocab[index])
q_s += (word + ' ')
filename += (word + '_')
p_id = str(p_id_val[0].decode("utf-8"))
path = data_conf.EVAL_DIR + "/plots/" + p_id + "/" + filename
corr_ans = np.argmax(labels_val[0])
max_att_val = np.argmax(sent_atts_val[0][corr_ans])
att_row = np.max(atts_val[0][corr_ans][max_att_val], 1)
red = np.max(atts_val[0][corr_ans][max_att_val], 1)
att_inds = np.argsort(red)[::-1]
if (p_id != last_p and p_counts < 20):
for i, a_att in enumerate(atts_val[0]):
a_att = np.mean(a_att, 2)
qa_s = q_s + "? (acc: " + str(acc_val) + ")\n "
for index in a_val[0][i]:
qa_s += (vocab[index] + ' ')
lv = " (label: " + str(int(labels_val[0][i])) + " - prediction: " + (
str("%.2f" % (probs_val[0][i] * 100))) + "%)"
qa_s += lv
a_sents = []
y_labels = []
for j, att in enumerate(a_att):
a_s = []
y_labels.append(str("%.2f" % (sent_atts_val[0][i][j] * 100)) + "%")
for index in p_val[0][j]:
a_s.append(vocab[index])
a_sents.append(a_s)
# util.plot_attention(np.array(a_att), np.array(a_sents),qa_s,y_labels,path,filename)
last_p = p_id
p_counts += 1
m_ap = util.example_precision(probs_val[0], labels_val[0], 5)
rank = util.example_rank(probs_val[0], labels_val[0], 5)
total_prec += m_ap
total_rank += rank
print("Sample loss: " + str(loss_val))
print("Sample acc: " + str(acc_val))
print("Sample prec: " + str(m_ap))
print("Sample rank: " + str(rank))
util.print_predictions(data_conf.EVAL_DIR, step, gold, predicted_probabilities, data_conf.MODE)
util.print_sentence_attentions(data_conf.EVAL_DIR, step, sentence_attentions)
step += 1
print("Total acc: " + str(total_acc / step))
print("Total prec: " + str(total_prec / step))
print("Total rank: " + str(total_rank / step))
print("Local_step: " + str(step * batch_size))
print("Global_step: " + str(gs_val))
if attack_level == "word":
print("%d modified word(s)" % num_modified_words)
print("===========================================")
except tf.errors.OutOfRangeError:
summary = tf.compat.v1.Summary()
summary.value.add(tag='validation_loss', simple_value=total_loss / step)
summary.value.add(tag='validation_accuracy', simple_value=(total_acc / step))
summary_writer.add_summary(summary, gs_val)
keys = util.get_question_keys()
with open(data_conf.EVAL_DIR + "/accuracy.txt", "a") as file:
                file.write("global step: " + str(gs_val) + " - total accuracy: " + str(
                    total_acc / step) + " - total loss: " + str(total_loss / step) +
                           " - modified words: " + str(num_modified_words) + "\n")
file.write("Types (name / count / correct / accuracy):\n")
for entry in zip(keys, type_counts, type_accs, (type_accs / type_counts)):
file.write(str(entry) + "\n")
file.write("===================================================================" + "\n")
util.save_eval_score(
"global step: " + str(gs_val) + " - acc : " + str(
total_acc / step) + " - total loss: " + str(
total_loss / step) + " - " + data_conf.TRAIN_DIR + "_" + str(gs_val))
finally:
coord.request_stop()
coord.join(threads)
| 22,121 | 40.897727 | 144 |
py
|
fat-albert
|
fat-albert-master/abmn/src/movieqa/run_cnn_word_level.py
|
# original baseline for movieqa without sentence attention, may not be up to date
import glob
import os
import tensorflow as tf
tf.compat.v1.disable_v2_behavior()
import core.model as model
import core.util as util
import movieqa.data_conf as data_conf
import movieqa.conf_cnn_word_level as model_conf
import numpy as np
print("loading records from %s, loading embeddings from %s" % (data_conf.RECORD_DIR, data_conf.EMBEDDING_DIR))
if not os.path.exists(data_conf.RECORD_DIR):
os.makedirs(data_conf.RECORD_DIR)
import movieqa.preprocess as pp
pp.create_complete_dataset()
def load_embeddings():
global vectors, vocab, embeddings, place, set_embeddings_op
vectors, vocab = util.load_embeddings(data_conf.EMBEDDING_DIR)
data_conf.VOCAB_SIZE = len(vectors)
# init word embeddings
embeddings = tf.Variable(
tf.random.uniform([data_conf.VOCAB_SIZE, data_conf.EMBEDDING_SIZE], -1.3, 1.3), name="embeddings",
trainable=False)
place = tf.compat.v1.placeholder(tf.float32, shape=embeddings.shape)
set_embeddings_op = tf.compat.v1.assign(embeddings, place, validate_shape=True)
load_embeddings()
ANSWER_COUNT = 5
dropout_op = tf.compat.v1.make_template(name_='dropout', func_=model.dropout)
prepare_op = tf.compat.v1.make_template(name_='prepare_embedding_a', func_=model.prep_embedding)
attention_op = tf.compat.v1.make_template(name_='prepare_attention', func_=model.prep_attention)
compare_op = tf.compat.v1.make_template(name_='compare', func_=model.compare_submult)
compare_op_2 = tf.compat.v1.make_template(name_='compare_2', func_=model.compare_submult)
convolution_op = tf.compat.v1.make_template(name_='convolution', func_=model.cnn)
convolution_op_2 = tf.compat.v1.make_template(name_='convolution', func_=model.cnn)
soft_prep_op = tf.compat.v1.make_template(name_='softmax', func_=model.softmax_prep)
update_op = tf.compat.v1.make_template(name_='update', func_=model.update_params)
def get_emb(indices):
zero = tf.cast(0, dtype=tf.int64)
zeros = tf.zeros(shape=(tf.shape(input=indices)[0], data_conf.EMBEDDING_SIZE))
condition = tf.greater(indices, zero)
res = tf.compat.v1.where(condition, tf.nn.embedding_lookup(params=embeddings, ids=indices), zeros)
return res
# main prediction op for batch
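# Word-level baseline: the plot is flattened into a single word sequence,
# attended to the question and to each candidate answer, and the compared
# features are aggregated by a single CNN before scoring; there is no
# sentence-level attention stage.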
def predict_batch(data, training):
def predict_step(data):
sample = data
q = sample[0]
q = get_emb(q)
q_drop = dropout_op(q, training, model_conf.DROPOUT_RATE)
aws = sample[1]
p = sample[2]
p = tf.reshape(p, shape=[-1])
p = get_emb(p)
p_drop = dropout_op(p, training, model_conf.DROPOUT_RATE)
q_prep = prepare_op(q_drop, model_conf.HIDDEN_SIZE)
p_prep = prepare_op(p_drop, model_conf.HIDDEN_SIZE)
h = attention_op(q_prep, p_prep, model_conf.HIDDEN_SIZE)
t = compare_op(p_prep, h, model_conf.HIDDEN_SIZE)
def answer_step(a):
a = get_emb(a)
a_drop = dropout_op(a, training, model_conf.DROPOUT_RATE)
a_prep = prepare_op(a_drop, model_conf.HIDDEN_SIZE)
h2 = attention_op(a_prep, p_prep, model_conf.HIDDEN_SIZE)
t2 = compare_op(p_prep, h2, model_conf.HIDDEN_SIZE)
t_con = tf.concat([t, t2], axis=1)
return t_con
output = tf.map_fn(answer_step, elems=aws, dtype=tf.float32)
output, _ = convolution_op(output, model_conf.FILTER_SIZES, model_conf.HIDDEN_SIZE)
result = soft_prep_op(output, model_conf.HIDDEN_SIZE)
result = tf.reshape(result, shape=[-1])
return result
predict_step_op = tf.compat.v1.make_template(name_='predict_step', func_=predict_step)
batch_predictions = tf.map_fn(fn=predict_step_op, parallel_iterations=10,
elems=data, infer_shape=False,
dtype=tf.float32)
return batch_predictions
# load next sample from set's record files
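# Same record parser as in the hierarchical models, except that the plot is
# flattened into one word sequence instead of being kept per sentence.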
def get_single_sample(sample):
context_features = {
"question_size": tf.io.FixedLenFeature([], dtype=tf.int64),
"question_type": tf.io.FixedLenFeature([], dtype=tf.int64),
"movie_id": tf.io.FixedLenFeature([], dtype=tf.string),
}
sequence_features = {
"question": tf.io.VarLenFeature(dtype=tf.int64),
"labels": tf.io.VarLenFeature(dtype=tf.float32),
"answers": tf.io.VarLenFeature(dtype=tf.int64),
"plot": tf.io.VarLenFeature(dtype=tf.int64)
}
context_parsed, sequence_parsed = tf.io.parse_single_sequence_example(
serialized=sample,
context_features=context_features,
sequence_features=sequence_features
)
label = sequence_parsed['labels']
answers = sequence_parsed['answers']
plot = sequence_parsed['plot']
question = sequence_parsed['question']
question_type = context_parsed['question_type']
movie_id = context_parsed['movie_id']
plot = tf.sparse.to_dense(plot)
answers = tf.sparse.to_dense(answers)
question = tf.sparse.to_dense(question)
label = tf.sparse.to_dense(label)
answers = tf.reshape(answers, shape=[ANSWER_COUNT, -1])
label = tf.reshape(label, shape=[ANSWER_COUNT])
question = tf.reshape(question, shape=[-1])
plot = tf.reshape(plot, shape=[-1])
return question, answers, label, movie_id, plot, question_type
# main training function for one epoch
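# Standard (non-adversarial) training loop for the word-level baseline; the
# structure mirrors the hierarchical model's train_model.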
def train_model():
print("train")
global_step = tf.compat.v1.train.get_or_create_global_step()
init = False
if not tf.io.gfile.exists(data_conf.TRAIN_DIR):
init = True
tf.io.gfile.makedirs(data_conf.TRAIN_DIR)
util.save_config_values(data_conf, data_conf.TRAIN_DIR + "/data")
util.save_config_values(model_conf, data_conf.TRAIN_DIR + "/model")
filenames = glob.glob(data_conf.TRAIN_RECORD_PATH + '/*')
print("Reading training dataset from %s" % filenames)
dataset = tf.data.TFRecordDataset(filenames)
dataset = dataset.map(get_single_sample)
dataset = dataset.shuffle(buffer_size=9000)
dataset = dataset.repeat(model_conf.NUM_EPOCHS)
batch_size = model_conf.BATCH_SIZE
dataset = dataset.padded_batch(model_conf.BATCH_SIZE, padded_shapes=(
[None], [ANSWER_COUNT, None], [None], (), [None], ()))
iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
next_q, next_a, next_l, next_plot_ids, next_plots, next_q_types = iterator.get_next()
logits = predict_batch([next_q, next_a, next_plots], training=True)
probabs = model.compute_probabilities(logits=logits)
loss_batch = model.compute_batch_mean_loss(logits, next_l, model_conf.LOSS_FUNC)
accuracy = model.compute_accuracies(logits=logits, labels=next_l, dim=1)
accuracy_batch = tf.reduce_mean(input_tensor=accuracy)
tf.compat.v1.summary.scalar("train_accuracy", accuracy_batch)
tf.compat.v1.summary.scalar("train_loss", loss_batch)
training_op = update_op(loss_batch, global_step, model_conf.OPTIMIZER, model_conf.INITIAL_LEARNING_RATE)
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
with tf.compat.v1.train.MonitoredTrainingSession(
checkpoint_dir=data_conf.TRAIN_DIR,
save_checkpoint_secs=60,
save_summaries_steps=5,
hooks=[tf.estimator.StopAtStepHook(last_step=model_conf.MAX_STEPS),
], config=config) as sess:
step = 0
total_acc = 0.0
if init:
_ = sess.run(set_embeddings_op, feed_dict={place: vectors})
while not sess.should_stop():
_, loss_val, acc_val, probs_val, lab_val, gs_val = sess.run(
[training_op, loss_batch, accuracy_batch, probabs, next_l, global_step])
print(probs_val)
print(lab_val)
print("Batch loss: " + str(loss_val))
print("Batch acc: " + str(acc_val))
step += 1
total_acc += acc_val
print("Total acc: " + str(total_acc / step))
print("Local_step: " + str(step * batch_size))
print("Global_step: " + str(gs_val))
print("===========================================")
util.copy_model(data_conf.TRAIN_DIR, gs_val)
# main eval / testing function
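# Evaluates one sample at a time from the latest checkpoint, accumulates
# accuracy per question type and, in validation mode, appends the results to
# val_accuracy.txt.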
def eval_model():
if not tf.io.gfile.exists(data_conf.EVAL_DIR):
tf.io.gfile.makedirs(data_conf.EVAL_DIR)
util.save_config_values(data_conf, data_conf.EVAL_DIR + "/data")
util.save_config_values(model_conf, data_conf.EVAL_DIR + "/model")
filepath = data_conf.EVAL_RECORD_PATH + '/*'
filenames = glob.glob(filepath)
print("Evaluate model on %s" % str(filenames))
global_step = tf.compat.v1.train.get_or_create_global_step()
dataset = tf.data.TFRecordDataset(filenames)
dataset = dataset.map(get_single_sample)
batch_size = 1
dataset = dataset.padded_batch(batch_size, padded_shapes=(
[None], [ANSWER_COUNT, None], [None], (), [None], ()))
iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
next_q, next_a, next_l, next_plot_ids, next_plots, next_q_types = iterator.get_next()
logits = predict_batch([next_q, next_a, next_plots], training=False)
next_q_types = tf.reshape(next_q_types, ())
probabs = model.compute_probabilities(logits=logits)
loss_example = model.compute_batch_mean_loss(logits, next_l, model_conf.LOSS_FUNC)
accuracy_example = tf.reduce_mean(input_tensor=model.compute_accuracies(logits=logits, labels=next_l, dim=1))
saver = tf.compat.v1.train.Saver()
summary_writer = tf.compat.v1.summary.FileWriter(data_conf.TRAIN_DIR)
step = 0
total_acc = 0.0
total_loss = 0.0
type_counts = np.zeros(6, dtype=np.int32)
type_accs = np.zeros(6)
with tf.compat.v1.Session() as sess:
init_op = tf.group(tf.compat.v1.global_variables_initializer(), tf.compat.v1.local_variables_initializer())
sess.run(init_op)
ckpt = tf.train.get_checkpoint_state(data_conf.TRAIN_DIR)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
else:
print('No checkpoint file found')
coord = tf.train.Coordinator()
threads = tf.compat.v1.train.start_queue_runners(sess=sess, coord=coord)
try:
while not coord.should_stop():
loss_val, acc_val, probs_val, gs_val, q_type_val, labels_val = sess.run(
[loss_example, accuracy_example, probabs, global_step, next_q_types, next_l])
predicted_probabilities = probs_val[0]
pred_index = np.argmax(probs_val[0])
labels = labels_val[0]
gold = np.argmax(labels)
type_accs[q_type_val + 1] += acc_val
type_counts[q_type_val + 1] += 1
total_loss += loss_val
total_acc += acc_val
print("Sample loss: " + str(loss_val))
print("Sample acc: " + str(acc_val))
util.print_predictions(data_conf.EVAL_DIR, step, gold, predicted_probabilities, data_conf.MODE)
step += 1
print("Total acc: " + str(total_acc / step))
print("Local_step: " + str(step * batch_size))
print("Global_step: " + str(gs_val))
print("===========================================")
except tf.errors.OutOfRangeError:
summary = tf.compat.v1.Summary()
summary.value.add(tag='validation_loss', simple_value=total_loss / step)
summary.value.add(tag='validation_accuracy', simple_value=(total_acc / step))
summary_writer.add_summary(summary, gs_val)
keys = util.get_question_keys()
if data_conf.MODE == "val":
with open(data_conf.EVAL_DIR + "/val_accuracy.txt", "a") as file:
file.write("global step: " + str(gs_val) + " - total accuracy: " + str(
total_acc / step) + "- total loss: " + str(total_loss / step) + "\n")
file.write("Types (name / count / correct / accuracy):\n")
for entry in zip(keys, type_counts, type_accs, (type_accs / type_counts)):
file.write(str(entry) + "\n")
file.write("===================================================================" + "\n")
util.save_eval_score(
"global step: " + str(gs_val) + " - acc : " + str(
total_acc / step) + " - total loss: " + str(
total_loss / step) + " - " + data_conf.TRAIN_DIR + "_" + str(gs_val))
finally:
coord.request_stop()
coord.join(threads)
if __name__ == '__main__':
train_model()
eval_model()
| 12,860 | 39.316614 | 115 |
py
|
fat-albert
|
fat-albert-master/abmn/src/movieqa/convert_movieqa_to_python3.py
|
import os
import re
def read_lines(fname):
with open(fname, "r") as f:
return f.readlines()
def write_lines(lines, fname):
with open(fname, "w") as f:
for line in lines:
f.write(line)
def convert_print_statements(py2_lines):
py3_lines = []
for py2_line in py2_lines:
# print("py2 line %s" % py2_line)
print_statement = re.match(r'(.*)print ["\'](.*)["\'](.*)', py2_line)
if print_statement:
# print("found print statement %s" % py2_line)
indent = print_statement.group(1)
statement = print_statement.group(2)
print_args = print_statement.group(3)
py3_line = "%sprint('%s')%s\n" % (indent, statement, print_args)
# print("converted print statement to python 3 %s" % py3_line)
else:
py3_line = py2_line
py3_lines.append(py3_line)
return py3_lines
def convert_iteritems(py2_lines):
py3_lines = []
for py2_line in py2_lines:
py3_line = py2_line.replace(".iteritems()", ".items()")
py3_lines.append(py3_line)
return py3_lines
def convert_movieqa_code():
scripts = ['data/data_loader.py', 'data/story_loader.py']
for script in scripts:
py2_script = script + "2"
os.rename(script, script + "2")
py2_lines = read_lines(py2_script)
py3_lines = convert_print_statements(py2_lines)
py3_lines = convert_iteritems(py3_lines)
write_lines(py3_lines, script)
if __name__ == "__main__":
convert_movieqa_code()
| 1,571 | 24.770492 | 77 |
py
|
fat-albert
|
fat-albert-master/abmn/src/movieqa/preprocess.py
|
# prepare word embeddings, records and vocabulary
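# Converts the raw MovieQA json files into TFRecord datasets for the train,
# val and test splits, building the shared vocabulary from the pretrained GloVe
# embeddings along the way.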
import sys
import os
import numpy as np
import tensorflow as tf
import _pickle as pickle
present_path = os.path.dirname(os.path.realpath(sys.argv[0]))
sys.path.append(os.path.join(present_path, '../'))
import core.util as util
import movieqa.data_conf as data_conf
if not os.path.exists(data_conf.DATA_PATH):
    print(
        "ERROR: could not find MovieQA data folder. "
        "Please manually download the dataset and save the files to: %s" % data_conf.DATA_PATH)
exit(1)
sys.path.append('data')
import movieqa.data.data_loader as movie
plot_dict = {}
# creation of data for one set
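# Serializes every question of a split as a tf.SequenceExample: question,
# answers and plot sentences are stored as vocabulary ids, the labels as a
# one-hot vector over the five answers, and movie id / question type as context
# features; converted plots are cached in plot_dict so each movie is processed
# only once.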
def create_movieqa_data(qa_json_file, name, outfolder, embeddings, qa_ids=None):
valid_count = 0
movie.cfg.QA_JSON = qa_json_file
print("Preprocessing qa file and creating records for %s" % movie.cfg.QA_JSON)
mqa = movie.DataLoader()
story, qa = mqa.get_story_qa_data(name, 'split_plot')
set_path = outfolder + "/" + name + ".tfrecords"
writer = tf.io.TFRecordWriter(set_path)
# filter questions by ids
if qa_ids:
qa = filter_qa(qa, qa_ids)
print("Selected %d questions based on %d provided ids" % (len(qa), len(qa_ids)))
with open(os.path.join(outfolder, 'val.pickle'), 'wb') as handle:
pickle.dump(qa, handle)
for k, question in enumerate(qa):
q = []
ans = []
l = np.zeros(shape=[5], dtype=float)
ex = tf.train.SequenceExample()
words = util.normalize_text(question.question)
# lowercase now
words = [word.lower() for word in words]
movie_id = question.imdb_key
question_size = len(words)
if name != "test":
l[question.correct_index] = 1.0
if words[0] in util.question_types:
question_type = util.question_types[words[0]]
else:
question_type = -1
ex.context.feature["question_type"].int64_list.value.append(question_type)
for i, word in enumerate(words):
if i < data_conf.Q_MAX_WORD_PER_SENT_COUNT:
w_vec = (util.get_word_vector(embeddings, word, data_conf.EMBEDDING_SIZE))
if not w_vec:
w_vec = (util.get_word_vector(embeddings, word, data_conf.EMBEDDING_SIZE))
q.append(w_vec)
if not movie_id in plot_dict:
plot = story.get(movie_id)
p_word_ids = create_plot_record(embeddings, plot, movie_id)
plot_dict[movie_id] = p_word_ids
else:
p_word_ids = plot_dict[movie_id]
for i, answer in enumerate(question.answers):
a = []
words = util.normalize_text(answer)
for j, word in enumerate(words):
if j < data_conf.Q_MAX_WORD_PER_SENT_COUNT:
w_vec = (util.get_word_vector(embeddings, word, data_conf.EMBEDDING_SIZE))
if not w_vec:
w_vec = (util.get_word_vector(embeddings, word, data_conf.EMBEDDING_SIZE))
a.append(w_vec)
ans.append(a)
q_type_feature = tf.train.Feature(
int64_list=tf.train.Int64List(value=[question_type]))
q_size_feature = tf.train.Feature(
int64_list=tf.train.Int64List(value=[question_size]))
movie_id_feature = tf.train.Feature(
bytes_list=tf.train.BytesList(value=[str.encode(movie_id)]))
label_list_feature = [
tf.train.Feature(float_list=tf.train.FloatList(value=[label]))
for label in l]
answer_list_feature = [
tf.train.Feature(int64_list=tf.train.Int64List(value=aw))
for aw in ans]
plot_list_feature = [
tf.train.Feature(int64_list=tf.train.Int64List(value=pl))
for pl in p_word_ids]
question_list_feature = [tf.train.Feature(int64_list=tf.train.Int64List(
value=q))]
feature_list = {
"labels": tf.train.FeatureList(
feature=label_list_feature),
"answers": tf.train.FeatureList(
feature=answer_list_feature),
"question": tf.train.FeatureList(
feature=question_list_feature),
"plot": tf.train.FeatureList(
feature=plot_list_feature),
}
context = tf.train.Features(feature={
"question_type": q_type_feature,
"question_size": q_size_feature,
"movie_id": movie_id_feature
})
feature_lists = tf.train.FeatureLists(feature_list=feature_list)
example_sequence = tf.train.SequenceExample(
feature_lists=feature_lists, context=context)
serialized = example_sequence.SerializeToString()
writer.write(serialized)
valid_count += 1
    writer.close()
    print(name + ' set completed - files written to ' + set_path)
def filter_qa(qas, qa_ids):
return [qa for qa in qas if qa.qid in qa_ids]
# create vocab ids for plot
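# Converts one plot into lists of vocabulary ids, truncating to
# P_MAX_SENT_COUNT sentences and P_MAX_WORD_PER_SENT_COUNT words per sentence.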
def create_plot_record(embeddings, plot, movie_id):
p = []
sent_lens = []
plot_size = 0
for i, pl in enumerate(plot):
words = util.normalize_text(plot[i])
if (len(words) > 0) and (words[0] != ''):
p_sent = []
word_count = 0
plot_size += 1
for j, word in enumerate(words):
if (j < data_conf.P_MAX_WORD_PER_SENT_COUNT) and (plot_size < data_conf.P_MAX_SENT_COUNT):
p_sent.append(util.get_word_vector(embeddings, word, data_conf.EMBEDDING_SIZE))
word_count += 1
sent_lens.append(word_count)
p.append(p_sent)
return p
def create_data_set(dataset_file, setname, outfolder, embeddings):
if not os.path.exists(outfolder):
os.makedirs(outfolder)
create_movieqa_data(dataset_file, setname, outfolder, embeddings)
def load_and_create_vocab():
print('loading glove model...')
embeddings = util.loadGloveModel(data_conf.PRETRAINED_EMBEDDINGS_PATH)
print('loading complete - loaded model with ' + str(len(embeddings)) + ' words')
util.init_vocab(data_conf.EMBEDDING_SIZE)
# use previous vocab for adding new words in adversarial paraphrasing
util.restore_vocab(data_conf.EMBEDDING_DIR)
return embeddings
def create_complete_dataset():
embeddings = load_and_create_vocab()
create_data_set(movie.cfg.QA_JSON, 'train', data_conf.TRAIN_RECORD_PATH, embeddings)
create_data_set(data_conf.EVAL_FILE, 'val', data_conf.EVAL_RECORD_PATH, embeddings)
create_data_set(movie.cfg.QA_JSON, 'test', data_conf.TEST_RECORD_PATH, embeddings)
print("saving embeddings")
util.save_embeddings(data_conf.EMBEDDING_DIR, data_conf.EMBEDDING_SIZE)
def create_validation_dataset(split):
print("Prepare embeddings for modified input ...")
embeddings = load_and_create_vocab()
create_movieqa_data(data_conf.EVAL_FILE, split, data_conf.EVAL_RECORD_PATH, embeddings)
# save updated vocab file with additional new words
new_vocab_size = util.save_embeddings(data_conf.EMBEDDING_DIR, data_conf.EMBEDDING_SIZE)
return new_vocab_size
def read_qa_ids(filename):
with open(filename, "r") as f:
return [id.strip() for id in f.readlines()]
def create_200_random_validation_dataset(qa_ids_file):
embeddings = load_and_create_vocab()
outfolder = os.path.join(data_conf.RECORD_DIR, 'val_random_200')
if not os.path.exists(outfolder):
os.makedirs(outfolder)
qa_ids = read_qa_ids(qa_ids_file)
create_movieqa_data(movie.cfg.QA_JSON, 'val', outfolder, embeddings, qa_ids)
print("saving embeddings")
util.save_embeddings(data_conf.EMBEDDING_DIR, data_conf.EMBEDDING_SIZE)
if __name__ == "__main__":
qa_ids_file = sys.argv[1] # 'data/200_random_validation_qas_white_box_attacks.txt'
create_200_random_validation_dataset(qa_ids_file)
| 7,931 | 33.637555 | 106 |
py
|
fat-albert
|
fat-albert-master/abmn/src/movieqa/data/data/data_loader.py
|
"""MovieQA - Story Understanding Benchmark.
Data loader for reading movies and multiple-choice QAs
http://movieqa.cs.toronto.edu/
Release: v1.0
Date: 18 Nov 2015
"""
from collections import namedtuple
import json
import config as cfg
import story_loader
TextSource = namedtuple('TextSource', 'plot dvs subtitle script')
# TODO: add characters info
MovieInfo = namedtuple('Movie', 'name year genre text video')
QAInfo = namedtuple('QAInfo',
'qid question answers correct_index imdb_key plot_alignment')
class DataLoader(object):
"""MovieQA: Data loader class"""
def __init__(self):
self.load_me_stories = story_loader.StoryLoader()
self.movies_map = dict()
self.qa_list = list()
self.data_split = dict()
self._populate_movie()
self._populate_splits()
self._populate_qa()
print('Initialized MovieQA data loader!')
# region Initialize and Load class data
def _populate_movie(self):
"""Create a map of (imdb_key, MovieInfo) and its inversed map.
"""
with open(cfg.MOVIES_JSON, 'r') as f:
movies_json = json.load(f)
for movie in movies_json:
t = movie['text']
ts = TextSource(t['plot'], t['dvs'], t['subtitle'], t['script'])
vs = None
self.movies_map[movie['imdb_key']] = MovieInfo(
movie['name'], movie['year'], movie['genre'], ts, vs)
self.movies_map_inv = {(v.name + ' ' + v.year):k
for k, v in self.movies_map.items()}
def _populate_qa(self):
"""Create a list of QaInfo for all question and answers.
"""
with open(cfg.QA_JSON, 'r') as f:
qa_json = json.load(f)
for qa in qa_json:
self.qa_list.append(
QAInfo(qa['qid'], qa['question'], qa['answers'], qa['correct_index'],
qa['imdb_key'], qa['plot_alignment']))
def _populate_splits(self):
"""Get the list of movies in each split.
"""
with open(cfg.SPLIT_JSON, 'r') as f:
self.data_split = json.load(f)
# endregion
# region Pretty-Print :)
def pprint_qa(self, qa):
"""Pretty print a QA.
"""
print('----------------------------------------')
movie = self.movies_map[qa.imdb_key]
print('Movie: %s %s' % (movie.name, movie.year))
print('Question: %s' % qa.question)
print('Options:')
for k, ans in enumerate(qa.answers):
if qa.correct_index == k:
                print('***', end=' ')
print('\t%s' % ans)
print('----------------------------------------')
def pprint_movie(self, movie):
"""Pretty print a Movie.
"""
print('----------------------------------------')
print('Movie: %s %s' % (movie.name, movie.year))
print('Genre: %s' % movie.genre)
print('Available texts:')
for k, v in movie.text._asdict().items():
print('%s: %s' % (k.rjust(12), v))
print('----------------------------------------')
# endregion
def get_split_movies(self, split):
"""Get the list of movies in this split.
Raises:
ValueError: If input split type is unrecognized.
"""
this_split_movies = []
if split == 'train':
this_split_movies = self.data_split['train']
elif split == 'val':
this_split_movies = self.data_split['val']
elif split == 'test':
this_split_movies = self.data_split['test']
elif split == 'full':
this_split_movies = list(self.data_split['train'])
this_split_movies.extend(self.data_split['val'])
this_split_movies.extend(self.data_split['test'])
else:
raise ValueError('Invalid split type. Use "train", "val", "test", or "full"')
return this_split_movies
def get_story_qa_data(self, split='train', story_type='plot'):
"""Provide data based on a particular split and story-type.
Args:
split: 'train' OR 'val' OR 'test' OR 'full'.
story_type: 'plot', 'split_plot', 'subtitle', 'dvs', 'script'.
Returns:
story: Story for each movie indexed by imdb_key.
qa: The list of QAs in this split.
"""
this_split_movies = self.get_split_movies(split)
# Load story
this_movies_map = {k: v for k, v in self.movies_map.items()
if k in this_split_movies}
story = self.load_me_stories.load_story(this_movies_map, story_type)
# Restrict this split movies to ones which have a story,
# get restricted QA list
this_split_movies = [m for m in this_split_movies if m in story]
qa = [qa for qa in self.qa_list if qa.imdb_key in this_split_movies]
return story, qa
def get_video_list(self, split='train', video_type='qa_clips'):
"""Provide data for a particular split and video type.
Args:
split: 'train' OR 'val' OR 'test' OR 'full'.
video_type: 'qa_clips', 'all_clips'.
Returns:
video_list: List of videos indexed by qid (for clips) or movie (for full).
qa: The list of QAs in this split.
Raises:
ValueError: If input video type is unrecognized.
"""
this_split_movies = self.get_split_movies(split)
# Get all video QAs
qa = [qa for qa in self.qa_list if qa.video_clips and qa.imdb_key in this_split_movies]
video_list = {}
if video_type == 'qa_clips':
# add each qa's video clips to video list
for qa_one in qa:
video_list.update({qa_one.qid:qa_one.video_clips})
elif video_type == 'all_clips':
# collect list of clips by movie
for qa_one in qa:
if qa_one.imdb_key in video_list.keys():
video_list[qa_one.imdb_key].extend(qa_one.video_clips)
else:
video_list.update({qa_one.imdb_key:list(qa_one.video_clips)})
# keep non-repeated clips
for imdb_key in video_list.keys():
video_list[imdb_key] = list(set(video_list[imdb_key]))
else:
raise ValueError('Invalid video type. Use "qa_clips" or "all_clips"')
return video_list, qa
| 6,504 | 32.188776 | 95 |
py
|