Dataset schema (⌀ = column contains nulls):

| column | dtype | observed values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 7 to 1.04M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 247 |
| max_stars_repo_name | string | length 4 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 368k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24 ⌀ |
| max_issues_repo_path | string | length 4 to 247 |
| max_issues_repo_name | string | length 4 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 116k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24 ⌀ |
| max_forks_repo_path | string | length 4 to 247 |
| max_forks_repo_name | string | length 4 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24 ⌀ |
| content | string | length 1 to 1.04M |
| avg_line_length | float64 | 1.77 to 618k |
| max_line_length | int64 | 1 to 1.02M |
| alphanum_fraction | float64 | 0 to 1 |
| original_content | string | length 7 to 1.04M |
| filtered:remove_function_no_docstring | int64 | -102 to 942k |
| filtered:remove_class_no_docstring | int64 | -354 to 977k |
| filtered:remove_delete_markers | int64 | 0 to 60.1k |
--- record 1 ---
hexsha: a23c631e3dd2c756a6d94518dd902a850aaee56b | size: 5,005 | ext: py | lang: Python
path: data/dataset_implementations/rnn/single_sequence_dataset.py
repo: pdeubel/world-models-testing @ 36f2baf79898452e677fe141f11ba434f92e9218 | licenses: ["MIT"]
(path, repo, head hexsha, and licenses are identical across the max_stars, max_issues, and max_forks column groups; all counts and event datetimes are null)
content (filtered):
import os
import h5py
import numpy as np
import torch
from torch.utils.data import Dataset
avg_line_length: 46.342593 | max_line_length: 119 | alphanum_fraction: 0.710689
original_content:
import os
import h5py
import numpy as np
import torch
from torch.utils.data import Dataset
class GUISingleSequenceDataset(Dataset):
def __init__(self, root_dir: str, sequence_length: int, vae_preprocessed_data_path: str, hdf5_data_group_path: str,
actions_transformation_function=None, rewards_transformation_function=None):
self.root_dir = root_dir
self.sequence_length = sequence_length
self.vae_preprocessed_data_path = vae_preprocessed_data_path
self.hdf5_data_group_path = hdf5_data_group_path
self.actions_transformation_function = actions_transformation_function
self.rewards_transformation_function = rewards_transformation_function
with np.load(os.path.join(self.root_dir, "data.npz")) as data:
self.rewards: torch.Tensor = torch.from_numpy(data["rewards"]).unsqueeze(-1)
self.actions: torch.Tensor = torch.from_numpy(data["actions"])
self.vae_preprocessed_data = h5py.File(vae_preprocessed_data_path, "r")
self.mus = self.vae_preprocessed_data[f"{self.hdf5_data_group_path}/mus"]
self.log_vars = self.vae_preprocessed_data[f"{self.hdf5_data_group_path}/log_vars"]
self.dataset_length = self.rewards.size(0) - self.sequence_length
assert self.rewards.size(0) == self.actions.size(0) == (self.mus.shape[0] - 1) == (self.log_vars.shape[0] - 1)
assert self.__len__() > 0, ("Dataset length is 0 or negative, probably too large sequence length or too few "
"data samples")
def __len__(self):
return self.dataset_length
def __getitem__(self, index):
sub_sequence_mus = self.mus[index:index + self.sequence_length + 1]
sub_sequence_log_vars = self.log_vars[index:index + self.sequence_length + 1]
mus = sub_sequence_mus[:-1]
next_mus = sub_sequence_mus[1:]
log_vars = sub_sequence_log_vars[:-1]
next_log_vars = sub_sequence_log_vars[1:]
rewards = self.rewards[index:index + self.sequence_length]
actions = self.actions[index:index + self.sequence_length]
if self.rewards_transformation_function is not None:
rewards = self.rewards_transformation_function(rewards)
if self.actions_transformation_function is not None:
actions = self.actions_transformation_function(actions)
return mus, next_mus, log_vars, next_log_vars, rewards, actions
class GUISingleSequenceShiftedDataset(Dataset):
def __init__(self, root_dir: str, sequence_length: int, vae_preprocessed_data_path: str, hdf5_data_group_path: str,
actions_transformation_function=None, rewards_transformation_function=None):
self.root_dir = root_dir
self.sequence_length = sequence_length
self.vae_preprocessed_data_path = vae_preprocessed_data_path
self.hdf5_data_group_path = hdf5_data_group_path
self.actions_transformation_function = actions_transformation_function
self.rewards_transformation_function = rewards_transformation_function
with np.load(os.path.join(self.root_dir, "data.npz")) as data:
self.rewards: torch.Tensor = torch.from_numpy(data["rewards"]).unsqueeze(-1)
self.actions: torch.Tensor = torch.from_numpy(data["actions"])
self.vae_preprocessed_data = h5py.File(vae_preprocessed_data_path, "r")
self.mus = self.vae_preprocessed_data[f"{self.hdf5_data_group_path}/mus"]
self.log_vars = self.vae_preprocessed_data[f"{self.hdf5_data_group_path}/log_vars"]
self.dataset_length = self.rewards.size(0) // self.sequence_length
assert self.rewards.size(0) == self.actions.size(0) == (self.mus.shape[0] - 1) == (self.log_vars.shape[0] - 1)
assert self.__len__() > 0, ("Dataset length is 0 or negative, probably too large sequence length or too few "
"data samples")
def __len__(self):
return self.dataset_length
def __getitem__(self, index):
index_start_point = index * self.sequence_length
sub_sequence_mus = self.mus[index_start_point:index_start_point + self.sequence_length + 1]
sub_sequence_log_vars = self.log_vars[index_start_point:index_start_point + self.sequence_length + 1]
mus = sub_sequence_mus[:-1]
next_mus = sub_sequence_mus[1:]
log_vars = sub_sequence_log_vars[:-1]
next_log_vars = sub_sequence_log_vars[1:]
rewards = self.rewards[index_start_point:index_start_point + self.sequence_length]
actions = self.actions[index_start_point:index_start_point + self.sequence_length]
if self.rewards_transformation_function is not None:
rewards = self.rewards_transformation_function(rewards)
if self.actions_transformation_function is not None:
actions = self.actions_transformation_function(actions)
return mus, next_mus, log_vars, next_log_vars, rewards, actions
filtered:remove_function_no_docstring: 4,658 | filtered:remove_class_no_docstring: 45 | filtered:remove_delete_markers: 208
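The two Dataset classes above differ only in how an index maps onto a window: GUISingleSequenceDataset yields overlapping windows starting at every timestep (length N - sequence_length), while GUISingleSequenceShiftedDataset yields non-overlapping blocks (length N // sequence_length). A minimal usage sketch, assuming a data.npz (with "rewards" and "actions" arrays) and a VAE-preprocessed HDF5 file exist at the illustrative paths below:

# Usage sketch only; the paths, group name, and sizes are illustrative
# assumptions, not part of the dataset record.
from torch.utils.data import DataLoader

dataset = GUISingleSequenceDataset(
    root_dir="data/seq_0",                        # directory containing data.npz
    sequence_length=32,
    vae_preprocessed_data_path="vae_latents.h5",  # holds <group>/mus and <group>/log_vars
    hdf5_data_group_path="seq_0",
)
loader = DataLoader(dataset, batch_size=16, shuffle=True)
mus, next_mus, log_vars, next_log_vars, rewards, actions = next(iter(loader))
# next_mus/next_log_vars are the same latents shifted one step forward in time,
# i.e. the prediction targets for a sequence model.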
--- record 2 ---
hexsha: fa3a643a8c4d5d570be2001b96c00037782267da | size: 1,110 | ext: py | lang: Python
path: release/stubs.min/System/__init___parts/DivideByZeroException.py
repo: tranconbv/ironpython-stubs @ a601759e6c6819beff8e6b639d18a24b7e351851 | licenses: ["MIT"]
(path, repo, head hexsha, and licenses are identical across the max_stars, max_issues, and max_forks column groups; all counts and event datetimes are null)
content (filtered):
class DivideByZeroException(ArithmeticException):
"""
The exception that is thrown when there is an attempt to divide an integral or decimal value by zero.
DivideByZeroException()
DivideByZeroException(message: str)
DivideByZeroException(message: str,innerException: Exception)
"""
def ZZZ(self):
"""hardcoded/mock instance of the class"""
return DivideByZeroException()
instance=ZZZ()
"""hardcoded/returns an instance of the class"""
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,message=None,innerException=None):
"""
__new__(cls: type)
__new__(cls: type,message: str)
__new__(cls: type,message: str,innerException: Exception)
__new__(cls: type,info: SerializationInfo,context: StreamingContext)
"""
pass
SerializeObjectState=None
avg_line_length: 34.6875 | max_line_length: 215 | alphanum_fraction: 0.726126
original_content:
class DivideByZeroException(ArithmeticException):
"""
The exception that is thrown when there is an attempt to divide an integral or decimal value by zero.
DivideByZeroException()
DivideByZeroException(message: str)
DivideByZeroException(message: str,innerException: Exception)
"""
def ZZZ(self):
"""hardcoded/mock instance of the class"""
return DivideByZeroException()
instance=ZZZ()
"""hardcoded/returns an instance of the class"""
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,message=None,innerException=None):
"""
__new__(cls: type)
__new__(cls: type,message: str)
__new__(cls: type,message: str,innerException: Exception)
__new__(cls: type,info: SerializationInfo,context: StreamingContext)
"""
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
SerializeObjectState=None
filtered:remove_function_no_docstring: 28 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 48
--- record 3 ---
hexsha: 3d85f4be772c1a036395de1aa5c734416e429682 | size: 11,772 | ext: py | lang: Python
path: core/tenhou/log.py
repo: SakuraSa/TenhouLoggerX @ 7d6bcfb7e22d631673c61321f3af1c05ec011db5 | licenses: ["MIT"]
max_stars_count: 2 (events 2016-09-19T16:33:29.000Z to 2017-12-09T01:02:39.000Z); max_issues and max_forks counts and event datetimes are null
content (filtered):
#!/usr/bin/env python
# coding=utf-8
"""
core.tenhou.log
"""
__author__ = 'Rnd495'
import os
import json
import datetime
import urllib
from core.configs import Configs
configs = Configs.instance()
class Log(object):
"""
Log
"""
    # (the no-docstring filter stripped this class's method bodies, leaving 14 bare
    # @property and 3 bare @staticmethod decorators here; the full class is
    # preserved in original_content below)
class StatisticForSubLog(object):
"""
StatisticForSubLog
"""
    # (the no-docstring filter stripped this class's methods, leaving 14 bare
    # @property decorators here; the full class is preserved in original_content below)
def get_results(ref_list, player_name):
"""
do statistics on given refs for given player
result dict format (example value is avg value on data set 2015/05/15) :
{
fulu_chong : 0.3940,
dama : 0.1165,
win_time : 11.50,
chong : 0.1347,
win : 0.2484,
win_point : 6690,
ends_listening : 0.5170,
fulu : 0.3717,
after_richi : 0.0288,
now_line_days : 3.71,
max_line_days : 16.67,
first_richi : 0.1597,
count : 1000,
}
:param ref_list: ref list
:param player_name: player name
:return: result dict
"""
counter = {}
adder = {}
game_date_text_set = set()
ref_counter = 0
for ref in ref_list:
ref_counter += 1
log = Log(ref)
game_date_text_set.add(log.time.strftime("%Y%m%d"))
player_index = log.get_player_index(player_name)
if player_index < 0:
# should not be here
continue
for sub_log in log.sub_log:
statistics = StatisticForSubLog(sub_log)
results = statistics.get_result(player_index)
for key, value in results.iteritems():
if value is not None:
counter[key] = counter.get(key, 0) + 1
adder[key] = adder.get(key, 0) + value
results = {}
for key, value in counter.iteritems():
results[key] = (adder[key] / float(value)) if value else 0
max_line_days = now_line_days = 0
last_date = None
for date_text in sorted(game_date_text_set):
now_date = datetime.datetime.strptime(date_text, "%Y%m%d")
if last_date:
if int((now_date - last_date).days) <= 1:
now_line_days += 1
max_line_days = max(max_line_days, now_line_days)
else:
now_line_days = 1
last_date = now_date
results['max_line_days'] = max_line_days
results['now_line_days'] = now_line_days
results['count'] = ref_counter
return results
if __name__ == '__main__':
import time
from sqlalchemy import func, desc
from core.models import get_new_session, PlayerLog
session = get_new_session()
counter = func.count(PlayerLog.name)
query = session.query(PlayerLog.name).filter((PlayerLog.lobby == '0000') & (PlayerLog.name != 'NoName')) \
.group_by(PlayerLog.name).having(counter >= 50).order_by(desc(counter))
results = {}
for name in (row[0] for row in query):
start_time = time.time()
query = session.query(PlayerLog.ref).filter((PlayerLog.name == name) & (PlayerLog.lobby == '0000'))
refs = [row[0] for row in query]
results[name] = get_results(refs, name)
size = len(refs)
time_cost = time.time() - start_time
hz = size / time_cost
print '%6d' % size, '%.2fs' % time_cost, '%.2fHz' % hz, name
session.close()
data_lists = {}
for row in results.itervalues():
for key, value in row.iteritems():
data_lists.setdefault(key, []).append(value)
print ''
print '%20s' % 'type', '%6s' % 'avg', '%6s' % 'max', '%6s' % 'min', '%6s' % 'mu'
# import numpy as np
from scipy.stats import norm
# import matplotlib.pyplot as plt
for key, data_list in data_lists.iteritems():
avg = sum(data_list) / float(len(data_list))
mu, std = norm.fit(data_list)
print '%20s' % key, format_data(avg), format_data(max(data_list)), format_data(min(data_list)), format_data(
mu), std
#
# # Plot the histogram.
# plt.hist(data_list, bins=25, normed=True, alpha=0.6, color='g')
#
# # Plot the PDF.
# xmin, xmax = plt.xlim()
# x = np.linspace(xmin, xmax, 100)
# p = norm.pdf(x, mu, std)
# plt.plot(x, p, 'k', linewidth=2)
# title = "%s fit results: mu = %.2f, std = %.2f" % (key, mu, std)
# plt.title(title)
#
# plt.show()
avg_line_length: 29.802532 | max_line_length: 116 | alphanum_fraction: 0.569147
original_content:
#!/usr/bin/env python
# coding=utf-8
"""
core.tenhou.log
"""
__author__ = 'Rnd495'
import os
import json
import datetime
import urllib
from core.configs import Configs
configs = Configs.instance()
class Log(object):
"""
Log
"""
def __init__(self, ref):
with open(Log.get_file_name(ref), 'rb') as file_handle:
self.json = json.load(file_handle)
# cache
self._scores = None
self._rankings = None
@property
def size(self):
return len(self.names)
@property
def sub_log(self):
return self.json['log']
@property
def ref(self):
return self.json['ref']
@property
def rule(self):
return self.json['rule']['disp']
@property
def lobby(self):
return self.ref.split('-')[2]
@property
def rule_code(self):
return self.ref.split('-')[1]
@property
def dans(self):
return self.json['dan']
@property
def names(self):
return self.json['name']
@property
def scores(self):
if not self._scores:
g = iter(self.json['sc'])
self._scores = zip(g, g)
return self._scores
@property
def time(self):
return datetime.datetime.strptime(self.ref[:10], '%Y%m%d%H')
@property
def points(self):
return [point[0] for point in self.scores]
@property
def pts(self):
return [point[1] for point in self.scores]
@property
def rankings(self):
if not self._rankings:
point_sorted = sorted(((point, index) for index, point in enumerate(self.points)), reverse=True)
self._rankings = [None] * len(point_sorted)
for ranking, (_, index) in enumerate(point_sorted):
self._rankings[index] = ranking
return self._rankings
@property
def rates(self):
return self.json['rate']
@staticmethod
def check_exists(ref):
return os.path.exists(Log.get_file_name(ref))
@staticmethod
def get_file_name(ref):
return os.path.join(configs.tenhou_log_dir, '%s.json' % ref)
@staticmethod
def iter_all():
for root, dirs, files in os.walk(configs.tenhou_log_dir):
for file_name in files:
ref = os.path.splitext(file_name)[0]
yield Log(ref)
def get_player_index(self, name):
try:
return self.names.index(name)
except ValueError:
return None
def get_tenhou_link(self, tw_name=None):
base = "/watch/log?"
params = {'ref': self.ref}
for i, name in enumerate(self.names):
if isinstance(name, unicode):
name = name.encode("utf-8")
params['UN%d' % i] = name
tw = None
if tw_name:
tw = self.get_player_index(tw_name)
if tw is not None:
params['tw'] = tw
return base + urllib.urlencode(params)
class StatisticForSubLog(object):
"""
StatisticForSubLog
"""
def __init__(self, sub_log):
self.sub_log = sub_log
self._richi_list = None
self._fulu_list = None
@property
def game_size(self):
return len(self.point_starts)
@property
def game_index(self):
return self.sub_log[0]
@property
def dora_pointers_out(self):
return self.sub_log[2]
@property
def dora_pointers_in(self):
return self.sub_log[3]
@property
def start_cards(self):
return self.sub_log[4:4 + 3 * self.game_size:3]
@property
def cards_ins(self):
return self.sub_log[5:5 + 3 * self.game_size:3]
@property
def cards_outs(self):
return self.sub_log[6:6 + 3 * self.game_size:3]
@property
def result_list(self):
return self.sub_log[16]
@property
def is_agari(self):
return self.result_description == u'和了'
@property
def result_description(self):
return self.result_list[0]
@property
def point_starts(self):
return self.sub_log[1]
@property
def point_changes(self):
return self.result_list[1::2]
@property
def richi_list(self):
if self._richi_list is None:
self._get_player_details()
return self._richi_list
@property
def is_fulu_list(self):
if self._fulu_list is None:
self._get_player_details()
return self._fulu_list
def _get_player_details(self):
self._richi_list = [None] * self.game_size
self._fulu_list = [False] * self.game_size
# scan card outs
for player_index, card_out in enumerate(self.cards_outs):
for time_index, action in enumerate(card_out):
if self._richi_list[player_index] is not None:
break
if self._fulu_list[player_index]:
break
if not isinstance(action, int):
if action.startswith('r'):
self._richi_list[player_index] = (time_index, action)
else:
self._fulu_list[player_index] = True
# scan card ins
for player_index, card_in in enumerate(self.cards_ins):
for time_index, action in enumerate(card_in):
if self._richi_list[player_index] is not None:
richi_time, richi_action = self._richi_list[player_index]
if time_index >= richi_time:
break
if self._fulu_list[player_index]:
break
elif not isinstance(action, int):
self._fulu_list[player_index] = True
def get_result(self, player_index):
# attack
point_change = sum(sc[player_index] for sc in self.point_changes)
win = self.is_agari and point_change > 0
win_point = point_change if win else None
# speed
first_richi = self.richi_list[player_index]
if first_richi:
for richi in self.richi_list:
if richi is not None and richi[0] < first_richi[0]:
first_richi = False
break
first_richi = bool(first_richi)
win_time = None
if win:
win_time = len(self.cards_ins[player_index])
# int
dama = None
if win:
dama = not self.is_fulu_list[player_index] and not self.richi_list[player_index]
ends_listening = None
if self.result_description == u'全員聴牌':
ends_listening = True
elif self.result_description == u'流局':
ends_listening = point_change > 0
# def
someone_chong = self.result_description == u'和了' and \
len(self.point_changes) == 1 and \
sum(p < 0 for p in self.point_changes[0]) == 1
chong = someone_chong and point_change < 0
fulu_chong = None
if chong:
fulu_chong = self.is_fulu_list[player_index]
# brave
after_richi = not first_richi and bool(self.richi_list[player_index])
fulu = self.is_fulu_list[player_index]
return dict(
win=win, win_point=win_point,
first_richi=first_richi, win_time=win_time,
dama=dama, ends_listening=ends_listening,
chong=chong, fulu_chong=fulu_chong,
after_richi=after_richi, fulu=fulu
)
def get_results(ref_list, player_name):
"""
do statistics on given refs for given player
result dict format (example value is avg value on data set 2015/05/15) :
{
fulu_chong : 0.3940,
dama : 0.1165,
win_time : 11.50,
chong : 0.1347,
win : 0.2484,
win_point : 6690,
ends_listening : 0.5170,
fulu : 0.3717,
after_richi : 0.0288,
now_line_days : 3.71,
max_line_days : 16.67,
first_richi : 0.1597,
count : 1000,
}
:param ref_list: ref list
:param player_name: player name
:return: result dict
"""
counter = {}
adder = {}
game_date_text_set = set()
ref_counter = 0
for ref in ref_list:
ref_counter += 1
log = Log(ref)
game_date_text_set.add(log.time.strftime("%Y%m%d"))
player_index = log.get_player_index(player_name)
if player_index < 0:
# should not be here
continue
for sub_log in log.sub_log:
statistics = StatisticForSubLog(sub_log)
results = statistics.get_result(player_index)
for key, value in results.iteritems():
if value is not None:
counter[key] = counter.get(key, 0) + 1
adder[key] = adder.get(key, 0) + value
results = {}
for key, value in counter.iteritems():
results[key] = (adder[key] / float(value)) if value else 0
max_line_days = now_line_days = 0
last_date = None
for date_text in sorted(game_date_text_set):
now_date = datetime.datetime.strptime(date_text, "%Y%m%d")
if last_date:
if int((now_date - last_date).days) <= 1:
now_line_days += 1
max_line_days = max(max_line_days, now_line_days)
else:
now_line_days = 1
last_date = now_date
results['max_line_days'] = max_line_days
results['now_line_days'] = now_line_days
results['count'] = ref_counter
return results
if __name__ == '__main__':
import time
from sqlalchemy import func, desc
from core.models import get_new_session, PlayerLog
session = get_new_session()
counter = func.count(PlayerLog.name)
query = session.query(PlayerLog.name).filter((PlayerLog.lobby == '0000') & (PlayerLog.name != 'NoName')) \
.group_by(PlayerLog.name).having(counter >= 50).order_by(desc(counter))
results = {}
for name in (row[0] for row in query):
start_time = time.time()
query = session.query(PlayerLog.ref).filter((PlayerLog.name == name) & (PlayerLog.lobby == '0000'))
refs = [row[0] for row in query]
results[name] = get_results(refs, name)
size = len(refs)
time_cost = time.time() - start_time
hz = size / time_cost
print '%6d' % size, '%.2fs' % time_cost, '%.2fHz' % hz, name
session.close()
data_lists = {}
for row in results.itervalues():
for key, value in row.iteritems():
data_lists.setdefault(key, []).append(value)
def format_data(d):
if d < 1:
return '%6s' % ('%.2f%%' % (d * 100))
elif abs(d) < 100:
return '%6s' % ('%.2f' % d)
else:
return '%6s' % ('%d' % d)
print ''
print '%20s' % 'type', '%6s' % 'avg', '%6s' % 'max', '%6s' % 'min', '%6s' % 'mu'
# import numpy as np
from scipy.stats import norm
# import matplotlib.pyplot as plt
for key, data_list in data_lists.iteritems():
avg = sum(data_list) / float(len(data_list))
mu, std = norm.fit(data_list)
print '%20s' % key, format_data(avg), format_data(max(data_list)), format_data(min(data_list)), format_data(
mu), std
#
# # Plot the histogram.
# plt.hist(data_list, bins=25, normed=True, alpha=0.6, color='g')
#
# # Plot the PDF.
# xmin, xmax = plt.xlim()
# x = np.linspace(xmin, xmax, 100)
# p = norm.pdf(x, mu, std)
# plt.plot(x, p, 'k', linewidth=2)
# title = "%s fit results: mu = %.2f, std = %.2f" % (key, mu, std)
# plt.title(title)
#
# plt.show()
filtered:remove_function_no_docstring: 5,939 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 994
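One idiom in Log.scores above is worth a note: passing the same iterator to zip() twice pairs consecutive elements, so the flat 'sc' list of alternating point/pt values becomes one (point, pt) tuple per player. A standalone illustration (Python 2, matching the file; the numbers are made up):

# zip() pulls alternately from the same iterator 'g', pairing up consecutive
# elements of the flat list.
sc = [250, 10.0, 240, -20.0, 260, 30.0, 250, -20.0]  # made-up example values
g = iter(sc)
print zip(g, g)  # [(250, 10.0), (240, -20.0), (260, 30.0), (250, -20.0)]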
--- record 4 ---
hexsha: b97e642f766dedecd2b8dc7fbaf1c4aba0a274fb | size: 5,267 | ext: py | lang: Python
path: tests/parser/features/test_assert.py
repo: upgradvisor/vyper @ 642884ea938a25793c1b2fac866e8458e63a7b49 | licenses: ["Apache-2.0"]
max_stars_count: 1 (event 2021-12-20T16:19:47.000Z); max_issues_count: 1 (event 2022-03-19T00:45:47.000Z); max_forks count and event datetimes are null
content (filtered):
import pytest
from eth_abi import decode_single
from eth_tester.exceptions import TransactionFailed
# web3 returns f"execution reverted: {err_str}"
# TODO move exception string parsing logic into assert_tx_failed
invalid_code = [
"""
@external
def test(a: int128) -> int128:
assert a > 1, ""
return 1 + a
""",
"""
@external
def test(a: int128) -> int128:
raise ""
""",
"""
@external
def test():
assert create_forwarder_to(self)
""",
]
# @pytest.mark.parametrize("code", invalid_code)  # decorated test removed by the no-docstring filter
valid_code = [
"""
@external
def mint(_to: address, _value: uint256):
raise
""",
"""
@internal
def ret1() -> int128:
return 1
@external
def test():
assert self.ret1() == 1
""",
"""
@internal
def valid_address(sender: address) -> bool:
selfdestruct(sender)
@external
def test():
assert self.valid_address(msg.sender)
""",
"""
@external
def test():
assert raw_call(msg.sender, b'', max_outsize=1, gas=10, value=1000*1000) == b''
""",
"""
@external
def test():
assert create_forwarder_to(self) == self
""",
]
# @pytest.mark.parametrize("code", valid_code)  # decorated test removed by the no-docstring filter
avg_line_length: 25.444444 | max_line_length: 94 | alphanum_fraction: 0.65806
original_content:
import pytest
from eth_abi import decode_single
from eth_tester.exceptions import TransactionFailed
# web3 returns f"execution reverted: {err_str}"
# TODO move exception string parsing logic into assert_tx_failed
def _fixup_err_str(s):
return s.replace("execution reverted: ", "")
def test_assert_refund(w3, get_contract_with_gas_estimation, assert_tx_failed):
code = """
@external
def foo():
assert 1 == 2
"""
c = get_contract_with_gas_estimation(code)
a0 = w3.eth.accounts[0]
gas_sent = 10 ** 6
tx_hash = c.foo(transact={"from": a0, "gas": gas_sent, "gasPrice": 10})
# More info on receipt status:
# https://github.com/ethereum/EIPs/blob/master/EIPS/eip-658.md#specification.
tx_receipt = w3.eth.getTransactionReceipt(tx_hash)
assert tx_receipt["status"] == 0
# Checks for gas refund from revert
assert tx_receipt["gasUsed"] < gas_sent
def test_assert_reason(w3, get_contract_with_gas_estimation, assert_tx_failed, memory_mocker):
code = """
@external
def test(a: int128) -> int128:
assert a > 1, "larger than one please"
return 1 + a
@external
def test2(a: int128, b: int128, extra_reason: String[32]) -> int128:
c: int128 = 11
assert a > 1, "a is not large enough"
assert b == 1, concat("b may only be 1", extra_reason)
return a + b + c
@external
def test3(reason_str: String[32]):
raise reason_str
"""
c = get_contract_with_gas_estimation(code)
assert c.test(2) == 3
with pytest.raises(TransactionFailed) as e_info:
c.test(0)
assert _fixup_err_str(e_info.value.args[0]) == "larger than one please"
# a = 0, b = 1
with pytest.raises(TransactionFailed) as e_info:
c.test2(0, 1, "")
assert _fixup_err_str(e_info.value.args[0]) == "a is not large enough"
# a = 1, b = 0
with pytest.raises(TransactionFailed) as e_info:
c.test2(2, 2, " because I said so")
assert _fixup_err_str(e_info.value.args[0]) == "b may only be 1" + " because I said so"
# return correct value
assert c.test2(5, 1, "") == 17
with pytest.raises(TransactionFailed) as e_info:
c.test3("An exception")
assert _fixup_err_str(e_info.value.args[0]) == "An exception"
invalid_code = [
"""
@external
def test(a: int128) -> int128:
assert a > 1, ""
return 1 + a
""",
"""
@external
def test(a: int128) -> int128:
raise ""
""",
"""
@external
def test():
assert create_forwarder_to(self)
""",
]
@pytest.mark.parametrize("code", invalid_code)
def test_invalid_assertions(get_contract, assert_compile_failed, code):
assert_compile_failed(lambda: get_contract(code))
valid_code = [
"""
@external
def mint(_to: address, _value: uint256):
raise
""",
"""
@internal
def ret1() -> int128:
return 1
@external
def test():
assert self.ret1() == 1
""",
"""
@internal
def valid_address(sender: address) -> bool:
selfdestruct(sender)
@external
def test():
assert self.valid_address(msg.sender)
""",
"""
@external
def test():
assert raw_call(msg.sender, b'', max_outsize=1, gas=10, value=1000*1000) == b''
""",
"""
@external
def test():
assert create_forwarder_to(self) == self
""",
]
@pytest.mark.parametrize("code", valid_code)
def test_valid_assertions(get_contract, code):
get_contract(code)
def test_assert_staticcall(get_contract, assert_tx_failed, memory_mocker):
foreign_code = """
state: uint256
@external
def not_really_constant() -> uint256:
self.state += 1
return self.state
"""
code = """
interface ForeignContract:
def not_really_constant() -> uint256: view
@external
def test():
assert ForeignContract(msg.sender).not_really_constant() == 1
"""
c1 = get_contract(foreign_code)
c2 = get_contract(code, *[c1.address])
# static call prohibits state change
assert_tx_failed(lambda: c2.test())
def test_assert_in_for_loop(get_contract, assert_tx_failed, memory_mocker):
code = """
@external
def test(x: uint256[3]) -> bool:
for i in range(3):
assert x[i] < 5
return True
"""
c = get_contract(code)
c.test([1, 2, 3])
assert_tx_failed(lambda: c.test([5, 1, 3]))
assert_tx_failed(lambda: c.test([1, 5, 3]))
assert_tx_failed(lambda: c.test([1, 3, 5]))
def test_assert_with_reason_in_for_loop(get_contract, assert_tx_failed, memory_mocker):
code = """
@external
def test(x: uint256[3]) -> bool:
for i in range(3):
assert x[i] < 5, "because reasons"
return True
"""
c = get_contract(code)
c.test([1, 2, 3])
assert_tx_failed(lambda: c.test([5, 1, 3]))
assert_tx_failed(lambda: c.test([1, 5, 3]))
assert_tx_failed(lambda: c.test([1, 3, 5]))
def test_assert_reason_revert_length(w3, get_contract, memory_mocker):
code = """
@external
def test() -> int128:
assert 1 == 2, "oops"
return 1
"""
c = get_contract(code)
w3.manager.provider.ethereum_tester.backend.is_eip838_error = lambda err: False
with pytest.raises(TransactionFailed) as e_info:
c.test()
error_bytes = eval(_fixup_err_str(e_info.value.args[0]))
assert len(error_bytes) == 100
msg = decode_single("string", error_bytes[36:])
assert msg == "oops"
filtered:remove_function_no_docstring: 3,906 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 204
--- record 5 ---
hexsha: 3f29ebd88cd6558019edc58f99d89c4d08dc0aae | size: 8,654 | ext: py | lang: Python
path: netl2api/l2api/__init__.py
repo: locaweb/netl2api @ f84c0362d1676c8771015b7cc48461e44a21c34d | licenses: ["Apache-2.0"]
max_stars_count: 3 (events 2015-04-08T18:50:02.000Z to 2019-06-05T22:40:45.000Z); max_issues and max_forks counts and event datetimes are null
content (filtered):
#!/usr/bin/python
# -*- coding: utf-8; -*-
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @author: Eduardo S. Scarpellini
# @author: Luiz Ozaki
__copyright__ = "Copyright 2012, Locaweb IDC"
from netl2api.l2api.exceptions import *
from netl2api.l2api.autocache import L2APIAutoCache
from netl2api.l2api.transport import SysSSHTransport #, TransportManager
__all__ = ["L2API"]
class L2API(L2APIAutoCache):
"""
Base class for L2 operations.
Vendor-specific classes should extend this, declare 'self.__VENDOR__' (vendor str),
'self.__HWTYPE__' (hardware type str), 'self.prompt_mark', 'self.error_mark' and
    'self.config_term_cmd' (see the transport classes to understand these last three parameters).
Ex.:
class ExampleVendorAPI(L2API):
def __init__(self, *args, **kwargs):
self.__VENDOR__ = "ExampleVendor"
self.__HWTYPE__ = "stackable_switch"
self.prompt_mark = "#"
self.error_mark = "% Error:"
self.config_term_cmd = "terminal length 0"
super(ExampleVendorAPI, self).__init__(*args, **kwargs)
...
def show_version(self):
...
def show_interfaces(self):
....
"""
# def __del__(self):
# if self.transport is not None:
# try:
# self.transport.close()
# except Exception:
# pass
avg_line_length: 40.064815 | max_line_length: 122 | alphanum_fraction: 0.605616
original_content:
#!/usr/bin/python
# -*- coding: utf-8; -*-
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @author: Eduardo S. Scarpellini
# @author: Luiz Ozaki
__copyright__ = "Copyright 2012, Locaweb IDC"
from netl2api.l2api.exceptions import *
from netl2api.l2api.autocache import L2APIAutoCache
from netl2api.l2api.transport import SysSSHTransport #, TransportManager
__all__ = ["L2API"]
class L2API(L2APIAutoCache):
"""
Base class for L2 operations.
Vendor-specific classes should extend this, declare 'self.__VENDOR__' (vendor str),
'self.__HWTYPE__' (hardware type str), 'self.prompt_mark', 'self.error_mark' and
    'self.config_term_cmd' (see the transport classes to understand these last three parameters).
Ex.:
class ExampleVendorAPI(L2API):
def __init__(self, *args, **kwargs):
self.__VENDOR__ = "ExampleVendor"
self.__HWTYPE__ = "stackable_switch"
self.prompt_mark = "#"
self.error_mark = "% Error:"
self.config_term_cmd = "terminal length 0"
super(ExampleVendorAPI, self).__init__(*args, **kwargs)
...
def show_version(self):
...
def show_interfaces(self):
....
"""
def __init__(self, host=None, port=None, username=None, passwd=None, transport=None):
super(L2API, self).__init__()
if not hasattr(self, "__VENDOR__"):
raise InvalidParameter("'self.__VENDOR__' is not defined (class '%s')" % self.__class__.__name__)
if not hasattr(self, "__HWTYPE__"):
raise InvalidParameter("'self.__HWTYPE__' is not defined (class '%s')" % self.__class__.__name__)
if not host or type(host) not in (str, unicode):
raise InvalidParameter("'host' parameter is not defined or invalid")
if not username or type(username) not in (str, unicode):
raise InvalidParameter("'username' parameter is not defined or invalid")
if not passwd or type(passwd) not in (str, unicode):
raise InvalidParameter("'passwd' parameter is not defined or invalid")
if not hasattr(self, "prompt_mark"):
self.prompt_mark = "#"
if not hasattr(self, "error_mark"):
self.error_mark = None
if not hasattr(self, "config_term_cmd"):
self.config_term_cmd = None
if not transport:
transport = SysSSHTransport.SysSSH
self.use_cache = True
self.cache_config = {
"show_system": { "ttl": 600,
"clear_on": [] },
"show_hostname": { "ttl": 600,
"clear_on": [] },
"show_version": { "ttl": 600,
"clear_on": [] },
"show_interfaces": { "ttl": 120,
"clear_on": ["enable_interface", "disable_interface",
"change_interface_description"] },
"show_lldp": { "ttl": 180,
"clear_on": [] },
"show_arp": { "ttl": 180,
"clear_on": [] },
"show_uplinks": { "ttl": 180,
"clear_on": [] },
"show_vlans": { "ttl": 180,
"clear_on": ["create_vlan", "destroy_vlan",
"enable_vlan", "disable_vlan",
"change_vlan_description",
"interface_attach_vlan", "interface_detach_vlan",
"lag_attach_vlan", "lag_detach_vlan"] },
"show_lags": { "ttl": 180,
"clear_on": ["create_lag", "destroy_lag",
"enable_lag", "disable_lag",
"change_lag_description",
"lag_attach_interface", "lag_detach_interface"] },
}
#self.transport = TransportManager.TransportPool(transport=transport, max_connections=2, host=host, port=port,
# username=username, passwd=passwd, prompt_mark=self.prompt_mark,
# error_mark=self.error_mark, config_term_cmd=self.config_term_cmd)
self.transport = transport(host=host, port=port, username=username, passwd=passwd, prompt_mark=self.prompt_mark,
error_mark=self.error_mark, config_term_cmd=self.config_term_cmd)
def dump_config(self):
raise NotImplementedError("Not implemented")
def save_config(self):
raise NotImplementedError("Not implemented")
def show_system(self):
raise NotImplementedError("Not implemented")
def show_hostname(self):
raise NotImplementedError("Not implemented")
def show_version(self):
raise NotImplementedError("Not implemented")
def show_interfaces(self, interface_id=None):
raise NotImplementedError("Not implemented")
def show_lldp(self, interface_id=None):
raise NotImplementedError("Not implemented")
def show_arp(self, interface_id=None):
raise NotImplementedError("Not implemented")
def show_uplinks(self):
raise NotImplementedError("Not implemented")
def show_vlans(self, vlan_id=None):
raise NotImplementedError("Not implemented")
def show_lags(self, lag_id=None):
raise NotImplementedError("Not implemented")
def create_vlan(self, vlan_id=None, vlan_description=None):
raise NotImplementedError("Not implemented")
def create_lag(self, lag_id=None, lag_description=None):
raise NotImplementedError("Not implemented")
def enable_interface(self, interface_id=None):
raise NotImplementedError("Not implemented")
def enable_vlan(self, vlan_id=None):
raise NotImplementedError("Not implemented")
def enable_lag(self, lag_id=None):
raise NotImplementedError("Not implemented")
def disable_interface(self, interface_id=None):
raise NotImplementedError("Not implemented")
def disable_vlan(self, vlan_id=None):
raise NotImplementedError("Not implemented")
def disable_lag(self, lag_id=None):
raise NotImplementedError("Not implemented")
def change_interface_description(self, interface_id=None, interface_description=None):
raise NotImplementedError("Not implemented")
def change_vlan_description(self, vlan_id=None, vlan_description=None):
raise NotImplementedError("Not implemented")
def change_lag_description(self, lag_id=None, lag_description=None):
raise NotImplementedError("Not implemented")
def destroy_vlan(self, vlan_id=None):
raise NotImplementedError("Not implemented")
def destroy_lag(self, lag_id=None):
raise NotImplementedError("Not implemented")
def interface_attach_vlan(self, interface_id=None, vlan_id=None, tagged=True):
raise NotImplementedError("Not implemented")
def interface_detach_vlan(self, interface_id=None, vlan_id=None, tagged=True):
raise NotImplementedError("Not implemented")
def lag_attach_vlan(self, lag_id=None, vlan_id=None, tagged=True):
raise NotImplementedError("Not implemented")
def lag_detach_vlan(self, lag_id=None, vlan_id=None, tagged=True):
raise NotImplementedError("Not implemented")
def lag_attach_interface(self, lag_id=None, interface_id=None):
raise NotImplementedError("Not implemented")
def lag_detach_interface(self, lag_id=None, interface_id=None):
raise NotImplementedError("Not implemented")
# def __del__(self):
# if self.transport is not None:
# try:
# self.transport.close()
# except Exception:
# pass
filtered:remove_function_no_docstring: 5,832 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 837
--- record 6 ---
hexsha: 53e3d50dfd8819a70c242ed90be54300221236ee | size: 12,531 | ext: py | lang: Python
path: pymeasure/instruments/agilent/agilent8257D.py
repo: dphaas/pymeasure @ 580c33bf5f1e409bb575c46bbd1df682bf27cfe1 | licenses: ["MIT"]
(path, repo, head hexsha, and licenses are identical across the max_stars, max_issues, and max_forks column groups; all counts and event datetimes are null)
content (filtered):
#
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2022 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from pymeasure.instruments import Instrument
from pymeasure.instruments.validators import truncated_range, strict_discrete_set
class Agilent8257D(Instrument):
"""Represents the Agilent 8257D Signal Generator and
provides a high-level interface for interacting with
the instrument.
.. code-block:: python
generator = Agilent8257D("GPIB::1")
generator.power = 0 # Sets the output power to 0 dBm
generator.frequency = 5 # Sets the output frequency to 5 GHz
generator.enable() # Enables the output
"""
power = Instrument.control(
":POW?;", ":POW %g dBm;",
""" A floating point property that represents the output power
in dBm. This property can be set.
"""
)
frequency = Instrument.control(
":FREQ?;", ":FREQ %e Hz;",
""" A floating point property that represents the output frequency
in Hz. This property can be set.
"""
)
start_frequency = Instrument.control(
":SOUR:FREQ:STAR?", ":SOUR:FREQ:STAR %e Hz",
""" A floating point property that represents the start frequency
in Hz. This property can be set.
"""
)
center_frequency = Instrument.control(
":SOUR:FREQ:CENT?", ":SOUR:FREQ:CENT %e Hz;",
""" A floating point property that represents the center frequency
in Hz. This property can be set.
"""
)
stop_frequency = Instrument.control(
":SOUR:FREQ:STOP?", ":SOUR:FREQ:STOP %e Hz",
""" A floating point property that represents the stop frequency
in Hz. This property can be set.
"""
)
start_power = Instrument.control(
":SOUR:POW:STAR?", ":SOUR:POW:STAR %e dBm",
""" A floating point property that represents the start power
in dBm. This property can be set.
"""
)
stop_power = Instrument.control(
":SOUR:POW:STOP?", ":SOUR:POW:STOP %e dBm",
""" A floating point property that represents the stop power
in dBm. This property can be set.
"""
)
dwell_time = Instrument.control(
":SOUR:SWE:DWEL1?", ":SOUR:SWE:DWEL1 %.3f",
""" A floating point property that represents the settling time
in seconds at the current frequency or power setting.
This property can be set.
"""
)
step_points = Instrument.control(
":SOUR:SWE:POIN?", ":SOUR:SWE:POIN %d",
""" An integer number of points in a step sweep. This property
can be set.
"""
)
is_enabled = Instrument.measurement(
":OUTPUT?",
""" Reads a boolean value that is True if the output is on. """,
cast=bool
)
has_modulation = Instrument.measurement(
":OUTPUT:MOD?",
""" Reads a boolean value that is True if the modulation is enabled. """,
cast=bool
)
########################
# Amplitude modulation #
########################
has_amplitude_modulation = Instrument.measurement(
":SOUR:AM:STAT?",
""" Reads a boolean value that is True if the amplitude modulation is enabled. """,
cast=bool
)
amplitude_depth = Instrument.control(
":SOUR:AM:DEPT?", ":SOUR:AM:DEPT %g",
""" A floating point property that controls the amplitude modulation
        in percent, which can take values from 0 to 100 %. """,
validator=truncated_range,
values=[0, 100]
)
AMPLITUDE_SOURCES = {
'internal': 'INT', 'internal 2': 'INT2',
'external': 'EXT', 'external 2': 'EXT2'
}
amplitude_source = Instrument.control(
":SOUR:AM:SOUR?", ":SOUR:AM:SOUR %s",
""" A string property that controls the source of the amplitude modulation
signal, which can take the values: 'internal', 'internal 2', 'external', and
'external 2'. """,
validator=strict_discrete_set,
values=AMPLITUDE_SOURCES,
map_values=True
)
####################
# Pulse modulation #
####################
has_pulse_modulation = Instrument.measurement(
":SOUR:PULM:STAT?",
""" Reads a boolean value that is True if the pulse modulation is enabled. """,
cast=bool
)
PULSE_SOURCES = {
'internal': 'INT', 'external': 'EXT', 'scalar': 'SCAL'
}
pulse_source = Instrument.control(
":SOUR:PULM:SOUR?", ":SOUR:PULM:SOUR %s",
""" A string property that controls the source of the pulse modulation
signal, which can take the values: 'internal', 'external', and
'scalar'. """,
validator=strict_discrete_set,
values=PULSE_SOURCES,
map_values=True
)
PULSE_INPUTS = {
'square': 'SQU', 'free-run': 'FRUN',
'triggered': 'TRIG', 'doublet': 'DOUB', 'gated': 'GATE'
}
pulse_input = Instrument.control(
":SOUR:PULM:SOUR:INT?", ":SOUR:PULM:SOUR:INT %s",
""" A string property that controls the internally generated modulation
input for the pulse modulation, which can take the values: 'square', 'free-run',
'triggered', 'doublet', and 'gated'.
""",
validator=strict_discrete_set,
values=PULSE_INPUTS,
map_values=True
)
pulse_frequency = Instrument.control(
":SOUR:PULM:INT:FREQ?", ":SOUR:PULM:INT:FREQ %g",
""" A floating point property that controls the pulse rate frequency in Hertz,
which can take values from 0.1 Hz to 10 MHz. """,
validator=truncated_range,
values=[0.1, 10e6]
)
########################
# Low-Frequency Output #
########################
low_freq_out_amplitude = Instrument.control(
":SOUR:LFO:AMPL? ", ":SOUR:LFO:AMPL %g VP",
"""A floating point property that controls the peak voltage (amplitude) of the
low frequency output in volts, which can take values from 0-3.5V""",
validator=truncated_range,
values=[0, 3.5]
)
LOW_FREQUENCY_SOURCES = {
'internal': 'INT', 'internal 2': 'INT2', 'function': 'FUNC', 'function 2': 'FUNC2'
}
low_freq_out_source = Instrument.control(
":SOUR:LFO:SOUR?", ":SOUR:LFO:SOUR %s",
"""A string property which controls the source of the low frequency output, which
        can take the values 'internal [2]' for the internal source, or 'function [2]' for an internal
function generator which can be configured.""",
validator=strict_discrete_set,
values=LOW_FREQUENCY_SOURCES,
map_values=True
)
def enable_low_freq_out(self):
"""Enables low frequency output"""
self.write(":SOUR:LFO:STAT ON")
def disable_low_freq_out(self):
"""Disables low frequency output"""
self.write(":SOUR:LFO:STAT OFF")
def config_low_freq_out(self, source='internal', amplitude=3):
""" Configures the low-frequency output signal.
:param source: The source for the low-frequency output signal.
:param amplitude: Amplitude of the low-frequency output
"""
self.enable_low_freq_out()
self.low_freq_out_source = source
self.low_freq_out_amplitude = amplitude
#######################
# Internal Oscillator #
#######################
internal_frequency = Instrument.control(
":SOUR:AM:INT:FREQ?", ":SOUR:AM:INT:FREQ %g",
""" A floating point property that controls the frequency of the internal
oscillator in Hertz, which can take values from 0.5 Hz to 1 MHz. """,
validator=truncated_range,
values=[0.5, 1e6]
)
INTERNAL_SHAPES = {
'sine': 'SINE', 'triangle': 'TRI', 'square': 'SQU', 'ramp': 'RAMP',
'noise': 'NOIS', 'dual-sine': 'DUAL', 'swept-sine': 'SWEP'
}
internal_shape = Instrument.control(
":SOUR:AM:INT:FUNC:SHAP?", ":SOUR:AM:INT:FUNC:SHAP %s",
""" A string property that controls the shape of the internal oscillations,
which can take the values: 'sine', 'triangle', 'square', 'ramp', 'noise',
'dual-sine', and 'swept-sine'. """,
validator=strict_discrete_set,
values=INTERNAL_SHAPES,
map_values=True
)
def enable(self):
""" Enables the output of the signal. """
self.write(":OUTPUT ON;")
def disable(self):
""" Disables the output of the signal. """
self.write(":OUTPUT OFF;")
def disable_modulation(self):
""" Disables the signal modulation. """
self.write(":OUTPUT:MOD OFF;")
self.write(":lfo:stat off;")
def config_amplitude_modulation(self, frequency=1e3, depth=100.0, shape='sine'):
""" Configures the amplitude modulation of the output signal.
:param frequency: A modulation frequency for the internal oscillator
        :param depth: A linear depth percentage
:param shape: A string that describes the shape for the internal oscillator
"""
self.enable_amplitude_modulation()
self.amplitude_source = 'internal'
self.internal_frequency = frequency
self.internal_shape = shape
self.amplitude_depth = depth
def enable_amplitude_modulation(self):
""" Enables amplitude modulation of the output signal. """
self.write(":SOUR:AM:STAT ON")
def disable_amplitude_modulation(self):
""" Disables amplitude modulation of the output signal. """
self.write(":SOUR:AM:STAT OFF")
def config_pulse_modulation(self, frequency=1e3, input='square'):
""" Configures the pulse modulation of the output signal.
:param frequency: A pulse rate frequency in Hertz
:param input: A string that describes the internal pulse input
"""
self.enable_pulse_modulation()
self.pulse_source = 'internal'
self.pulse_input = input
self.pulse_frequency = frequency
def enable_pulse_modulation(self):
""" Enables pulse modulation of the output signal. """
self.write(":SOUR:PULM:STAT ON")
def disable_pulse_modulation(self):
""" Disables pulse modulation of the output signal. """
self.write(":SOUR:PULM:STAT OFF")
def config_step_sweep(self):
""" Configures a step sweep through frequency """
self.write(":SOUR:FREQ:MODE SWE;"
":SOUR:SWE:GEN STEP;"
":SOUR:SWE:MODE AUTO;")
def start_step_sweep(self):
""" Starts a step sweep. """
self.write(":SOUR:SWE:CONT:STAT ON")
def stop_step_sweep(self):
""" Stops a step sweep. """
self.write(":SOUR:SWE:CONT:STAT OFF")
def shutdown(self):
""" Shuts down the instrument by disabling any modulation
and the output signal.
"""
self.disable_modulation()
self.disable()
avg_line_length: 36.533528 | max_line_length: 100 | alphanum_fraction: 0.615194
original_content:
#
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2022 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from pymeasure.instruments import Instrument
from pymeasure.instruments.validators import truncated_range, strict_discrete_set
class Agilent8257D(Instrument):
"""Represents the Agilent 8257D Signal Generator and
provides a high-level interface for interacting with
the instrument.
.. code-block:: python
generator = Agilent8257D("GPIB::1")
generator.power = 0 # Sets the output power to 0 dBm
generator.frequency = 5 # Sets the output frequency to 5 GHz
generator.enable() # Enables the output
"""
power = Instrument.control(
":POW?;", ":POW %g dBm;",
""" A floating point property that represents the output power
in dBm. This property can be set.
"""
)
frequency = Instrument.control(
":FREQ?;", ":FREQ %e Hz;",
""" A floating point property that represents the output frequency
in Hz. This property can be set.
"""
)
start_frequency = Instrument.control(
":SOUR:FREQ:STAR?", ":SOUR:FREQ:STAR %e Hz",
""" A floating point property that represents the start frequency
in Hz. This property can be set.
"""
)
center_frequency = Instrument.control(
":SOUR:FREQ:CENT?", ":SOUR:FREQ:CENT %e Hz;",
""" A floating point property that represents the center frequency
in Hz. This property can be set.
"""
)
stop_frequency = Instrument.control(
":SOUR:FREQ:STOP?", ":SOUR:FREQ:STOP %e Hz",
""" A floating point property that represents the stop frequency
in Hz. This property can be set.
"""
)
start_power = Instrument.control(
":SOUR:POW:STAR?", ":SOUR:POW:STAR %e dBm",
""" A floating point property that represents the start power
in dBm. This property can be set.
"""
)
stop_power = Instrument.control(
":SOUR:POW:STOP?", ":SOUR:POW:STOP %e dBm",
""" A floating point property that represents the stop power
in dBm. This property can be set.
"""
)
dwell_time = Instrument.control(
":SOUR:SWE:DWEL1?", ":SOUR:SWE:DWEL1 %.3f",
""" A floating point property that represents the settling time
in seconds at the current frequency or power setting.
This property can be set.
"""
)
step_points = Instrument.control(
":SOUR:SWE:POIN?", ":SOUR:SWE:POIN %d",
""" An integer number of points in a step sweep. This property
can be set.
"""
)
is_enabled = Instrument.measurement(
":OUTPUT?",
""" Reads a boolean value that is True if the output is on. """,
cast=bool
)
has_modulation = Instrument.measurement(
":OUTPUT:MOD?",
""" Reads a boolean value that is True if the modulation is enabled. """,
cast=bool
)
########################
# Amplitude modulation #
########################
has_amplitude_modulation = Instrument.measurement(
":SOUR:AM:STAT?",
""" Reads a boolean value that is True if the amplitude modulation is enabled. """,
cast=bool
)
amplitude_depth = Instrument.control(
":SOUR:AM:DEPT?", ":SOUR:AM:DEPT %g",
""" A floating point property that controls the amplitude modulation
        in percent, which can take values from 0 to 100 %. """,
validator=truncated_range,
values=[0, 100]
)
AMPLITUDE_SOURCES = {
'internal': 'INT', 'internal 2': 'INT2',
'external': 'EXT', 'external 2': 'EXT2'
}
amplitude_source = Instrument.control(
":SOUR:AM:SOUR?", ":SOUR:AM:SOUR %s",
""" A string property that controls the source of the amplitude modulation
signal, which can take the values: 'internal', 'internal 2', 'external', and
'external 2'. """,
validator=strict_discrete_set,
values=AMPLITUDE_SOURCES,
map_values=True
)
####################
# Pulse modulation #
####################
has_pulse_modulation = Instrument.measurement(
":SOUR:PULM:STAT?",
""" Reads a boolean value that is True if the pulse modulation is enabled. """,
cast=bool
)
PULSE_SOURCES = {
'internal': 'INT', 'external': 'EXT', 'scalar': 'SCAL'
}
pulse_source = Instrument.control(
":SOUR:PULM:SOUR?", ":SOUR:PULM:SOUR %s",
""" A string property that controls the source of the pulse modulation
signal, which can take the values: 'internal', 'external', and
'scalar'. """,
validator=strict_discrete_set,
values=PULSE_SOURCES,
map_values=True
)
PULSE_INPUTS = {
'square': 'SQU', 'free-run': 'FRUN',
'triggered': 'TRIG', 'doublet': 'DOUB', 'gated': 'GATE'
}
pulse_input = Instrument.control(
":SOUR:PULM:SOUR:INT?", ":SOUR:PULM:SOUR:INT %s",
""" A string property that controls the internally generated modulation
input for the pulse modulation, which can take the values: 'square', 'free-run',
'triggered', 'doublet', and 'gated'.
""",
validator=strict_discrete_set,
values=PULSE_INPUTS,
map_values=True
)
pulse_frequency = Instrument.control(
":SOUR:PULM:INT:FREQ?", ":SOUR:PULM:INT:FREQ %g",
""" A floating point property that controls the pulse rate frequency in Hertz,
which can take values from 0.1 Hz to 10 MHz. """,
validator=truncated_range,
values=[0.1, 10e6]
)
########################
# Low-Frequency Output #
########################
low_freq_out_amplitude = Instrument.control(
":SOUR:LFO:AMPL? ", ":SOUR:LFO:AMPL %g VP",
"""A floating point property that controls the peak voltage (amplitude) of the
low frequency output in volts, which can take values from 0-3.5V""",
validator=truncated_range,
values=[0, 3.5]
)
LOW_FREQUENCY_SOURCES = {
'internal': 'INT', 'internal 2': 'INT2', 'function': 'FUNC', 'function 2': 'FUNC2'
}
low_freq_out_source = Instrument.control(
":SOUR:LFO:SOUR?", ":SOUR:LFO:SOUR %s",
"""A string property which controls the source of the low frequency output, which
        can take the values 'internal [2]' for the internal source, or 'function [2]' for an internal
function generator which can be configured.""",
validator=strict_discrete_set,
values=LOW_FREQUENCY_SOURCES,
map_values=True
)
def enable_low_freq_out(self):
"""Enables low frequency output"""
self.write(":SOUR:LFO:STAT ON")
def disable_low_freq_out(self):
"""Disables low frequency output"""
self.write(":SOUR:LFO:STAT OFF")
def config_low_freq_out(self, source='internal', amplitude=3):
""" Configures the low-frequency output signal.
:param source: The source for the low-frequency output signal.
:param amplitude: Amplitude of the low-frequency output
"""
self.enable_low_freq_out()
self.low_freq_out_source = source
self.low_freq_out_amplitude = amplitude
#######################
# Internal Oscillator #
#######################
internal_frequency = Instrument.control(
":SOUR:AM:INT:FREQ?", ":SOUR:AM:INT:FREQ %g",
""" A floating point property that controls the frequency of the internal
oscillator in Hertz, which can take values from 0.5 Hz to 1 MHz. """,
validator=truncated_range,
values=[0.5, 1e6]
)
INTERNAL_SHAPES = {
'sine': 'SINE', 'triangle': 'TRI', 'square': 'SQU', 'ramp': 'RAMP',
'noise': 'NOIS', 'dual-sine': 'DUAL', 'swept-sine': 'SWEP'
}
internal_shape = Instrument.control(
":SOUR:AM:INT:FUNC:SHAP?", ":SOUR:AM:INT:FUNC:SHAP %s",
""" A string property that controls the shape of the internal oscillations,
which can take the values: 'sine', 'triangle', 'square', 'ramp', 'noise',
'dual-sine', and 'swept-sine'. """,
validator=strict_discrete_set,
values=INTERNAL_SHAPES,
map_values=True
)
def __init__(self, adapter, **kwargs):
super().__init__(
adapter, "Agilent 8257D RF Signal Generator", **kwargs
)
def enable(self):
""" Enables the output of the signal. """
self.write(":OUTPUT ON;")
def disable(self):
""" Disables the output of the signal. """
self.write(":OUTPUT OFF;")
    def enable_modulation(self):
        """ Enables the signal modulation and switches on the internal
        low-frequency output at 2 Vp. """
        self.write(":OUTPUT:MOD ON;")
        self.write(":lfo:sour int; :lfo:ampl 2.0vp; :lfo:stat on;")
def disable_modulation(self):
""" Disables the signal modulation. """
self.write(":OUTPUT:MOD OFF;")
self.write(":lfo:stat off;")
def config_amplitude_modulation(self, frequency=1e3, depth=100.0, shape='sine'):
""" Configures the amplitude modulation of the output signal.
:param frequency: A modulation frequency for the internal oscillator
        :param depth: A linear depth percentage
:param shape: A string that describes the shape for the internal oscillator
"""
self.enable_amplitude_modulation()
self.amplitude_source = 'internal'
self.internal_frequency = frequency
self.internal_shape = shape
self.amplitude_depth = depth
def enable_amplitude_modulation(self):
""" Enables amplitude modulation of the output signal. """
self.write(":SOUR:AM:STAT ON")
def disable_amplitude_modulation(self):
""" Disables amplitude modulation of the output signal. """
self.write(":SOUR:AM:STAT OFF")
def config_pulse_modulation(self, frequency=1e3, input='square'):
""" Configures the pulse modulation of the output signal.
:param frequency: A pulse rate frequency in Hertz
:param input: A string that describes the internal pulse input
"""
self.enable_pulse_modulation()
self.pulse_source = 'internal'
self.pulse_input = input
self.pulse_frequency = frequency
def enable_pulse_modulation(self):
""" Enables pulse modulation of the output signal. """
self.write(":SOUR:PULM:STAT ON")
def disable_pulse_modulation(self):
""" Disables pulse modulation of the output signal. """
self.write(":SOUR:PULM:STAT OFF")
def config_step_sweep(self):
""" Configures a step sweep through frequency """
self.write(":SOUR:FREQ:MODE SWE;"
":SOUR:SWE:GEN STEP;"
":SOUR:SWE:MODE AUTO;")
    def enable_retrace(self):
        """ Enables the retrace function for frequency sweeps. """
        self.write(":SOUR:LIST:RETR 1")
    def disable_retrace(self):
        """ Disables the retrace function for frequency sweeps. """
        self.write(":SOUR:LIST:RETR 0")
    def single_sweep(self):
        """ Triggers a single sweep. """
        self.write(":SOUR:TSW")
def start_step_sweep(self):
""" Starts a step sweep. """
self.write(":SOUR:SWE:CONT:STAT ON")
def stop_step_sweep(self):
""" Stops a step sweep. """
self.write(":SOUR:SWE:CONT:STAT OFF")
def shutdown(self):
""" Shuts down the instrument by disabling any modulation
and the output signal.
"""
self.disable_modulation()
self.disable()
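A minimal usage sketch for the driver above. It assumes the enclosing class is pymeasure's Agilent8257D and that a VISA instrument answers at the (illustrative) GPIB address; the method names are the ones defined in this file:

    from pymeasure.instruments.agilent import Agilent8257D

    generator = Agilent8257D("GPIB::19")  # address is a placeholder
    generator.config_amplitude_modulation(frequency=2e3, depth=80.0, shape="triangle")
    generator.enable()      # switch the RF output on
    # ... run the measurement ...
    generator.shutdown()    # disable modulation and the output signal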
| 356
| 0
| 135
|
dec7d6d3d15ea5d55e90e3e5423d903170fe436f
| 12,428
|
py
|
Python
|
lib/googlecloudsdk/command_lib/storage/resources/s3_resource_reference.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
2a48a04df14be46c8745050f98768e30474a1aac
|
[
"Apache-2.0"
] | 2
|
2019-11-10T09:17:07.000Z
|
2019-12-18T13:44:08.000Z
|
lib/googlecloudsdk/command_lib/storage/resources/s3_resource_reference.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
2a48a04df14be46c8745050f98768e30474a1aac
|
[
"Apache-2.0"
] | null | null | null |
lib/googlecloudsdk/command_lib/storage/resources/s3_resource_reference.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
2a48a04df14be46c8745050f98768e30474a1aac
|
[
"Apache-2.0"
] | 1
|
2020-07-25T01:40:19.000Z
|
2020-07-25T01:40:19.000Z
|
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""S3 API-specific resource subclasses."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import collections
from googlecloudsdk.api_lib.storage import errors
from googlecloudsdk.command_lib.storage.resources import resource_reference
from googlecloudsdk.command_lib.storage.resources import resource_util
_INCOMPLETE_OBJECT_METADATA_WARNING = (
'Use "-j", the JSON flag, to view additional S3 metadata.')
def _json_dump_recursion_helper(metadata):
"""See _get_json_dump docstring."""
if isinstance(metadata, list):
return [_json_dump_recursion_helper(item) for item in metadata]
if not isinstance(metadata, dict):
return resource_util.convert_to_json_parsable_type(metadata)
# Sort by key to make sure dictionary always prints in correct order.
formatted_dict = collections.OrderedDict(sorted(metadata.items()))
for key, value in formatted_dict.items():
if isinstance(value, dict):
# Recursively handle dictionaries.
formatted_dict[key] = _json_dump_recursion_helper(value)
elif isinstance(value, list):
      # Recursively handle lists, which may contain more dicts, like ACLs.
formatted_list = [_json_dump_recursion_helper(item) for item in value]
if formatted_list:
# Ignore empty lists.
formatted_dict[key] = formatted_list
elif value or resource_util.should_preserve_falsy_metadata_value(value):
formatted_dict[key] = resource_util.convert_to_json_parsable_type(value)
return formatted_dict
def _get_json_dump(resource):
"""Formats S3 resource metadata as JSON.
Args:
resource (S3BucketResource|S3ObjectResource): Resource object.
Returns:
Formatted JSON string.
"""
return resource_util.configured_json_dumps(
collections.OrderedDict([
('url', resource.storage_url.url_string),
('type', resource.TYPE_STRING),
('metadata', _json_dump_recursion_helper(resource.metadata)),
]))
def _get_error_or_exists_string(value):
"""Returns error if value is error or existence string."""
if isinstance(value, errors.S3ApiError):
return value
else:
return resource_util.get_exists_string(value)
def _get_formatted_acl_section(acl_metadata):
"""Returns formatted ACLs, error, or formatted none value."""
if isinstance(acl_metadata, errors.S3ApiError):
return resource_util.get_padded_metadata_key_value_line('ACL', acl_metadata)
elif acl_metadata:
return resource_util.get_metadata_json_section_string(
'ACL', acl_metadata, _json_dump_recursion_helper)
else:
return resource_util.get_padded_metadata_key_value_line('ACL', '[]')
def _get_full_bucket_metadata_string(resource):
"""Formats S3 resource metadata as string with rows.
Args:
resource (S3BucketResource): Resource with metadata.
Returns:
Formatted multi-line string.
"""
# Hardcoded strings found in Boto docs:
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html
logging_enabled_value = _get_error_or_exists_string(
resource.metadata['LoggingEnabled'])
website_value = _get_error_or_exists_string(resource.metadata['Website'])
cors_value = _get_error_or_exists_string(resource.metadata['CORSRules'])
encryption_value = _get_error_or_exists_string(
resource.metadata['ServerSideEncryptionConfiguration'])
lifecycle_configuration_value = _get_error_or_exists_string(
resource.metadata['LifecycleConfiguration'])
if isinstance(resource.metadata['Versioning'], errors.S3ApiError):
versioning_enabled_value = resource.metadata['Versioning']
else:
versioning_status = resource.metadata['Versioning'].get('Status')
if versioning_status == 'Enabled':
versioning_enabled_value = True
elif versioning_status == 'Suspended':
versioning_enabled_value = False
else:
versioning_enabled_value = None
if isinstance(resource.metadata['Payer'], errors.S3ApiError):
requester_pays_value = resource.metadata['Payer']
elif resource.metadata['Payer'] == 'Requester':
requester_pays_value = True
elif resource.metadata['Payer'] == 'BucketOwner':
requester_pays_value = False
else:
requester_pays_value = None
return (
'{bucket_url}:\n'
'{location_constraint_line}'
'{versioning_enabled_line}'
'{logging_config_line}'
'{website_config_line}'
'{cors_config_line}'
'{encryption_config_line}'
'{lifecycle_config_line}'
'{requester_pays_line}'
'{acl_section}'
).format(
bucket_url=resource.storage_url.versionless_url_string,
location_constraint_line=resource_util.get_padded_metadata_key_value_line(
'Location Constraint', resource.metadata['LocationConstraint']),
versioning_enabled_line=resource_util.get_padded_metadata_key_value_line(
'Versioning Enabled', versioning_enabled_value),
logging_config_line=resource_util.get_padded_metadata_key_value_line(
'Logging Configuration', logging_enabled_value),
website_config_line=resource_util.get_padded_metadata_key_value_line(
'Website Configuration', website_value),
cors_config_line=resource_util.get_padded_metadata_key_value_line(
'CORS Configuration', cors_value),
encryption_config_line=resource_util.get_padded_metadata_key_value_line(
'Encryption Configuration', encryption_value),
lifecycle_config_line=resource_util.get_padded_metadata_key_value_line(
'Lifecycle Configuration', lifecycle_configuration_value),
requester_pays_line=resource_util.get_padded_metadata_key_value_line(
'Requester Pays Enabled', requester_pays_value),
# Remove ending newline character because this is the last list item.
acl_section=_get_formatted_acl_section(resource.metadata['ACL'])[:-1])
def _get_full_object_metadata_string(resource):
"""Formats S3 resource metadata as string with rows.
Args:
resource (S3ObjectResource): Resource with metadata.
Returns:
Formatted multi-line string.
"""
# Hardcoded strings found in Boto docs:
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html
if 'LastModified' in resource.metadata:
optional_time_updated_line = resource_util.get_padded_metadata_time_line(
'Update Time', resource.metadata['LastModified'])
else:
optional_time_updated_line = ''
if 'StorageClass' in resource.metadata:
optional_storage_class_line = resource_util.get_padded_metadata_key_value_line(
'Storage Class', resource.metadata['StorageClass'])
else:
optional_storage_class_line = ''
if 'CacheControl' in resource.metadata:
optional_cache_control_line = resource_util.get_padded_metadata_key_value_line(
'Cache-Control', resource.metadata['CacheControl'])
else:
optional_cache_control_line = ''
  if 'ContentDisposition' in resource.metadata:
    optional_content_disposition_line = resource_util.get_padded_metadata_key_value_line(
        'Content-Disposition', resource.metadata['ContentDisposition'])
else:
optional_content_disposition_line = ''
if 'ContentEncoding' in resource.metadata:
optional_content_encoding_line = resource_util.get_padded_metadata_key_value_line(
'Content-Encoding', resource.metadata['ContentEncoding'])
else:
optional_content_encoding_line = ''
if 'ContentLanguage' in resource.metadata:
optional_content_language_line = resource_util.get_padded_metadata_key_value_line(
'Content-Language', resource.metadata['ContentLanguage'])
else:
optional_content_language_line = ''
if 'PartsCount' in resource.metadata:
optional_component_count_line = (
resource_util.get_padded_metadata_key_value_line(
'Component-Count', resource.metadata['PartsCount']))
else:
optional_component_count_line = ''
if resource.md5_hash is not None:
optional_md5_line = resource_util.get_padded_metadata_key_value_line(
'Hash (MD5)', resource.md5_hash)
elif 'SSECustomerAlgorithm' in resource.metadata:
optional_md5_line = resource_util.get_padded_metadata_key_value_line(
'Hash (MD5)', 'Underlying data encrypted')
else:
optional_md5_line = ''
if 'SSECustomerAlgorithm' in resource.metadata:
optional_encryption_algorithm_line = (
resource_util.get_padded_metadata_key_value_line(
'Encryption Algorithm', resource.metadata['SSECustomerAlgorithm']))
else:
optional_encryption_algorithm_line = ''
if resource.generation:
optional_generation_line = resource_util.get_padded_metadata_key_value_line(
'Generation', resource.generation)
else:
optional_generation_line = ''
return (
'{object_url}:\n'
'{optional_time_updated_line}'
'{optional_storage_class_line}'
'{optional_cache_control_line}'
'{optional_content_disposition_line}'
'{optional_content_encoding_line}'
'{optional_content_language_line}'
'{content_length_line}'
'{content_type_line}'
'{optional_component_count_line}'
'{optional_md5_line}'
'{optional_encryption_algorithm_line}'
'{etag_line}'
'{optional_generation_line}'
'{acl_section}'
' {incomplete_warning}').format(
object_url=resource.storage_url.versionless_url_string,
optional_time_updated_line=optional_time_updated_line,
optional_storage_class_line=optional_storage_class_line,
optional_cache_control_line=optional_cache_control_line,
optional_content_disposition_line=optional_content_disposition_line,
optional_content_encoding_line=optional_content_encoding_line,
optional_content_language_line=optional_content_language_line,
content_length_line=resource_util.get_padded_metadata_key_value_line(
'Content-Length', resource.size),
content_type_line=resource_util.get_padded_metadata_key_value_line(
'Content-Type', resource.metadata.get('ContentType')),
optional_component_count_line=optional_component_count_line,
optional_md5_line=optional_md5_line,
optional_encryption_algorithm_line=optional_encryption_algorithm_line,
etag_line=resource_util.get_padded_metadata_key_value_line(
'ETag', resource.etag),
optional_generation_line=optional_generation_line,
acl_section=_get_formatted_acl_section(resource.metadata.get('ACL')),
incomplete_warning=_INCOMPLETE_OBJECT_METADATA_WARNING)
class S3BucketResource(resource_reference.BucketResource):
"""API-specific subclass for handling metadata."""
class S3ObjectResource(resource_reference.ObjectResource):
"""API-specific subclass for handling metadata."""
def __init__(self,
storage_url_object,
content_type=None,
creation_time=None,
etag=None,
crc32c_hash=None,
md5_hash=None,
metadata=None,
metageneration=None,
size=None):
"""Initializes resource. Args are a subset of attributes."""
super(S3ObjectResource, self).__init__(
storage_url_object,
content_type=content_type,
creation_time=creation_time,
etag=etag,
        crc32c_hash=None,  # the crc32c_hash argument is ignored; None is always passed on
md5_hash=md5_hash,
metadata=metadata,
metageneration=metageneration,
size=size)
| 38.716511
| 89
| 0.745735
|
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""S3 API-specific resource subclasses."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import collections
from googlecloudsdk.api_lib.storage import errors
from googlecloudsdk.command_lib.storage.resources import resource_reference
from googlecloudsdk.command_lib.storage.resources import resource_util
_INCOMPLETE_OBJECT_METADATA_WARNING = (
'Use "-j", the JSON flag, to view additional S3 metadata.')
def _json_dump_recursion_helper(metadata):
"""See _get_json_dump docstring."""
if isinstance(metadata, list):
return [_json_dump_recursion_helper(item) for item in metadata]
if not isinstance(metadata, dict):
return resource_util.convert_to_json_parsable_type(metadata)
# Sort by key to make sure dictionary always prints in correct order.
formatted_dict = collections.OrderedDict(sorted(metadata.items()))
for key, value in formatted_dict.items():
if isinstance(value, dict):
# Recursively handle dictionaries.
formatted_dict[key] = _json_dump_recursion_helper(value)
elif isinstance(value, list):
      # Recursively handle lists, which may contain more dicts, like ACLs.
formatted_list = [_json_dump_recursion_helper(item) for item in value]
if formatted_list:
# Ignore empty lists.
formatted_dict[key] = formatted_list
elif value or resource_util.should_preserve_falsy_metadata_value(value):
formatted_dict[key] = resource_util.convert_to_json_parsable_type(value)
return formatted_dict
def _get_json_dump(resource):
"""Formats S3 resource metadata as JSON.
Args:
resource (S3BucketResource|S3ObjectResource): Resource object.
Returns:
Formatted JSON string.
"""
return resource_util.configured_json_dumps(
collections.OrderedDict([
('url', resource.storage_url.url_string),
('type', resource.TYPE_STRING),
('metadata', _json_dump_recursion_helper(resource.metadata)),
]))
def _get_error_or_exists_string(value):
"""Returns error if value is error or existence string."""
if isinstance(value, errors.S3ApiError):
return value
else:
return resource_util.get_exists_string(value)
def _get_formatted_acl_section(acl_metadata):
"""Returns formatted ACLs, error, or formatted none value."""
if isinstance(acl_metadata, errors.S3ApiError):
return resource_util.get_padded_metadata_key_value_line('ACL', acl_metadata)
elif acl_metadata:
return resource_util.get_metadata_json_section_string(
'ACL', acl_metadata, _json_dump_recursion_helper)
else:
return resource_util.get_padded_metadata_key_value_line('ACL', '[]')
def _get_full_bucket_metadata_string(resource):
"""Formats S3 resource metadata as string with rows.
Args:
resource (S3BucketResource): Resource with metadata.
Returns:
Formatted multi-line string.
"""
# Hardcoded strings found in Boto docs:
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html
logging_enabled_value = _get_error_or_exists_string(
resource.metadata['LoggingEnabled'])
website_value = _get_error_or_exists_string(resource.metadata['Website'])
cors_value = _get_error_or_exists_string(resource.metadata['CORSRules'])
encryption_value = _get_error_or_exists_string(
resource.metadata['ServerSideEncryptionConfiguration'])
lifecycle_configuration_value = _get_error_or_exists_string(
resource.metadata['LifecycleConfiguration'])
if isinstance(resource.metadata['Versioning'], errors.S3ApiError):
versioning_enabled_value = resource.metadata['Versioning']
else:
versioning_status = resource.metadata['Versioning'].get('Status')
if versioning_status == 'Enabled':
versioning_enabled_value = True
elif versioning_status == 'Suspended':
versioning_enabled_value = False
else:
versioning_enabled_value = None
if isinstance(resource.metadata['Payer'], errors.S3ApiError):
requester_pays_value = resource.metadata['Payer']
elif resource.metadata['Payer'] == 'Requester':
requester_pays_value = True
elif resource.metadata['Payer'] == 'BucketOwner':
requester_pays_value = False
else:
requester_pays_value = None
return (
'{bucket_url}:\n'
'{location_constraint_line}'
'{versioning_enabled_line}'
'{logging_config_line}'
'{website_config_line}'
'{cors_config_line}'
'{encryption_config_line}'
'{lifecycle_config_line}'
'{requester_pays_line}'
'{acl_section}'
).format(
bucket_url=resource.storage_url.versionless_url_string,
location_constraint_line=resource_util.get_padded_metadata_key_value_line(
'Location Constraint', resource.metadata['LocationConstraint']),
versioning_enabled_line=resource_util.get_padded_metadata_key_value_line(
'Versioning Enabled', versioning_enabled_value),
logging_config_line=resource_util.get_padded_metadata_key_value_line(
'Logging Configuration', logging_enabled_value),
website_config_line=resource_util.get_padded_metadata_key_value_line(
'Website Configuration', website_value),
cors_config_line=resource_util.get_padded_metadata_key_value_line(
'CORS Configuration', cors_value),
encryption_config_line=resource_util.get_padded_metadata_key_value_line(
'Encryption Configuration', encryption_value),
lifecycle_config_line=resource_util.get_padded_metadata_key_value_line(
'Lifecycle Configuration', lifecycle_configuration_value),
requester_pays_line=resource_util.get_padded_metadata_key_value_line(
'Requester Pays Enabled', requester_pays_value),
# Remove ending newline character because this is the last list item.
acl_section=_get_formatted_acl_section(resource.metadata['ACL'])[:-1])
def _get_full_object_metadata_string(resource):
"""Formats S3 resource metadata as string with rows.
Args:
resource (S3ObjectResource): Resource with metadata.
Returns:
Formatted multi-line string.
"""
# Hardcoded strings found in Boto docs:
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html
if 'LastModified' in resource.metadata:
optional_time_updated_line = resource_util.get_padded_metadata_time_line(
'Update Time', resource.metadata['LastModified'])
else:
optional_time_updated_line = ''
if 'StorageClass' in resource.metadata:
optional_storage_class_line = resource_util.get_padded_metadata_key_value_line(
'Storage Class', resource.metadata['StorageClass'])
else:
optional_storage_class_line = ''
if 'CacheControl' in resource.metadata:
optional_cache_control_line = resource_util.get_padded_metadata_key_value_line(
'Cache-Control', resource.metadata['CacheControl'])
else:
optional_cache_control_line = ''
  if 'ContentDisposition' in resource.metadata:
    optional_content_disposition_line = resource_util.get_padded_metadata_key_value_line(
        'Content-Disposition', resource.metadata['ContentDisposition'])
else:
optional_content_disposition_line = ''
if 'ContentEncoding' in resource.metadata:
optional_content_encoding_line = resource_util.get_padded_metadata_key_value_line(
'Content-Encoding', resource.metadata['ContentEncoding'])
else:
optional_content_encoding_line = ''
if 'ContentLanguage' in resource.metadata:
optional_content_language_line = resource_util.get_padded_metadata_key_value_line(
'Content-Language', resource.metadata['ContentLanguage'])
else:
optional_content_language_line = ''
if 'PartsCount' in resource.metadata:
optional_component_count_line = (
resource_util.get_padded_metadata_key_value_line(
'Component-Count', resource.metadata['PartsCount']))
else:
optional_component_count_line = ''
if resource.md5_hash is not None:
optional_md5_line = resource_util.get_padded_metadata_key_value_line(
'Hash (MD5)', resource.md5_hash)
elif 'SSECustomerAlgorithm' in resource.metadata:
optional_md5_line = resource_util.get_padded_metadata_key_value_line(
'Hash (MD5)', 'Underlying data encrypted')
else:
optional_md5_line = ''
if 'SSECustomerAlgorithm' in resource.metadata:
optional_encryption_algorithm_line = (
resource_util.get_padded_metadata_key_value_line(
'Encryption Algorithm', resource.metadata['SSECustomerAlgorithm']))
else:
optional_encryption_algorithm_line = ''
if resource.generation:
optional_generation_line = resource_util.get_padded_metadata_key_value_line(
'Generation', resource.generation)
else:
optional_generation_line = ''
return (
'{object_url}:\n'
'{optional_time_updated_line}'
'{optional_storage_class_line}'
'{optional_cache_control_line}'
'{optional_content_disposition_line}'
'{optional_content_encoding_line}'
'{optional_content_language_line}'
'{content_length_line}'
'{content_type_line}'
'{optional_component_count_line}'
'{optional_md5_line}'
'{optional_encryption_algorithm_line}'
'{etag_line}'
'{optional_generation_line}'
'{acl_section}'
' {incomplete_warning}').format(
object_url=resource.storage_url.versionless_url_string,
optional_time_updated_line=optional_time_updated_line,
optional_storage_class_line=optional_storage_class_line,
optional_cache_control_line=optional_cache_control_line,
optional_content_disposition_line=optional_content_disposition_line,
optional_content_encoding_line=optional_content_encoding_line,
optional_content_language_line=optional_content_language_line,
content_length_line=resource_util.get_padded_metadata_key_value_line(
'Content-Length', resource.size),
content_type_line=resource_util.get_padded_metadata_key_value_line(
'Content-Type', resource.metadata.get('ContentType')),
optional_component_count_line=optional_component_count_line,
optional_md5_line=optional_md5_line,
optional_encryption_algorithm_line=optional_encryption_algorithm_line,
etag_line=resource_util.get_padded_metadata_key_value_line(
'ETag', resource.etag),
optional_generation_line=optional_generation_line,
acl_section=_get_formatted_acl_section(resource.metadata.get('ACL')),
incomplete_warning=_INCOMPLETE_OBJECT_METADATA_WARNING)
class S3BucketResource(resource_reference.BucketResource):
"""API-specific subclass for handling metadata."""
def get_full_metadata_string(self):
return _get_full_bucket_metadata_string(self)
def get_json_dump(self):
return _get_json_dump(self)
class S3ObjectResource(resource_reference.ObjectResource):
"""API-specific subclass for handling metadata."""
def __init__(self,
storage_url_object,
content_type=None,
creation_time=None,
etag=None,
crc32c_hash=None,
md5_hash=None,
metadata=None,
metageneration=None,
size=None):
"""Initializes resource. Args are a subset of attributes."""
super(S3ObjectResource, self).__init__(
storage_url_object,
content_type=content_type,
creation_time=creation_time,
etag=etag,
        crc32c_hash=None,  # the crc32c_hash argument is ignored; None is always passed on
md5_hash=md5_hash,
metadata=metadata,
metageneration=metageneration,
size=size)
def get_full_metadata_string(self):
return _get_full_object_metadata_string(self)
def get_json_dump(self):
return _get_json_dump(self)
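A rough illustration of the JSON shape produced by get_json_dump above, using stand-in objects instead of the real googlecloudsdk storage URL type (all names and values below are invented; the real code path goes through resource_util.configured_json_dumps rather than json.dumps):

    import collections
    import json

    metadata = collections.OrderedDict([
        ('ContentType', 'text/plain'),
        ('ETag', '"abc123"'),
    ])
    # Mirrors the OrderedDict built by _get_json_dump.
    print(json.dumps(collections.OrderedDict([
        ('url', 's3://my-bucket/object.txt'),  # storage_url.url_string stand-in
        ('type', 's3_object'),                 # TYPE_STRING stand-in
        ('metadata', metadata),
    ]), indent=2))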
| 198
| 0
| 100
|
d66df52d49ff61c5175e83fab8cd02546b04169c
| 1,982
|
py
|
Python
|
apistar_sentry.py
|
LeadPages/apistar_sentry
|
f718784b256399ae04f4e8bf82b177f9cc3b1008
|
[
"MIT"
] | 2
|
2018-06-10T14:37:04.000Z
|
2018-06-16T22:33:46.000Z
|
apistar_sentry.py
|
LeadPages/apistar_sentry
|
f718784b256399ae04f4e8bf82b177f9cc3b1008
|
[
"MIT"
] | 3
|
2020-03-24T17:19:55.000Z
|
2021-02-02T22:08:44.000Z
|
apistar_sentry.py
|
LeadPages/apistar_sentry
|
f718784b256399ae04f4e8bf82b177f9cc3b1008
|
[
"MIT"
] | 1
|
2018-04-16T18:44:33.000Z
|
2018-04-16T18:44:33.000Z
|
import typing
from apistar import Settings
from apistar.interfaces import Auth
from apistar.types import ReturnValue
from raven import Client
__version__ = "0.2.0"
| 26.426667
| 72
| 0.590817
|
import typing
from apistar import Settings
from apistar.interfaces import Auth
from apistar.types import ReturnValue
from raven import Client
__version__ = "0.2.0"
class Sentry:
def __init__(self, settings: Settings) -> None:
self.client = Client(
settings["SENTRY_DSN"],
environment=settings["ENVIRONMENT"],
release=settings["VERSION"],
)
@classmethod
def setup(cls, settings: Settings) -> typing.Optional["Sentry"]:
if settings.get("SENTRY_DSN"):
return cls(settings)
return None
@classmethod
def setup_celery(cls, settings: Settings) -> None:
from raven.contrib import celery as raven_celery
sentry = cls(settings)
raven_celery.register_logger_signal(sentry.client)
raven_celery.register_signal(sentry.client)
def track(self, auth: Auth) -> None:
self.client.context.activate()
if auth is not None:
self.client.context.merge({
"user": {
"id": auth.get_user_id(),
"name": auth.get_display_name(),
"authenticated": auth.is_authenticated(),
}
})
def clear(self) -> None:
self.client.context.clear()
def capture_exception(self) -> None:
self.client.captureException()
class SentryMixin:
def exception_handler(self, exc: Exception, sentry: Sentry) -> None:
try:
return super().exception_handler(exc)
except Exception:
if sentry is not None:
try:
sentry.capture_exception()
finally:
sentry.clear()
raise
def before_request(auth: Auth, sentry: Sentry) -> None:
if sentry is not None:
sentry.track(auth)
def after_request(sentry: Sentry, ret: ReturnValue) -> ReturnValue:
if sentry is not None:
sentry.clear()
return ret
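A hedged sketch of driving the Sentry helper directly. Only the settings keys and methods come from this module; the DSN is a placeholder, and how the component gets registered with an apistar app (which varies by apistar version) is not shown here:

    settings = {
        "SENTRY_DSN": "https://key@sentry.example.com/1",  # placeholder DSN
        "ENVIRONMENT": "production",
        "VERSION": __version__,
    }

    sentry = Sentry.setup(settings)  # returns None when SENTRY_DSN is unset
    if sentry is not None:
        try:
            raise RuntimeError("demo failure")
        except RuntimeError:
            sentry.capture_exception()
        finally:
            sentry.clear()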
| 1,510
| 184
| 118
|
460ed8df205faa3ecff6b37fb600ecfce371a297
| 7,121
|
py
|
Python
|
app.py
|
tanasijevich/project3
|
cd4870727e31bad47868625a59a565f4b96d80a5
|
[
"MIT"
] | null | null | null |
app.py
|
tanasijevich/project3
|
cd4870727e31bad47868625a59a565f4b96d80a5
|
[
"MIT"
] | null | null | null |
app.py
|
tanasijevich/project3
|
cd4870727e31bad47868625a59a565f4b96d80a5
|
[
"MIT"
] | null | null | null |
# import necessary libraries
# from models import create_classes
import pandas as pd
import os
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from sqlite3 import connect
import json
from flask import (
Flask,
render_template,
jsonify,
request,
redirect,
jsonify)
# Read data from csv
#csv_file = "data/Chicago Health Atlas.csv"
#df = pd.read_csv(csv_file)
#df.head()
#df.rename(columns={"VRDIBR_2015-2019":"VRDIBR_2015_2019","VRDIAR_2015-2018":"VRDIAR_2015_2018","VRDTH_2015-2019":"VRDTH_2015_2019","VRCAR_2015-2019":"VRCAR_2015_2019","VRADR_2015-2019":"VRADR_2015_2019","HDX_2015-2019":"HDX_2015_2019"},inplace=True)
#creating sqlite engine to create database
#engine = create_engine('sqlite:///data/Chicago_Health_database.db')
#engine = create_engine('sqlite:///C:/Users/doyel/Desktop/project3_flask_ex1/data/mydatabase.db')
#Table name : Chicago_Health_Atlas
#df.to_sql('Chicago_Health_Atlas',con=engine,if_exists='replace')
#####################################################################
engine = create_engine("sqlite:///data/mydatabase.db")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Save reference to the table
print(Base.classes.keys())
Healthatlas = Base.classes.healthatlas
#Actors = Base.classes.actors
#################################################
# Flask Setup
#################################################
app = Flask(__name__)
# ---------------------------------------------------------
# Web site
@app.route("/")
@app.route("/data.html")
@app.route("/templates/map.html")
@app.route("/templates/d3_chart.html")
# ---------------------------------------------------------
# API to call "when data.html" page is loading with community information table
@app.route("/api/community")
# API called when the user selects a disease from the list on the "data.html" page
@app.route("/api/deceases/<decease>")
@app.route("/api/geojson")
@app.route('/api/d3_chart/<field_x>/<field_y>')
if __name__ == "__main__":
app.run()
| 42.136095
| 576
| 0.72869
|
# import necessary libraries
# from models import create_classes
import pandas as pd
import os
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from sqlite3 import connect
import json
from flask import (
Flask,
render_template,
jsonify,
request,
redirect,
jsonify)
# Read data from csv
#csv_file = "data/Chicago Health Atlas.csv"
#df = pd.read_csv(csv_file)
#df.head()
#df.rename(columns={"VRDIBR_2015-2019":"VRDIBR_2015_2019","VRDIAR_2015-2018":"VRDIAR_2015_2018","VRDTH_2015-2019":"VRDTH_2015_2019","VRCAR_2015-2019":"VRCAR_2015_2019","VRADR_2015-2019":"VRADR_2015_2019","HDX_2015-2019":"HDX_2015_2019"},inplace=True)
#creating sqlite engine to create database
#engine = create_engine('sqlite:///data/Chicago_Health_database.db')
#engine = create_engine('sqlite:///C:/Users/doyel/Desktop/project3_flask_ex1/data/mydatabase.db')
#Table name : Chicago_Health_Atlas
#df.to_sql('Chicago_Health_Atlas',con=engine,if_exists='replace')
#####################################################################
engine = create_engine("sqlite:///data/mydatabase.db")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Save reference to the table
print(Base.classes.keys())
Healthatlas = Base.classes.healthatlas
#Actors = Base.classes.actors
#################################################
# Flask Setup
#################################################
app = Flask(__name__)
# ---------------------------------------------------------
# Web site
@app.route("/")
def home():
return render_template("index.html")
@app.route("/data.html")
def data():
return render_template("data.html")
@app.route("/templates/map.html")
def map():
return render_template("map.html")
@app.route("/templates/d3_chart.html")
def d3_chart():
return render_template("d3_chart.html")
# ---------------------------------------------------------
# API to call "when data.html" page is loading with community information table
@app.route("/api/community")
def community_grid():
session = Session(engine)
results = session.query(Healthatlas.Name,Healthatlas.Median_Household_Income,Healthatlas.Poverty_Rate,Healthatlas.Receiving_Food_Stamps,Healthatlas.Public_Assistance_Income,Healthatlas.High_School_Grad_Rate, Healthatlas.College_Grad_Rate,Healthatlas.Non_Hispanic_White,Healthatlas.Non_Hispanic_Black,Healthatlas.Asian_Pacific_Islander,Healthatlas.Hispanic_or_Latino,Healthatlas.Population_All,Healthatlas.Population_Infants,Healthatlas.Population_Juveniles,Healthatlas.Population_Young_Adults,Healthatlas.Population_Middle_Aged_Adults,Healthatlas.Population_Seniors).all()
#results = session.query(Healthatlas.Name,Healthatlas.GEOID, Healthatlas.Population,Healthatlas.Longitude, Healthatlas.Latitude).all()
#results = pd.read_sql('SELECT Name,GEOID,Population,Longitude,Latitude FROM Chicago_Health_Atlas', engine)
#results = engine.execute("SELECT Name,GEOID,Population,Longitude,Latitude FROM Chicago_Health_Atlas").fetchall()
#session.query(Movies.title, Movies.director, Movies.year, Movies.rating, Movies.imdb_votes, Movies.imdb_score).all()
results = [list(r) for r in results]
table_results = {
"table": results
}
session.close()
return jsonify(table_results)
# API called when the user selects a disease from the list on the "data.html" page
@app.route("/api/deceases/<decease>")
def deceases(decease):
session = Session(engine)
if decease == "diabetes":
results = session.query(Healthatlas.Name, Healthatlas.Population_All, Healthatlas.Longitude, Healthatlas.Latitude, Healthatlas.VRDIAR_2015_2019, Healthatlas.HDX_2015_2019, Healthatlas.HCSSP_2016_2018, Healthatlas.HCSSMKP_2016_2018).all()
elif decease == "diabetes_related":
results = session.query(Healthatlas.Name, Healthatlas.Population_All, Healthatlas.Longitude, Healthatlas.Latitude, Healthatlas.VRDIBR_2015_2019, Healthatlas.HDX_2015_2019, Healthatlas.HCSSP_2016_2018, Healthatlas.HCSSMKP_2016_2018 ).all()
elif decease == "alzheimer":
results = session.query(Healthatlas.Name, Healthatlas.Population_All, Healthatlas.Longitude, Healthatlas.Latitude, Healthatlas.VRADR_2015_2019, Healthatlas.HDX_2015_2019, Healthatlas.HCSSP_2016_2018, Healthatlas.HCSSMKP_2016_2018).all()
elif decease == "cancer":
results = session.query(Healthatlas.Name, Healthatlas.Population_All, Healthatlas.Longitude, Healthatlas.Latitude, Healthatlas.VRCAR_2015_2019, Healthatlas.HDX_2015_2019, Healthatlas.HCSSP_2016_2018, Healthatlas.HCSSMKP_2016_2018).all()
elif decease == "hypertension":
results = session.query(Healthatlas.Name, Healthatlas.Population_All, Healthatlas.Longitude, Healthatlas.Latitude, Healthatlas.HCSHYTP_2016_2018, Healthatlas.HDX_2015_2019, Healthatlas.HCSSP_2016_2018, Healthatlas.HCSSMKP_2016_2018).all()
elif decease == "adult_obesity":
results = session.query(Healthatlas.Name, Healthatlas.Population_All, Healthatlas.Longitude, Healthatlas.Latitude, Healthatlas.HCSOBP_2016_2018, Healthatlas.HDX_2015_2019, Healthatlas.HCSSP_2016_2018, Healthatlas.HCSSMKP_2016_2018).all()
elif decease == "coronary_heart_disease":
results = session.query(Healthatlas.Name, Healthatlas.Population_All, Healthatlas.Longitude, Healthatlas.Latitude, Healthatlas.VRCHDR_2015_2019, Healthatlas.HDX_2015_2019, Healthatlas.HCSSP_2016_2018, Healthatlas.HCSSMKP_2016_2018).all()
#elif decease == "all" :
# results = session.query(Healthatlas.Name, Healthatlas.Population_All, Healthatlas.Longitude, Healthatlas.Latitude, Healthatlas.VRDTH_2015_2019, Healthatlas.HDX_2015_2019).all()
else:
results = session.query(Healthatlas.Name,Healthatlas.Population_All, Healthatlas.Longitude, Healthatlas.Latitude, Healthatlas.VRADR_2015_2019, Healthatlas.HDX_2015_2019, Healthatlas.HCSSP_2016_2018, Healthatlas.HCSSMKP_2016_2018).all()
results = [list(r) for r in results]
name = [result[4] for result in results]
hardship = [result[5] for result in results]
soda = [result[6] for result in results]
smoke = [result[7] for result in results]
decease_results = {
"decease_name": name,
"hd_index": hardship,
"soda_con":soda,
"smoking":smoke,
}
session.close()
return jsonify(decease_results)
@app.route("/api/geojson")
def map_data():
with open('data/geo.json', 'r') as file:
your_data = json.loads(file.read())
# print(your_data)
return jsonify(your_data)
@app.route('/api/d3_chart/<field_x>/<field_y>')
def d3_chart_api(field_x, field_y):
session = Session(engine)
    # field names come straight from the URL path; getattr raises for unknown columns
    x_column = getattr(Healthatlas, field_x)
    y_column = getattr(Healthatlas, field_y)
results = session.query(x_column, y_column).all()
results = [list(r) for r in results]
session.close()
return jsonify(results)
if __name__ == "__main__":
app.run()
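For a quick local check of the endpoints above, Flask's built-in test client can be used without starting a server (the sqlite file referenced at the top must exist; the disease key is one of the branches handled in deceases()):

    with app.test_client() as client:
        resp = client.get("/api/deceases/diabetes")
        print(resp.status_code)
        print(sorted(resp.get_json().keys()))  # decease_name, hd_index, smoking, soda_con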
| 4,772
| 0
| 176
|
e202b3b1b0dd6517b189261d661038ca7be2cad9
| 2,377
|
py
|
Python
|
lambda/extract_yelp/extract.py
|
Rdbaker/barfinder
|
63c75dc99f2371371aa8072078175558d1917864
|
[
"BSD-3-Clause"
] | null | null | null |
lambda/extract_yelp/extract.py
|
Rdbaker/barfinder
|
63c75dc99f2371371aa8072078175558d1917864
|
[
"BSD-3-Clause"
] | null | null | null |
lambda/extract_yelp/extract.py
|
Rdbaker/barfinder
|
63c75dc99f2371371aa8072078175558d1917864
|
[
"BSD-3-Clause"
] | null | null | null |
import logging
from models import tag as Tag, business as Business, business_tag_join_table
logging.getLogger().setLevel(logging.INFO)
def business_exists(yelp_id, conn):
"""Return True if the business exists."""
return conn.execute(Business.select().where(Business.c.yelp_id == yelp_id))\
.first() is not None
def delete_business(yelp_id, conn):
"""Delete the business with the given yelp id."""
return conn.execute(Business.delete().where(Business.c.yelp_id == yelp_id))
| 32.561644
| 80
| 0.640303
|
import logging
from models import tag as Tag, business as Business, business_tag_join_table
logging.getLogger().setLevel(logging.INFO)
def parse_yelp_business(business):
    """Map a raw Yelp business dict onto the columns of the business table."""
return {
'source': 'yelp',
'raw_yelp_data': business,
'yelp_id': business.get('id'),
'name': business.get('name', 'UNKNOWN'),
'price': len(business.get('price', '')),
'latitude': business.get('coordinates', {}).get('latitude'),
'longitude': business.get('coordinates', {}).get('longitude'),
'phone': business.get('phone'),
}
def business_exists(yelp_id, conn):
"""Return True if the business exists."""
return conn.execute(Business.select().where(Business.c.yelp_id == yelp_id))\
.first() is not None
def delete_business(yelp_id, conn):
"""Delete the business with the given yelp id."""
return conn.execute(Business.delete().where(Business.c.yelp_id == yelp_id))
def tag_exists(alias, conn):
return conn.execute(Tag.select().where(Tag.c.alias == alias))\
.first() is not None
def create_tag(tag, conn):
conn.execute(Tag.insert().values(**tag))
def get_or_create_tags(tags, conn):
names = []
for tag in tags:
if not tag_exists(tag['alias'], conn):
create_tag(tag, conn)
names.append(tag['alias'])
return conn.execute(Tag.select().where(Tag.c.alias.in_(names))).fetchall()
def create_business(business, conn):
conn.execute(Business.insert().values(**business))
return conn.execute(Business.select().where(Business.c.yelp_id ==
business['yelp_id'])).first()
def link_business_to_tags(business, tags, conn):
for tag in tags:
conn.execute(
business_tag_join_table.insert().values(tag_id=tag.id,
business_id=business.id))
def extract_business(business_dict, engine):
    """Insert (or replace) one Yelp business and link it to its category tags."""
conn = engine.connect()
if business_exists(business_dict['id'], conn):
delete_business(business_dict['id'], conn)
business = parse_yelp_business(business_dict)
tags = get_or_create_tags(business_dict['categories'], conn)
business = create_business(business, conn)
link_business_to_tags(business, tags, conn)
logging.info('successfully processed business: {}'
.format(business_dict['id']))
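A small illustration of the mapping performed by parse_yelp_business; the business dict below is invented but follows the Yelp API field names the function reads:

    sample = {
        'id': 'yelp-abc-123',
        'name': 'The Rusty Anchor',
        'price': '$$',
        'coordinates': {'latitude': 44.97, 'longitude': -93.26},
        'phone': '+16125551234',
        'categories': [{'alias': 'bars', 'title': 'Bars'}],
    }
    row = parse_yelp_business(sample)
    assert row['price'] == 2       # price becomes the number of '$' signs
    assert row['source'] == 'yelp'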
| 1,706
| 0
| 161
|
5ddcfbd5c5a68beee52f20bf25f10cc164269d23
| 14,652
|
py
|
Python
|
doubling_agent/motility_functions.py
|
lkmartin90/doubling_agent
|
73a7f06aa43c5fa51ea1263b72ebe6f8319bf894
|
[
"MIT"
] | 1
|
2020-12-03T15:47:24.000Z
|
2020-12-03T15:47:24.000Z
|
doubling_agent/motility_functions.py
|
lkmartin90/doubling_agent
|
73a7f06aa43c5fa51ea1263b72ebe6f8319bf894
|
[
"MIT"
] | null | null | null |
doubling_agent/motility_functions.py
|
lkmartin90/doubling_agent
|
73a7f06aa43c5fa51ea1263b72ebe6f8319bf894
|
[
"MIT"
] | null | null | null |
from random import random
from random import choice
import numpy as np
import plotly.express as px
import struct
import operator
###
# Broadly the same as "basic_functions.py" but updated to include motility
# intentionally trying to keep them separate so as not to slow down the basic version
###
| 36.35732
| 115
| 0.574597
|
from random import random
from random import choice
import numpy as np
import plotly.express as px
import struct
import operator
###
# Broadly the same as "basic_functions.py" but updated to include motility
# intentionally trying to keep them separate so as not to slow down the basic version
###
class MotilityParameters:
def __init__(self, switch_to_m_rate, switch_to_p_rate, motility_rate):
# 0 motility state is proliferating, 1 is moving
self.m = switch_to_m_rate
self.p = switch_to_p_rate
self.rate = motility_rate
self.dict = {0: switch_to_m_rate, 1: switch_to_p_rate + motility_rate}
class ParametersBasic:
def __init__(self, s_division_rate, epsilon, p_division_rate, apoptosis_rate):
self.s = s_division_rate
self.p = p_division_rate
self.e = epsilon
self.a = apoptosis_rate
self.dict = {0: s_division_rate, 1: p_division_rate, 2: apoptosis_rate}
self.death = {0: 0, 1: 0, 2: apoptosis_rate}
class ParametersQuiescent:
def __init__(self, k1, k2, k3, k4, k5, k6, k7, k8):
# s>s+s :k1, s>s+p:k2, s>dead:k3, p>p+p:k4, p>dead:k5, p>Q:k6, D>dead:k7, Q>s:k8
self.k1 = k1
self.k2 = k2
self.k3 = k3
self.k4 = k4
self.k5 = k5
self.k6 = k6
self.k7 = k7
self.k8 = k8
# rate of something happening for each state
# note the slight change in notation, 0 is stem cell, 1 progenitor, 2 differentiated and 3 quiescent
self.dict = {0: k1+k2+k3, 1: k4+k5+k6, 2: k7, 3: k8}
self.death = {0: k3, 1: k5, 2: k7, 3: 0}
def cancer_seed_single(cells, switch_3d):
    # creates the initial cancer stem cell at [0,0] (or [0,0,0] in 3d)
    if switch_3d:
        cells.update({(0, 0, 0): [0, 0, 0]})
    else:
        cells.update({(0,0): [0, 0, 0]})
def cancer_seed_single_quiescent(cells):
    # creates the initial quiescent cancer cell at [0,0]
    cells.update({(0,0): [3, 0, 0]})
def cancer_seed_single_progen(cells):
    # creates the initial progenitor cancer cell at [0,0]
    cells.update({(0,0): [1, 0, 0]})
def timing_update_all(cells, params, mot_params):
# update second entry in dict to give a timing based on the first entry, the state
# time is log(1/rand_no)/rate
# Now want to account for fact that cells can either change motility state or move or divide
# options:
# motility state 1: can either move, change motility state, or die
# motility state 0: can either change motility state or go though all division choices
# including death (already written)
for k in cells.keys():
state = cells.get(k)[0]
mot = cells.get(k)[2]
div = params.dict[state] # division or death rate for motility 0
m_or_c = mot_params.dict[mot] # move or change rate
mot_death = params.death[state] # death rate for motility state 1
if mot == 0:
rate = div+m_or_c
else:
rate = m_or_c + mot_death
cells.update({k: [state, np.log(1/random())/rate, mot]})
def choose_new_pos(pos, cells):
# Identifies a free position for a cell to divide or move into. In this function a 2d square grid is used
# space is searched for in the surrounding area, by random number generator, if there is already a cell
# occupying the space then that space is excluded from possible locations and a new random number is generated.
i = pos[0]
j = pos[1]
neighbours = [(i+1, j), (i-1, j), (i, j-1), (i, j+1)]
options = [0, 1, 2, 3]
cont = 0
new_pos = 0
while cont == 0 and len(options) > 0:
pick = choice(options)
check = neighbours[pick]
if check in cells:
options.remove(pick)
else:
cont = 1
new_pos = check
return new_pos
def choose_new_pos_eq(pos, cells):
# choses a new position by identifying all the free spaces first and then assigning them all equal probability
i = pos[0]
j = pos[1]
neighbours = [(i+1, j), (i-1, j), (i, j-1), (i, j+1)]
options = [0, 1, 2, 3]
for n in range(len(neighbours)):
if neighbours[n] in cells:
options.remove(n)
if len(options) > 0:
new_pos = neighbours[choice(options)]
else:
new_pos = 0
return new_pos
def choose_new_pos_3d(pos, cells):
# 3d version of "choose_new_pos", the same method is used
i = pos[0]
j = pos[1]
k = pos[2]
# this currently assumes only square transitions on the cubic grid, may want to alter
neighbours = [(i + 1, j, k), (i - 1, j, k), (i, j + 1, k), (i, j - 1, k), (i, j, k + 1), (i, j, k - 1)]
options = [0, 1, 2, 3, 4, 5]
cont = 0
new_pos = 0
while cont == 0 and len(options) > 0:
pick = choice(options)
check = neighbours[pick]
if check in cells:
options.remove(pick)
else:
cont = 1
new_pos = check
return new_pos
def choose_new_pos_3d_eq(pos, cells):
    # 3d version of "choose_new_pos_eq": all free neighbouring sites are found
    # first and one is then picked with equal probability
    i = pos[0]
    j = pos[1]
    k = pos[2]
    # this currently assumes only square transitions on the cubic grid, may want to alter
    neighbours = [(i + 1, j, k), (i - 1, j, k), (i, j + 1, k), (i, j - 1, k), (i, j, k + 1), (i, j, k - 1)]
    options = [0, 1, 2, 3, 4, 5]
    for n in range(len(neighbours)):
        if neighbours[n] in cells:
            options.remove(n)
    if len(options) > 0:
        new_pos = neighbours[choice(options)]
    else:
        new_pos = 0
    return new_pos
def move_cell(cells, pos, state, switch_3d):
# moves the cell
if switch_3d:
new_location = choose_new_pos_3d(pos, cells)
else:
new_location = choose_new_pos(pos, cells)
if new_location != 0:
del cells[pos]
cells.update({new_location: [state, 0, 1]})
def update_cell_basic(cells, pos, params, switch_3d, mot_params):
# updates a given cell based on the current state of that cell
# pos is string describing position
# time is from random number generator giving time of interaction
# cells is dict describing all cells in the tumour
state = cells.get(pos)[0]
mot = cells.get(pos)[2]
# Once motility is included first thing is to make a decision on whether the cell moves, divides, or switches
# to a different motility state. Need to check that an appropriate time step is being used still.
mot_check = random()
if mot == 1:
# Can move, cell can either switch motility state or move or die
if mot_check < mot_params.p/(mot_params.dict.get(mot) + params.death.get(state)):
            # then the motility state changes
cells.update({pos: [state, 0, abs(mot-1)]})
elif mot_check < mot_params.dict.get(mot)/(mot_params.dict.get(mot) + params.death.get(state)):
# The cell moves
move_cell(cells, pos, state, switch_3d)
else:
# cell death
del cells[pos]
else:
# No motility, can either switch state or go to division decisions
if mot_check < mot_params.m/(mot_params.dict.get(mot) + params.dict.get(state)):
            # then the motility state changes
cells.update({pos: [state, 0, abs(mot - 1)]})
            # The cell divides or dies; we can just move on to that section as we have already
            # conditioned on the probability of it happening
else:
if switch_3d:
daughter = choose_new_pos_3d(pos, cells)
else:
daughter = choose_new_pos(pos, cells)
if state == 0:
# if it's a stem cell there are 2 possibilities, S > S + S, S > S + P
# generate random number to determine fate, compare to epsilon
r_num = random()
if r_num < params.e:
# divide > S + S
if daughter != 0:
cells.update({daughter: [0, 0, 0]})
else:
# divide > S + P
if daughter != 0:
cells.update({daughter: [1, 0, 0]})
elif state == 1:
                # if it's a progenitor cell there are 2 possibilities, P > P + P, P > D
# generate random number to determine fate, start by assuming each happens with equal chance
r_num = random()
if r_num < 0.5:
# P > P + P
if daughter != 0:
cells.update({daughter: [1, 0, 0]})
else:
# P > D
cells.update({pos: [2, 0, 0]})
else:
# If it is differentiated cell the only possible state change is death
del cells[pos]
def update_cell_quiescent(cells, pos, params, switch_3d, mot_params):
# updates a given cell based on the current state of that cell
# pos is string describing position
# time is from random number generator giving time of interaction
# cells is dict describing all cells in the tumour
state = cells.get(pos)[0]
mot = cells.get(pos)[2]
# Once motility is included first thing is to make a decision on whether the cell moves, divides, or switches
# to a different motility state. Need to check that an appropriate time step is being used still.
mot_check = random()
if mot == 1:
# Can move, cell can either switch motility state or move or die
if mot_check < mot_params.p / (mot_params.dict.get(mot) + params.death.get(state)):
            # then the motility state changes
cells.update({pos: [state, 0, abs(mot - 1)]})
elif mot_check < mot_params.dict.get(mot) / (mot_params.dict.get(mot) + params.death.get(state)):
# The cell moves
move_cell(cells, pos, state, switch_3d)
else:
# cell death
del cells[pos]
else:
# No motility, can either switch state or go to division decisions
if mot_check < mot_params.m / (mot_params.dict.get(mot) + params.dict.get(state)):
            # then the motility state changes
cells.update({pos: [state, 0, abs(mot - 1)]})
else:
if switch_3d:
daughter = choose_new_pos_3d(pos, cells)
else:
daughter = choose_new_pos(pos, cells)
if state == 0:
# if it's a stem cell there are 3 possibilities, S > S + S, S > S + P and S > dead
# generate random number to determine fate
r_num = random()
if r_num < params.k1/params.dict.get(0):
# divide > S + S
if daughter != 0:
cells.update({daughter: [0, 0, 0]})
elif r_num < (params.k1+params.k2)/params.dict.get(0):
# divide > S + P
if daughter != 0:
cells.update({daughter: [1, 0, 0]})
else:
# die
del cells[pos]
elif state == 1:
                # if it's a progenitor cell there are 3 possibilities, P > P + P, P > D, P > Q
# generate random number to determine fate
r_num = random()
if r_num < params.k4/params.dict.get(1):
# P > P + P
if daughter != 0:
cells.update({daughter: [1, 0, 0]})
elif r_num < (params.k4+params.k5)/params.dict.get(1):
# P > D
cells.update({pos: [2, 0, 0]})
else:
# P > Q
cells.update({pos: [3, 0, 0]})
elif state == 2:
# If it is differentiated cell the only possible state change is death
del cells[pos]
else:
                # If it's quiescent the only possible fate is to return to a stem cell
cells.update({pos: [0, 0, 0]})
def animate(animation_df, r, name):
# animate the simulations using plotly and save as a .html
animation_df['coord'] = animation_df[['x', 'y']].values.tolist()
animation_df['coord'] = animation_df['coord'].apply(lambda x: np.array(x))
#print(animation_df)
if len(animation_df['coord'].values[0]) > 2:
print("currently cannot animate for 3d")
raise ValueError()
mapping = {0: 'stem cell', 1: 'progenitor cell', 2: 'differentiated cell', 3: 'quiescent cell'}
animation_df = animation_df.replace({'state': mapping})
animation_df = animation_df.append(
{'state': 'differentiated cell', 'count': 0, 'coord': 0, 'x': 10000, 'y': 10000},
ignore_index=True)
animation_df = animation_df.append(
{'state': 'progenitor cell', 'count': 0, 'coord': 0, 'x': 10000, 'y': 10000},
ignore_index=True)
animation_df = animation_df.append(
{'state': 'quiescent cell', 'count': 0, 'coord': 0, 'x': 10000, 'y': 10000},
ignore_index=True)
fig = px.scatter(animation_df, x="x", y="y", animation_frame="count",
color='state', size_max=55, range_x=[-50, 50], range_y=[-50, 50])
fig.update_traces(marker=dict(size=12))
fig.layout.updatemenus[0].buttons[0].args[1]["frame"]["duration"] = 20
fig.show()
fig.write_html(name + '/ani_' + str(r) + '.html')
def read_from_file(file_name, switch_3d):
# read data from binary file in the form: time step, x, y, state, motility
if switch_3d:
struct_fmt = '=iiiiii' # 6 ints
else:
struct_fmt = '=iiiii' # 5 ints
struct_len = struct.calcsize(struct_fmt)
struct_unpack = struct.Struct(struct_fmt).unpack_from
results = []
with open(file_name, "rb") as f:
while True:
data = f.read(struct_len)
if not data: break
s = struct_unpack(data)
results.append(s)
return results
def calculate_timestep(params, mot_params):
# calculates timestep based on the probability of 2 or more events happening in a timestep (<0.01)
max_rate = max(params.dict.items(), key=operator.itemgetter(1))[1] + \
max(mot_params.dict.items(), key=operator.itemgetter(1))[1]
# playing it safe by summing max of each
lambert = 0.135157
step = lambert/max_rate
print('exact timestep from calculation', step)
if step > 0.1:
return step // 0.1 * 0.1
elif step > 0.01:
return step // 0.01 * 0.01
else:
return step // 0.001 * 0.001
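A worked example of the timestep bound with illustrative rates (all numbers below are made up; the 0.135157 constant comes from the function above):

    params = ParametersBasic(s_division_rate=0.5, epsilon=0.3,
                             p_division_rate=1.0, apoptosis_rate=0.1)
    mot_params = MotilityParameters(switch_to_m_rate=0.2, switch_to_p_rate=0.2,
                                    motility_rate=2.0)
    # max division/death rate is 1.0, max move-or-switch rate is 0.2 + 2.0 = 2.2,
    # so the exact step is 0.135157 / 3.2 ~= 0.0422, floored to 0.04
    print(calculate_timestep(params, mot_params))  # -> 0.04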
| 13,857
| 10
| 469
|
4f5568285363d98aa73edfd78a49dd8dcefd4487
| 2,616
|
py
|
Python
|
DATA/Ejercicio Pandas/MisMacrosPy.py
|
JeisonAlarcon/Data_Mining
|
265f1ffc202ca1f8b9f6223e95d56bb72e4c8ff3
|
[
"MIT"
] | null | null | null |
DATA/Ejercicio Pandas/MisMacrosPy.py
|
JeisonAlarcon/Data_Mining
|
265f1ffc202ca1f8b9f6223e95d56bb72e4c8ff3
|
[
"MIT"
] | null | null | null |
DATA/Ejercicio Pandas/MisMacrosPy.py
|
JeisonAlarcon/Data_Mining
|
265f1ffc202ca1f8b9f6223e95d56bb72e4c8ff3
|
[
"MIT"
] | null | null | null |
import pandas as pd
| 51.294118
| 147
| 0.619266
|
import pandas as pd
def onewayfreq(rows,data,weight=None,cum=True,ord="level",subset=None):
    """One-way (optionally weighted) frequency table with percents and cumulative columns."""
if weight is None: weight = 1
else: weight = data[weight]
if subset != None: data = data.query(subset,engine="python")
out = (pd.crosstab(index=data[rows],values=weight,aggfunc="sum",columns="sum").reset_index()
.rename(columns={"sum":"Frequency"})
.eval("Percent=100*Frequency/Frequency.sum()",engine="python")
)
if ord == "freq": out = out.sort_values(by=["Frequency"],ascending=False).reset_index(drop=True)
if ord == "-freq": out = out.sort_values(by=["Frequency"],ascending=True).reset_index(drop=True)
if cum == True: out = (out.eval("CumulativeFrequency=Frequency.cumsum()",engine="python")
.eval("CumulativePercent=Percent.cumsum()",engine="python"))
out.columns.name=""
return(out)
def twowayfreq(rows,columns,data,weight=None,subset=None,ord="level",percent=False,rowpercent=False,colpercent=False):
    """Two-way (optionally weighted) frequency table with optional overall, row and column percents."""
if weight is None: weight = 1
else: weight = data[weight]
if subset != None: data = data.query(subset,engine="python")
out = pd.crosstab(index=data[rows],values=weight,aggfunc="sum",columns=data[columns],margins=True,margins_name="Total").reset_index().fillna(0)
p1 = out.head(n=(len(out.index)-1))
if ord == "freq": p1 = p1.sort_values(by=["Total"],ascending=False)
if ord == "-freq": p1 = p1.sort_values(by=["Total"],ascending=True)
out = p1.append(out.tail(n=1)).reset_index(drop=True)
ids = out.select_dtypes("number").columns
out2 = out.copy().assign(Ind = "2")
out3 = out.copy().assign(Ind = "3")
out4 = out.copy().assign(Ind = "4")
out = out.assign(Ind = "1")
    def pct(x):
        # the factor 200 offsets the "Total" margin doubling each sum, giving true percents
        return(200*x/x.sum())
out2[ids] = 200*out2[ids]/out2["Total"].sum()
out3[ids] = out._get_numeric_data().apply(pct,axis=1)
out4[ids] = out._get_numeric_data().apply(pct,axis=0)
out = out.append(out2).append(out3).append(out4).rename_axis('MyIdx').sort_values(by=["MyIdx","Ind"])
out.loc[out["Ind"] != "1",rows] = ""
print("Frequency")
if percent == False: out = out.query("Ind != '2'")
else: print("Percent")
if rowpercent == False: out = out.query("Ind != '3'")
else: print("Row percent")
if colpercent == False: out = out.query("Ind != '4'")
else: print("Col percent")
out = out.drop(columns="Ind").reset_index(drop=True)
out.columns.name=""
out.index.names=[""]
out = out.rename(columns={rows: rows + " / " + columns})
print(" ")
return(out)
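What onewayfreq computes, expressed with plain pandas on a throwaway DataFrame (column names and data invented):

    df = pd.DataFrame({'color': ['red', 'blue', 'red', 'red', 'blue'],
                       'w': [1, 2, 1, 1, 3]})
    freq = df.groupby('color')['w'].sum().rename('Frequency').reset_index()
    freq['Percent'] = 100 * freq['Frequency'] / freq['Frequency'].sum()
    freq['CumulativeFrequency'] = freq['Frequency'].cumsum()
    freq['CumulativePercent'] = freq['Percent'].cumsum()
    print(freq)  # same columns onewayfreq returns with cum=True, ord='level'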
| 2,551
| 0
| 46
|
7c713c20032eec8a8f8dbf77f8cd9a9bca904c31
| 1,454
|
py
|
Python
|
TSP.py
|
ccfelius/TravelingSalesMan
|
ebc3b960859590623c0eb301545cd093c41d157a
|
[
"MIT"
] | 1
|
2020-12-10T17:36:39.000Z
|
2020-12-10T17:36:39.000Z
|
TSP.py
|
ccfelius/TravelingSalesMan
|
ebc3b960859590623c0eb301545cd093c41d157a
|
[
"MIT"
] | null | null | null |
TSP.py
|
ccfelius/TravelingSalesMan
|
ebc3b960859590623c0eb301545cd093c41d157a
|
[
"MIT"
] | 1
|
2021-01-05T13:08:07.000Z
|
2021-01-05T13:08:07.000Z
|
""" TSP SIMULATED ANNEALING """
# Imports
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# read data from file
filename = "eil51.tsp"
f = open(f"TSP-configurations/{filename}.txt", "r")
network = f.readlines()[6:-1]
# create dictionary to store coordinates
nodes = dict()
# split data and put in dict
for node in network:
node = [int(x) for x in node.rstrip().split(' ')]
nodes[node[0]] = node[1:]
x = [x[0] for x in nodes.values()]
y = [y[1] for y in nodes.values()]
# load in data of optimal path
data = pd.read_csv("data/eil51.tsp.tsp-batch-20.txt", sep="\t")
colname = "428.87"
z = list(map(float,list(data[f'{colname}-19'])))
# optimum so far (costs = 428.87175639203394)
# r= [1.0, 32, 11, 38, 5, 37, 17, 4, 18, 47, 12, 46, 51.0, 27, 6, 48, 23, 7, 43, 24, 14, 25, 13, 41, 40, 19, 42, 44, 15, 45, 33, 39, 10, 49, 9, 30, 34, 21, 50, 16, 2, 29, 20, 35, 36, 3, 28, 31, 26, 8, 22, 1.0]
temp = []
# get coordinates of each point
for item in z:
temp.append(nodes[item])
temp = np.array(temp)
# path = [temp[i:i+2] for i in range(len(temp)-2+1)]
# print(path)
# Plot the nodes and coordinates
fig, ax = plt.subplots()
ax.scatter(x, y, color="deeppink")
for i, txt in enumerate(nodes.keys()):
ax.annotate(txt, (x[i], y[i]))
ax.plot(*temp.T, color="deeppink", alpha=0.5)
ax.set_title(f"Shortest Route: {filename}, costs: {colname}", fontsize=16)
#
plt.savefig("plots/eil51-opt-route-3.png")
plt.show()
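# --- Editor's sketch (not in the original script): recompute the cost of the
# --- plotted route as the sum of Euclidean edge lengths, to cross-check the
# --- 428.87 figure quoted in the column name above.
route_cost = sum(np.linalg.norm(temp[i + 1] - temp[i]) for i in range(len(temp) - 1))
print(f"Recomputed route cost: {route_cost:.2f}")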
| 25.508772 | 209 | 0.636176
|
""" TSP SIMULATED ANNEALING """
# Imports
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# read data from file
filename = "eil51.tsp"
f = open(f"TSP-configurations/{filename}.txt", "r")
network = f.readlines()[6:-1]
# create dictionary to store coordinates
nodes = dict()
# split data and put in dict
for node in network:
node = [int(x) for x in node.rstrip().split(' ')]
nodes[node[0]] = node[1:]
x = [x[0] for x in nodes.values()]
y = [y[1] for y in nodes.values()]
# load in data of optimal path
data = pd.read_csv("data/eil51.tsp.tsp-batch-20.txt", sep="\t")
colname = "428.87"
z = list(map(float,list(data[f'{colname}-19'])))
# optimum so far (costs = 428.87175639203394)
# r= [1.0, 32, 11, 38, 5, 37, 17, 4, 18, 47, 12, 46, 51.0, 27, 6, 48, 23, 7, 43, 24, 14, 25, 13, 41, 40, 19, 42, 44, 15, 45, 33, 39, 10, 49, 9, 30, 34, 21, 50, 16, 2, 29, 20, 35, 36, 3, 28, 31, 26, 8, 22, 1.0]
temp = []
# get coordinates of each point
for item in z:
temp.append(nodes[item])
temp = np.array(temp)
# path = [temp[i:i+2] for i in range(len(temp)-2+1)]
# print(path)
# Plot the nodes and coordinates
fig, ax = plt.subplots()
ax.scatter(x, y, color="deeppink")
for i, txt in enumerate(nodes.keys()):
ax.annotate(txt, (x[i], y[i]))
ax.plot(*temp.T, color="deeppink", alpha=0.5)
ax.set_title(f"Shortest Route: {filename}, costs: {colname}", fontsize=16)
#
plt.savefig("plots/eil51-opt-route-3.png")
plt.show()
| 0 | 0 | 0 |
161e27c9bae1210d9e0d2d5bf83676988c609bbf | 90,220 | py | Python | tests/Lopez12CPL/test_Lopez12CPL.py | jrlivesey/vplanet | 4384221baa78e81d46b0c0fcb7de2f5a5de2e83c | ["MIT"] | null | null | null | tests/Lopez12CPL/test_Lopez12CPL.py | jrlivesey/vplanet | 4384221baa78e81d46b0c0fcb7de2f5a5de2e83c | ["MIT"] | null | null | null | tests/Lopez12CPL/test_Lopez12CPL.py | jrlivesey/vplanet | 4384221baa78e81d46b0c0fcb7de2f5a5de2e83c | ["MIT"] | null | null | null |
import astropy.units as u
import pytest
from benchmark import Benchmark, benchmark
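# --- Editor's note (sketch, not part of the original test): the @benchmark
# --- decorator below is assumed to compare each expected "value" against the
# --- corresponding entry of the VPLanet log, within the given "rtol". A
# --- minimal stand-alone illustration of such a tolerance check via pytest,
# --- using purely illustrative numbers:
def _example_rtol_check():
    expected, actual, rtol = 3.155760e13, 3.155761e13, 1e-4
    assert actual == pytest.approx(expected, rel=rtol)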
@benchmark(
{
"log.initial.system.Age": {"value": 3.155760e13, "unit": u.sec},
"log.initial.system.Time": {"value": 0.000000, "unit": u.sec},
"log.initial.system.TotAngMom": {
"value": 6.747268e40,
"unit": (u.kg * u.m ** 2) / u.sec,
},
"log.initial.system.TotEnergy": {"value": -2.482441e43, "unit": u.Joule},
"log.initial.system.PotEnergy": {"value": -2.482440e43, "unit": u.Joule},
"log.initial.system.KinEnergy": {"value": 5.347271e34, "unit": u.Joule},
"log.initial.system.DeltaTime": {"value": 0.000000, "unit": u.sec},
"log.initial.star.Mass": {"value": 1.988416e30, "unit": u.kg},
"log.initial.star.Obliquity": {"value": 0.000000, "unit": u.rad},
"log.initial.star.PrecA": {"value": 0.000000, "unit": u.rad},
"log.initial.star.Xobl": {"value": 0.000000},
"log.initial.star.Yobl": {"value": 0.000000},
"log.initial.star.Zobl": {"value": 1.000000},
"log.initial.star.Radius": {"value": 6.378100e06, "unit": u.m},
"log.initial.star.RadGyra": {"value": 0.500000},
"log.initial.star.RotAngMom": {
"value": 1.470605e39,
"unit": (u.kg * u.m ** 2) / u.sec,
},
"log.initial.star.RotKinEnergy": {"value": 5.347271e34, "unit": u.Joule},
"log.initial.star.RotVel": {"value": 463.828521, "unit": u.m / u.sec},
"log.initial.star.BodyType": {"value": 0.000000},
"log.initial.star.RotRate": {"value": 7.272205e-05, "unit": 1 / u.sec},
"log.initial.star.RotPer": {"value": 8.640000e04, "unit": u.sec},
"log.initial.star.Density": {"value": 1.829552e09, "unit": u.kg / u.m ** 3},
"log.initial.star.SurfEnFluxTotal": {
"value": 4.474499e-12,
"unit": u.kg / u.sec ** 3,
},
"log.initial.star.TidalQ": {"value": 1.000000e06},
"log.initial.star.ImK2": {"value": -5.000000e-07},
"log.initial.star.K2": {"value": 0.500000},
"log.initial.star.K2Man": {"value": 0.010000},
"log.initial.star.Imk2Man": {"value": 0.000000},
"log.initial.star.TidalQMantle": {"value": 100.000000},
"log.initial.star.HEcc": {"value": 0.000000},
"log.initial.star.HZLimitDryRunaway": {"value": 3.036202e09, "unit": u.m},
"log.initial.star.HZLimRecVenus": {"value": 2.502002e09, "unit": u.m},
"log.initial.star.HZLimRunaway": {"value": 3.267138e09, "unit": u.m},
"log.initial.star.HZLimMoistGreenhouse": {"value": 3.310536e09, "unit": u.m},
"log.initial.star.HZLimMaxGreenhouse": {"value": 5.611497e09, "unit": u.m},
"log.initial.star.HZLimEarlyMars": {"value": 6.122597e09, "unit": u.m},
"log.initial.star.Instellation": {
"value": -1.000000,
"unit": u.kg / u.sec ** 3,
},
"log.initial.star.KEcc": {"value": 0.000000},
"log.initial.star.Eccentricity": {"value": -1.000000},
"log.initial.star.OrbEnergy": {"value": 0.000000, "unit": u.Joule},
"log.initial.star.MeanMotion": {"value": -1.000000, "unit": 1 / u.sec},
"log.initial.star.OrbPeriod": {"value": -1.000000, "unit": u.sec},
"log.initial.star.SemiMajorAxis": {"value": -1.000000, "unit": u.m},
"log.initial.star.CriticalSemiMajorAxis": {"value": -1.000000, "unit": u.m},
"log.initial.star.COPP": {"value": 0.000000},
"log.initial.star.OrbAngMom": {
"value": 0.000000,
"unit": (u.kg * u.m ** 2) / u.sec,
},
"log.initial.star.LongP": {"value": 0.000000, "unit": u.rad},
"log.initial.star.LXUVTot": {"value": 1.923000e20, "unit": u.kg / u.sec ** 3},
"log.initial.star.TotOrbEnergy": {"value": -2.119237e35, "unit": u.Joule},
"log.initial.star.OrbPotEnergy": {"value": -1.000000, "unit": u.Joule},
"log.initial.star.LostEnergy": {"value": 5.562685e-309, "unit": u.Joule},
"log.initial.star.LostAngMom": {
"value": 5.562685e-309,
"unit": (u.kg * u.m ** 2) / u.sec,
},
"log.initial.star.LockTime": {"value": -1.000000, "unit": u.sec},
"log.initial.star.BodyDsemiDtEqtide": {"value": -1.000000},
"log.initial.star.BodyDeccDt": {"value": -1.000000},
"log.initial.star.DOblDtEqtide": {"value": 0.000000, "unit": u.rad / u.sec},
"log.initial.star.DRotPerDtEqtide": {"value": 2.054554e-27},
"log.initial.star.DRotRateDtEqtide": {
"value": -1.729298e-36,
"unit": 1 / u.sec ** 2,
},
"log.initial.star.EqRotRateDiscrete": {
"value": 6.296062e-06,
"unit": 1 / u.sec,
},
"log.initial.star.EqRotPerDiscrete": {"value": 9.979547e05, "unit": u.sec},
"log.initial.star.EqRotRateCont": {"value": 8.688566e-06, "unit": 1 / u.sec},
"log.initial.star.EqRotPerCont": {"value": 7.231556e05, "unit": u.sec},
"log.initial.star.EqRotPer": {"value": 9.979547e05, "unit": u.sec},
"log.initial.star.EqTidePower": {"value": 0.000000, "unit": 1 / u.sec},
"log.initial.star.GammaRot": {"value": -1.000000, "unit": u.sec},
"log.initial.star.GammaOrb": {"value": -1.000000, "unit": u.sec},
"log.initial.star.OceanK2": {"value": 0.010000},
"log.initial.star.EnvTidalQ": {"value": -1.000000},
"log.initial.star.OceanTidalQ": {"value": -1.000000},
"log.initial.star.TideLock": {"value": 0.000000},
"log.initial.star.RotTimeEqtide": {"value": 0.000000, "unit": u.sec},
"log.initial.star.EnvK2": {"value": 0.010000},
"log.initial.star.OblTimeEqtide": {"value": -1.000000},
"log.initial.star.PowerEqtide": {"value": 2287.372458, "unit": u.W},
"log.initial.star.SurfEnFluxEqtide": {
"value": 4.474499e-12,
"unit": u.kg / u.sec ** 3,
},
"log.initial.star.Luminosity": {"value": 1.923000e23, "unit": u.W},
"log.initial.star.LXUVStellar": {"value": 1.923000e20, "unit": u.W},
"log.initial.star.Temperature": {"value": 5778.000000, "unit": u.K},
"log.initial.star.LXUVFrac": {"value": 0.001000},
"log.initial.star.RossbyNumber": {"value": 0.078260},
"log.initial.star.DRotPerDtStellar": {"value": 6.530034e-18},
"log.initial.auto.Mass": {"value": 2.000000, "unit": u.Mearth},
"log.initial.auto.Obliquity": {"value": 0.785398, "unit": u.rad},
"log.initial.auto.PrecA": {"value": 0.000000, "unit": u.rad},
"log.initial.auto.Xobl": {"value": 0.707107},
"log.initial.auto.Yobl": {"value": 0.000000},
"log.initial.auto.Zobl": {"value": 0.707107},
"log.initial.auto.Radius": {"value": 2.096446e08, "unit": u.m},
"log.initial.auto.RadGyra": {"value": 0.400000},
"log.initial.auto.RotAngMom": {
"value": 1.221650e37,
"unit": (u.kg * u.m ** 2) / u.sec,
},
"log.initial.auto.RotKinEnergy": {"value": 8.884088e32, "unit": u.Joule},
"log.initial.auto.RotVel": {"value": 3.049157e04, "unit": u.m / u.sec},
"log.initial.auto.BodyType": {"value": 0.000000},
"log.initial.auto.RotRate": {"value": 0.000145, "unit": 1 / u.sec},
"log.initial.auto.RotPer": {"value": 0.500000, "unit": u.day},
"log.initial.auto.Density": {"value": 0.309474, "unit": u.kg / u.m ** 3},
"log.initial.auto.SurfEnFluxTotal": {
"value": 2.324795e04,
"unit": u.kg / u.sec ** 3,
},
"log.initial.auto.TidalQ": {"value": -1.000000e05},
"log.initial.auto.ImK2": {"value": -5.000000e-06},
"log.initial.auto.K2": {"value": 0.500000},
"log.initial.auto.K2Man": {"value": 0.300000},
"log.initial.auto.Imk2Man": {"value": -0.003000},
"log.initial.auto.TidalQMantle": {"value": 100.000000},
"log.initial.auto.HEcc": {"value": 0.000000},
"log.initial.auto.HZLimitDryRunaway": {"value": 3.098811e09, "unit": u.m},
"log.initial.auto.HZLimRecVenus": {"value": 2.502002e09, "unit": u.m},
"log.initial.auto.HZLimRunaway": {"value": 3.267138e09, "unit": u.m},
"log.initial.auto.HZLimMoistGreenhouse": {"value": 3.310536e09, "unit": u.m},
"log.initial.auto.HZLimMaxGreenhouse": {"value": 5.611497e09, "unit": u.m},
"log.initial.auto.HZLimEarlyMars": {"value": 6.122597e09, "unit": u.m},
"log.initial.auto.Instellation": {
"value": 69.788358,
"unit": u.kg / u.sec ** 3,
},
"log.initial.auto.KEcc": {"value": 0.200000},
"log.initial.auto.Eccentricity": {"value": 0.200000},
"log.initial.auto.OrbEnergy": {"value": -5.298093e34, "unit": u.Joule},
"log.initial.auto.MeanMotion": {"value": 6.296062e-06, "unit": 1 / u.sec},
"log.initial.auto.OrbPeriod": {"value": 9.979547e05, "unit": u.sec},
"log.initial.auto.SemiMajorAxis": {"value": 0.100000, "unit": u.au},
"log.initial.auto.CriticalSemiMajorAxis": {"value": -1.000000, "unit": u.m},
"log.initial.auto.COPP": {"value": 0.000000},
"log.initial.auto.OrbAngMom": {
"value": 1.648983e40,
"unit": (u.kg * u.m ** 2) / u.sec,
},
"log.initial.auto.LongP": {"value": 0.000000, "unit": u.rad},
"log.initial.auto.LXUVTot": {"value": -1.000000, "unit": u.kg / u.sec ** 3},
"log.initial.auto.TotOrbEnergy": {"value": -2.119237e35, "unit": u.Joule},
"log.initial.auto.OrbPotEnergy": {"value": -1.059619e35, "unit": u.Joule},
"log.initial.auto.LostEnergy": {"value": 5.562685e-309, "unit": u.Joule},
"log.initial.auto.TidalRadius": {"value": 2.096446e08, "unit": u.m},
"log.initial.auto.DsemiDtEqtide": {"value": 0.000192, "unit": u.m / u.sec},
"log.initial.auto.DeccDtEqtide": {"value": 4.036352e-15, "unit": 1 / u.sec},
"log.initial.auto.DMeanMotionDtEqtide": {
"value": -1.211805e-19,
"unit": 1 / u.sec ** 2,
},
"log.initial.auto.DOrbPerDtEqtide": {"value": 1.920766e-08},
"log.initial.auto.EccTimeEqtide": {"value": 4.954969e13, "unit": u.sec},
"log.initial.auto.SemiTimeEqtide": {"value": 7.793412e13, "unit": u.sec},
"log.initial.auto.DHEccDtEqtide": {"value": 0.000000, "unit": 1 / u.sec},
"log.initial.auto.DKEccDtEqtide": {"value": 4.036352e-15, "unit": 1 / u.sec},
"log.initial.auto.DXoblDtEqtide": {"value": 1.462258e-12, "unit": 1 / u.sec},
"log.initial.auto.DYoblDtEqtide": {"value": 0.000000, "unit": 1 / u.sec},
"log.initial.auto.DZoblDtEqtide": {"value": -1.462258e-12, "unit": 1 / u.sec},
"log.initial.auto.LockTime": {"value": -1.000000, "unit": u.sec},
"log.initial.auto.BodyDsemiDtEqtide": {"value": -1.000000},
"log.initial.auto.BodyDeccDt": {"value": -1.000000},
"log.initial.auto.DOblDtEqtide": {"value": 2.067945e-12, "unit": u.rad / u.sec},
"log.initial.auto.DRotPerDtEqtide": {"value": 3.287202e-07},
"log.initial.auto.DRotRateDtEqtide": {
"value": -1.106722e-15,
"unit": 1 / u.sec ** 2,
},
"log.initial.auto.EqRotRateDiscrete": {
"value": 6.296062e-06,
"unit": 1 / u.sec,
},
"log.initial.auto.EqRotPerDiscrete": {"value": 9.979547e05, "unit": u.sec},
"log.initial.auto.EqRotRateCont": {"value": 8.688566e-06, "unit": 1 / u.sec},
"log.initial.auto.EqRotPerCont": {"value": 7.231556e05, "unit": u.sec},
"log.initial.auto.EqRotPer": {"value": 9.979547e05, "unit": u.sec},
"log.initial.auto.EqTidePower": {"value": 0.000000, "unit": 1 / u.sec},
"log.initial.auto.GammaRot": {"value": -1.000000, "unit": u.sec},
"log.initial.auto.GammaOrb": {"value": -1.000000, "unit": u.sec},
"log.initial.auto.OceanK2": {"value": 0.010000},
"log.initial.auto.EnvTidalQ": {"value": -1.000000},
"log.initial.auto.OceanTidalQ": {"value": -1.000000},
"log.initial.auto.TideLock": {"value": 0.000000},
"log.initial.auto.RotTimeEqtide": {"value": 1.314188e11, "unit": u.sec},
"log.initial.auto.EnvK2": {"value": 0.500000},
"log.initial.auto.OblTimeEqtide": {"value": -1.000000},
"log.initial.auto.PowerEqtide": {"value": 1.284046e22, "unit": u.W},
"log.initial.auto.SurfEnFluxEqtide": {
"value": 2.324895e04,
"unit": u.kg / u.sec ** 3,
},
"log.initial.auto.SurfWaterMass": {"value": 0.000000, "unit": u.kg},
"log.initial.auto.EnvelopeMass": {"value": 1.000000, "unit": u.Mearth},
"log.initial.auto.OxygenMass": {"value": 0.000000, "unit": u.kg},
"log.initial.auto.RGLimit": {"value": 3.099115e09, "unit": u.m},
"log.initial.auto.XO": {"value": 0.000000},
"log.initial.auto.EtaO": {"value": 0.000000},
"log.initial.auto.PlanetRadius": {"value": 32.869442, "unit": u.Rearth},
"log.initial.auto.OxygenMantleMass": {"value": 0.000000, "unit": u.kg},
"log.initial.auto.RadXUV": {"value": -1.000000, "unit": u.m},
"log.initial.auto.RadSolid": {"value": -1.000000, "unit": u.m},
"log.initial.auto.PresXUV": {"value": 5.000000},
"log.initial.auto.ScaleHeight": {"value": -1.000000, "unit": u.m},
"log.initial.auto.ThermTemp": {"value": 400.000000, "unit": u.K},
"log.initial.auto.AtmGasConst": {"value": 4124.000000},
"log.initial.auto.PresSurf": {"value": -1.000000, "unit": u.Pa},
"log.initial.auto.DEnvMassDt": {"value": -2.508715e09, "unit": u.kg / u.sec},
"log.initial.auto.FXUV": {"value": 0.069788, "unit": u.W / u.m ** 2},
"log.initial.auto.AtmXAbsEffH2O": {"value": 0.300000},
"log.initial.auto.RocheRadius": {"value": 1.885546e08, "unit": u.m},
"log.initial.auto.BondiRadius": {"value": 7.899468e08, "unit": u.m},
"log.initial.auto.HEscapeRegime": {"value": 3.000000},
"log.initial.auto.RRCriticalFlux": {"value": 0.000139, "unit": u.W / u.m ** 2},
"log.initial.auto.KTide": {"value": 1.000000},
"log.initial.auto.RGDuration": {"value": 1.00000e06, "unit": u.yr},
"log.initial.bondi.Mass": {"value": 2.000000, "unit": u.Mearth},
"log.initial.bondi.Obliquity": {"value": 0.785398, "unit": u.rad},
"log.initial.bondi.PrecA": {"value": 0.000000, "unit": u.rad},
"log.initial.bondi.Xobl": {"value": 0.707107},
"log.initial.bondi.Yobl": {"value": 0.000000},
"log.initial.bondi.Zobl": {"value": 0.707107},
"log.initial.bondi.Radius": {"value": 2.096446e08, "unit": u.m},
"log.initial.bondi.RadGyra": {"value": 0.400000},
"log.initial.bondi.RotAngMom": {
"value": 1.221650e37,
"unit": (u.kg * u.m ** 2) / u.sec,
},
"log.initial.bondi.RotKinEnergy": {"value": 8.884088e32, "unit": u.Joule},
"log.initial.bondi.RotVel": {"value": 3.049157e04, "unit": u.m / u.sec},
"log.initial.bondi.BodyType": {"value": 0.000000},
"log.initial.bondi.RotRate": {"value": 0.000145, "unit": 1 / u.sec},
"log.initial.bondi.RotPer": {"value": 0.500000, "unit": u.day},
"log.initial.bondi.Density": {"value": 0.309474, "unit": u.kg / u.m ** 3},
"log.initial.bondi.SurfEnFluxTotal": {
"value": 2.324795e04,
"unit": u.kg / u.sec ** 3,
},
"log.initial.bondi.TidalQ": {"value": -1.000000e05},
"log.initial.bondi.ImK2": {"value": -5.000000e-06},
"log.initial.bondi.K2": {"value": 0.500000},
"log.initial.bondi.K2Man": {"value": 0.300000},
"log.initial.bondi.Imk2Man": {"value": -0.003000},
"log.initial.bondi.TidalQMantle": {"value": 100.000000},
"log.initial.bondi.HEcc": {"value": 0.000000},
"log.initial.bondi.HZLimitDryRunaway": {"value": 3.098811e09, "unit": u.m},
"log.initial.bondi.HZLimRecVenus": {"value": 2.502002e09, "unit": u.m},
"log.initial.bondi.HZLimRunaway": {"value": 3.267138e09, "unit": u.m},
"log.initial.bondi.HZLimMoistGreenhouse": {"value": 3.310536e09, "unit": u.m},
"log.initial.bondi.HZLimMaxGreenhouse": {"value": 5.611497e09, "unit": u.m},
"log.initial.bondi.HZLimEarlyMars": {"value": 6.122597e09, "unit": u.m},
"log.initial.bondi.Instellation": {
"value": 69.788358,
"unit": u.kg / u.sec ** 3,
},
"log.initial.bondi.KEcc": {"value": 0.200000},
"log.initial.bondi.Eccentricity": {"value": 0.200000},
"log.initial.bondi.OrbEnergy": {"value": -5.298093e34, "unit": u.Joule},
"log.initial.bondi.MeanMotion": {"value": 6.296062e-06, "unit": 1 / u.sec},
"log.initial.bondi.OrbPeriod": {"value": 9.979547e05, "unit": u.sec},
"log.initial.bondi.SemiMajorAxis": {"value": 0.100000, "unit": u.au},
"log.initial.bondi.CriticalSemiMajorAxis": {"value": -1.000000, "unit": u.m},
"log.initial.bondi.COPP": {"value": 0.000000},
"log.initial.bondi.OrbAngMom": {
"value": 1.648983e40,
"unit": (u.kg * u.m ** 2) / u.sec,
},
"log.initial.bondi.LongP": {"value": 0.000000, "unit": u.rad},
"log.initial.bondi.LXUVTot": {"value": -1.000000, "unit": u.kg / u.sec ** 3},
"log.initial.bondi.TotOrbEnergy": {"value": -2.119237e35, "unit": u.Joule},
"log.initial.bondi.OrbPotEnergy": {"value": -1.059619e35, "unit": u.Joule},
"log.initial.bondi.LostEnergy": {"value": 5.562685e-309, "unit": u.Joule},
"log.initial.bondi.TidalRadius": {"value": 2.096446e08, "unit": u.m},
"log.initial.bondi.DsemiDtEqtide": {"value": 0.000192, "unit": u.m / u.sec},
"log.initial.bondi.DeccDtEqtide": {"value": 4.036352e-15, "unit": 1 / u.sec},
"log.initial.bondi.DMeanMotionDtEqtide": {
"value": -1.211805e-19,
"unit": 1 / u.sec ** 2,
},
"log.initial.bondi.DOrbPerDtEqtide": {"value": 1.920766e-08},
"log.initial.bondi.EccTimeEqtide": {"value": 4.954969e13, "unit": u.sec},
"log.initial.bondi.SemiTimeEqtide": {"value": 7.793412e13, "unit": u.sec},
"log.initial.bondi.DHEccDtEqtide": {"value": 0.000000, "unit": 1 / u.sec},
"log.initial.bondi.DKEccDtEqtide": {"value": 4.036352e-15, "unit": 1 / u.sec},
"log.initial.bondi.DXoblDtEqtide": {"value": 1.462258e-12, "unit": 1 / u.sec},
"log.initial.bondi.DYoblDtEqtide": {"value": 0.000000, "unit": 1 / u.sec},
"log.initial.bondi.DZoblDtEqtide": {"value": -1.462258e-12, "unit": 1 / u.sec},
"log.initial.bondi.LockTime": {"value": -1.000000, "unit": u.sec},
"log.initial.bondi.BodyDsemiDtEqtide": {"value": -1.000000},
"log.initial.bondi.BodyDeccDt": {"value": -1.000000},
"log.initial.bondi.DOblDtEqtide": {
"value": 2.067945e-12,
"unit": u.rad / u.sec,
},
"log.initial.bondi.DRotPerDtEqtide": {"value": 3.287202e-07},
"log.initial.bondi.DRotRateDtEqtide": {
"value": -1.106722e-15,
"unit": 1 / u.sec ** 2,
},
"log.initial.bondi.EqRotRateDiscrete": {
"value": 6.296062e-06,
"unit": 1 / u.sec,
},
"log.initial.bondi.EqRotPerDiscrete": {"value": 9.979547e05, "unit": u.sec},
"log.initial.bondi.EqRotRateCont": {"value": 8.688566e-06, "unit": 1 / u.sec},
"log.initial.bondi.EqRotPerCont": {"value": 7.231556e05, "unit": u.sec},
"log.initial.bondi.EqRotPer": {"value": 9.979547e05, "unit": u.sec},
"log.initial.bondi.EqTidePower": {"value": 0.000000, "unit": 1 / u.sec},
"log.initial.bondi.GammaRot": {"value": -1.000000, "unit": u.sec},
"log.initial.bondi.GammaOrb": {"value": -1.000000, "unit": u.sec},
"log.initial.bondi.OceanK2": {"value": 0.010000},
"log.initial.bondi.EnvTidalQ": {"value": -1.000000},
"log.initial.bondi.OceanTidalQ": {"value": -1.000000},
"log.initial.bondi.TideLock": {"value": 0.000000},
"log.initial.bondi.RotTimeEqtide": {"value": 1.314188e11, "unit": u.sec},
"log.initial.bondi.EnvK2": {"value": 0.500000},
"log.initial.bondi.OblTimeEqtide": {"value": -1.000000},
"log.initial.bondi.PowerEqtide": {"value": 1.284046e22, "unit": u.W},
"log.initial.bondi.SurfEnFluxEqtide": {
"value": 2.324895e04,
"unit": u.kg / u.sec ** 3,
},
"log.initial.bondi.SurfWaterMass": {"value": 0.000000, "unit": u.kg},
"log.initial.bondi.EnvelopeMass": {"value": 1.000000, "unit": u.Mearth},
"log.initial.bondi.OxygenMass": {"value": 0.000000, "unit": u.kg},
"log.initial.bondi.RGLimit": {"value": 3.099115e09, "unit": u.m},
"log.initial.bondi.XO": {"value": 0.000000},
"log.initial.bondi.EtaO": {"value": 0.000000},
"log.initial.bondi.PlanetRadius": {"value": 32.869442, "unit": u.Rearth},
"log.initial.bondi.OxygenMantleMass": {"value": 0.000000, "unit": u.kg},
"log.initial.bondi.RadXUV": {"value": -1.000000, "unit": u.m},
"log.initial.bondi.RadSolid": {"value": -1.000000, "unit": u.m},
"log.initial.bondi.PresXUV": {"value": 5.000000},
"log.initial.bondi.ScaleHeight": {"value": -1.000000, "unit": u.m},
"log.initial.bondi.ThermTemp": {"value": 400.000000, "unit": u.K},
"log.initial.bondi.AtmGasConst": {"value": 4124.000000},
"log.initial.bondi.PresSurf": {"value": -1.000000, "unit": u.Pa},
"log.initial.bondi.DEnvMassDt": {"value": -1.230386e15, "unit": u.kg / u.sec},
"log.initial.bondi.FXUV": {"value": 0.069788, "unit": u.W / u.m ** 2},
"log.initial.bondi.AtmXAbsEffH2O": {"value": 0.300000},
"log.initial.bondi.RocheRadius": {"value": 1.885546e08, "unit": u.m},
"log.initial.bondi.BondiRadius": {"value": 7.899468e08, "unit": u.m},
"log.initial.bondi.HEscapeRegime": {"value": 5.000000},
"log.initial.bondi.RRCriticalFlux": {"value": 0.000139, "unit": u.W / u.m ** 2},
"log.initial.bondi.KTide": {"value": 1.000000},
"log.initial.bondi.RGDuration": {"value": 1.00000e06, "unit": u.yr},
"log.initial.el.Mass": {"value": 2.000000, "unit": u.Mearth},
"log.initial.el.Obliquity": {"value": 0.410152, "unit": u.rad},
"log.initial.el.PrecA": {"value": 0.000000, "unit": u.rad},
"log.initial.el.Xobl": {"value": 0.398749},
"log.initial.el.Yobl": {"value": 0.000000},
"log.initial.el.Zobl": {"value": 0.917060},
"log.initial.el.Radius": {"value": 2.096446e08, "unit": u.m},
"log.initial.el.RadGyra": {"value": 0.400000},
"log.initial.el.RotAngMom": {
"value": 6.108249e36,
"unit": (u.kg * u.m ** 2) / u.sec,
},
"log.initial.el.RotKinEnergy": {"value": 2.221022e32, "unit": u.Joule},
"log.initial.el.RotVel": {"value": 1.524578e04, "unit": u.m / u.sec},
"log.initial.el.BodyType": {"value": 0.000000},
"log.initial.el.RotRate": {"value": 7.272205e-05, "unit": 1 / u.sec},
"log.initial.el.RotPer": {"value": 1.000000, "unit": u.day},
"log.initial.el.Density": {"value": 0.309474, "unit": u.kg / u.m ** 3},
"log.initial.el.SurfEnFluxTotal": {
"value": 1.100803e04,
"unit": u.kg / u.sec ** 3,
},
"log.initial.el.TidalQ": {"value": -1.000000e05},
"log.initial.el.ImK2": {"value": -5.000000e-06},
"log.initial.el.K2": {"value": 0.500000},
"log.initial.el.K2Man": {"value": 0.300000},
"log.initial.el.Imk2Man": {"value": -0.003000},
"log.initial.el.TidalQMantle": {"value": 100.000000},
"log.initial.el.HEcc": {"value": 0.000000},
"log.initial.el.HZLimitDryRunaway": {"value": 3.098811e09, "unit": u.m},
"log.initial.el.HZLimRecVenus": {"value": 2.502002e09, "unit": u.m},
"log.initial.el.HZLimRunaway": {"value": 3.267138e09, "unit": u.m},
"log.initial.el.HZLimMoistGreenhouse": {"value": 3.310536e09, "unit": u.m},
"log.initial.el.HZLimMaxGreenhouse": {"value": 5.611497e09, "unit": u.m},
"log.initial.el.HZLimEarlyMars": {"value": 6.122597e09, "unit": u.m},
"log.initial.el.Instellation": {"value": 69.788358, "unit": u.kg / u.sec ** 3},
"log.initial.el.KEcc": {"value": 0.200000},
"log.initial.el.Eccentricity": {"value": 0.200000},
"log.initial.el.OrbEnergy": {"value": -5.298093e34, "unit": u.Joule},
"log.initial.el.MeanMotion": {"value": 6.296062e-06, "unit": 1 / u.sec},
"log.initial.el.OrbPeriod": {"value": 9.979547e05, "unit": u.sec},
"log.initial.el.SemiMajorAxis": {"value": 0.100000, "unit": u.au},
"log.initial.el.CriticalSemiMajorAxis": {"value": -1.000000, "unit": u.m},
"log.initial.el.COPP": {"value": 0.000000},
"log.initial.el.OrbAngMom": {
"value": 1.648983e40,
"unit": (u.kg * u.m ** 2) / u.sec,
},
"log.initial.el.LongP": {"value": 0.000000, "unit": u.rad},
"log.initial.el.LXUVTot": {"value": -1.000000, "unit": u.kg / u.sec ** 3},
"log.initial.el.TotOrbEnergy": {"value": -2.119237e35, "unit": u.Joule},
"log.initial.el.OrbPotEnergy": {"value": -1.059619e35, "unit": u.Joule},
"log.initial.el.LostEnergy": {"value": 5.562685e-309, "unit": u.Joule},
"log.initial.el.TidalRadius": {"value": 2.096446e08, "unit": u.m},
"log.initial.el.DsemiDtEqtide": {"value": 0.000192, "unit": u.m / u.sec},
"log.initial.el.DeccDtEqtide": {"value": 4.036352e-15, "unit": 1 / u.sec},
"log.initial.el.DMeanMotionDtEqtide": {
"value": -1.211805e-19,
"unit": 1 / u.sec ** 2,
},
"log.initial.el.DOrbPerDtEqtide": {"value": 1.920766e-08},
"log.initial.el.EccTimeEqtide": {"value": 4.954969e13, "unit": u.sec},
"log.initial.el.SemiTimeEqtide": {"value": 7.793412e13, "unit": u.sec},
"log.initial.el.DHEccDtEqtide": {"value": 0.000000, "unit": 1 / u.sec},
"log.initial.el.DKEccDtEqtide": {"value": 4.036352e-15, "unit": 1 / u.sec},
"log.initial.el.DXoblDtEqtide": {"value": 2.139632e-12, "unit": 1 / u.sec},
"log.initial.el.DYoblDtEqtide": {"value": 0.000000, "unit": 1 / u.sec},
"log.initial.el.DZoblDtEqtide": {"value": -9.303384e-13, "unit": 1 / u.sec},
"log.initial.el.LockTime": {"value": -1.000000, "unit": u.sec},
"log.initial.el.BodyDsemiDtEqtide": {"value": -1.000000},
"log.initial.el.BodyDeccDt": {"value": -1.000000},
"log.initial.el.DOblDtEqtide": {"value": 2.333143e-12, "unit": u.rad / u.sec},
"log.initial.el.DRotPerDtEqtide": {"value": 1.314881e-06},
"log.initial.el.DRotRateDtEqtide": {
"value": -1.106722e-15,
"unit": 1 / u.sec ** 2,
},
"log.initial.el.EqRotRateDiscrete": {"value": 6.296062e-06, "unit": 1 / u.sec},
"log.initial.el.EqRotPerDiscrete": {"value": 9.979547e05, "unit": u.sec},
"log.initial.el.EqRotRateCont": {"value": 8.688566e-06, "unit": 1 / u.sec},
"log.initial.el.EqRotPerCont": {"value": 7.231556e05, "unit": u.sec},
"log.initial.el.EqRotPer": {"value": 9.979547e05, "unit": u.sec},
"log.initial.el.EqTidePower": {"value": 0.000000, "unit": 1 / u.sec},
"log.initial.el.GammaRot": {"value": -1.000000, "unit": u.sec},
"log.initial.el.GammaOrb": {"value": -1.000000, "unit": u.sec},
"log.initial.el.OceanK2": {"value": 0.010000},
"log.initial.el.EnvTidalQ": {"value": -1.000000},
"log.initial.el.OceanTidalQ": {"value": -1.000000},
"log.initial.el.TideLock": {"value": 0.000000},
"log.initial.el.RotTimeEqtide": {"value": 6.570938e10, "unit": u.sec},
"log.initial.el.EnvK2": {"value": 0.500000},
"log.initial.el.OblTimeEqtide": {"value": -1.000000},
"log.initial.el.PowerEqtide": {"value": 6.080320e21, "unit": u.W},
"log.initial.el.SurfEnFluxEqtide": {
"value": 1.100903e04,
"unit": u.kg / u.sec ** 3,
},
"log.initial.el.SurfWaterMass": {"value": 0.000000, "unit": u.kg},
"log.initial.el.EnvelopeMass": {"value": 1.000000, "unit": u.Mearth},
"log.initial.el.OxygenMass": {"value": 0.000000, "unit": u.kg},
"log.initial.el.RGLimit": {"value": 3.099115e09, "unit": u.m},
"log.initial.el.XO": {"value": 0.000000},
"log.initial.el.EtaO": {"value": 0.000000},
"log.initial.el.PlanetRadius": {"value": 32.869442, "unit": u.Rearth},
"log.initial.el.OxygenMantleMass": {"value": 0.000000, "unit": u.kg},
"log.initial.el.RadXUV": {"value": -1.000000, "unit": u.m},
"log.initial.el.RadSolid": {"value": -1.000000, "unit": u.m},
"log.initial.el.PresXUV": {"value": 5.000000},
"log.initial.el.ScaleHeight": {"value": -1.000000, "unit": u.m},
"log.initial.el.ThermTemp": {"value": 400.000000, "unit": u.K},
"log.initial.el.AtmGasConst": {"value": 4124.000000},
"log.initial.el.PresSurf": {"value": -1.000000, "unit": u.Pa},
"log.initial.el.DEnvMassDt": {"value": -2.508715e09, "unit": u.kg / u.sec},
"log.initial.el.FXUV": {"value": 0.069788, "unit": u.W / u.m ** 2},
"log.initial.el.AtmXAbsEffH2O": {"value": 0.300000},
"log.initial.el.RocheRadius": {"value": 1.885546e08, "unit": u.m},
"log.initial.el.BondiRadius": {"value": 7.899468e08, "unit": u.m},
"log.initial.el.HEscapeRegime": {"value": 3.000000},
"log.initial.el.RRCriticalFlux": {"value": 0.000139, "unit": u.W / u.m ** 2},
"log.initial.el.KTide": {"value": 1.000000},
"log.initial.el.RGDuration": {"value": 1.00000e06, "unit": u.yr},
"log.initial.rr.Mass": {"value": 2.000000, "unit": u.Mearth},
"log.initial.rr.Obliquity": {"value": 0.785398, "unit": u.rad},
"log.initial.rr.PrecA": {"value": 0.000000, "unit": u.rad},
"log.initial.rr.Xobl": {"value": 0.707107},
"log.initial.rr.Yobl": {"value": 0.000000},
"log.initial.rr.Zobl": {"value": 0.707107},
"log.initial.rr.Radius": {"value": 2.096446e08, "unit": u.m},
"log.initial.rr.RadGyra": {"value": 0.400000},
"log.initial.rr.RotAngMom": {
"value": 1.221650e37,
"unit": (u.kg * u.m ** 2) / u.sec,
},
"log.initial.rr.RotKinEnergy": {"value": 8.884088e32, "unit": u.Joule},
"log.initial.rr.RotVel": {"value": 3.049157e04, "unit": u.m / u.sec},
"log.initial.rr.BodyType": {"value": 0.000000},
"log.initial.rr.RotRate": {"value": 0.000145, "unit": 1 / u.sec},
"log.initial.rr.RotPer": {"value": 0.500000, "unit": u.day},
"log.initial.rr.Density": {"value": 0.309474, "unit": u.kg / u.m ** 3},
"log.initial.rr.SurfEnFluxTotal": {
"value": 2.324795e04,
"unit": u.kg / u.sec ** 3,
},
"log.initial.rr.TidalQ": {"value": -1.000000e05},
"log.initial.rr.ImK2": {"value": -5.000000e-06},
"log.initial.rr.K2": {"value": 0.500000},
"log.initial.rr.K2Man": {"value": 0.300000},
"log.initial.rr.Imk2Man": {"value": -0.003000},
"log.initial.rr.TidalQMantle": {"value": 100.000000},
"log.initial.rr.HEcc": {"value": 0.000000},
"log.initial.rr.HZLimitDryRunaway": {"value": 3.098811e09, "unit": u.m},
"log.initial.rr.HZLimRecVenus": {"value": 2.502002e09, "unit": u.m},
"log.initial.rr.HZLimRunaway": {"value": 3.267138e09, "unit": u.m},
"log.initial.rr.HZLimMoistGreenhouse": {"value": 3.310536e09, "unit": u.m},
"log.initial.rr.HZLimMaxGreenhouse": {"value": 5.611497e09, "unit": u.m},
"log.initial.rr.HZLimEarlyMars": {"value": 6.122597e09, "unit": u.m},
"log.initial.rr.Instellation": {"value": 69.788358, "unit": u.kg / u.sec ** 3},
"log.initial.rr.KEcc": {"value": 0.200000},
"log.initial.rr.Eccentricity": {"value": 0.200000},
"log.initial.rr.OrbEnergy": {"value": -5.298093e34, "unit": u.Joule},
"log.initial.rr.MeanMotion": {"value": 6.296062e-06, "unit": 1 / u.sec},
"log.initial.rr.OrbPeriod": {"value": 9.979547e05, "unit": u.sec},
"log.initial.rr.SemiMajorAxis": {"value": 0.100000, "unit": u.au},
"log.initial.rr.CriticalSemiMajorAxis": {"value": -1.000000, "unit": u.m},
"log.initial.rr.COPP": {"value": 0.000000},
"log.initial.rr.OrbAngMom": {
"value": 1.648983e40,
"unit": (u.kg * u.m ** 2) / u.sec,
},
"log.initial.rr.LongP": {"value": 0.000000, "unit": u.rad},
"log.initial.rr.LXUVTot": {"value": -1.000000, "unit": u.kg / u.sec ** 3},
"log.initial.rr.TotOrbEnergy": {"value": -2.119237e35, "unit": u.Joule},
"log.initial.rr.OrbPotEnergy": {"value": -1.059619e35, "unit": u.Joule},
"log.initial.rr.LostEnergy": {"value": 5.562685e-309, "unit": u.Joule},
"log.initial.rr.TidalRadius": {"value": 2.096446e08, "unit": u.m},
"log.initial.rr.DsemiDtEqtide": {"value": 0.000192, "unit": u.m / u.sec},
"log.initial.rr.DeccDtEqtide": {"value": 4.036352e-15, "unit": 1 / u.sec},
"log.initial.rr.DMeanMotionDtEqtide": {
"value": -1.211805e-19,
"unit": 1 / u.sec ** 2,
},
"log.initial.rr.DOrbPerDtEqtide": {"value": 1.920766e-08},
"log.initial.rr.EccTimeEqtide": {"value": 4.954969e13, "unit": u.sec},
"log.initial.rr.SemiTimeEqtide": {"value": 7.793412e13, "unit": u.sec},
"log.initial.rr.DHEccDtEqtide": {"value": 0.000000, "unit": 1 / u.sec},
"log.initial.rr.DKEccDtEqtide": {"value": 4.036352e-15, "unit": 1 / u.sec},
"log.initial.rr.DXoblDtEqtide": {"value": 1.462258e-12, "unit": 1 / u.sec},
"log.initial.rr.DYoblDtEqtide": {"value": 0.000000, "unit": 1 / u.sec},
"log.initial.rr.DZoblDtEqtide": {"value": -1.462258e-12, "unit": 1 / u.sec},
"log.initial.rr.LockTime": {"value": -1.000000, "unit": u.sec},
"log.initial.rr.BodyDsemiDtEqtide": {"value": -1.000000},
"log.initial.rr.BodyDeccDt": {"value": -1.000000},
"log.initial.rr.DOblDtEqtide": {"value": 2.067945e-12, "unit": u.rad / u.sec},
"log.initial.rr.DRotPerDtEqtide": {"value": 3.287202e-07},
"log.initial.rr.DRotRateDtEqtide": {
"value": -1.106722e-15,
"unit": 1 / u.sec ** 2,
},
"log.initial.rr.EqRotRateDiscrete": {"value": 6.296062e-06, "unit": 1 / u.sec},
"log.initial.rr.EqRotPerDiscrete": {"value": 9.979547e05, "unit": u.sec},
"log.initial.rr.EqRotRateCont": {"value": 8.688566e-06, "unit": 1 / u.sec},
"log.initial.rr.EqRotPerCont": {"value": 7.231556e05, "unit": u.sec},
"log.initial.rr.EqRotPer": {"value": 9.979547e05, "unit": u.sec},
"log.initial.rr.EqTidePower": {"value": 0.000000, "unit": 1 / u.sec},
"log.initial.rr.GammaRot": {"value": -1.000000, "unit": u.sec},
"log.initial.rr.GammaOrb": {"value": -1.000000, "unit": u.sec},
"log.initial.rr.OceanK2": {"value": 0.010000},
"log.initial.rr.EnvTidalQ": {"value": -1.000000},
"log.initial.rr.OceanTidalQ": {"value": -1.000000},
"log.initial.rr.TideLock": {"value": 0.000000},
"log.initial.rr.RotTimeEqtide": {"value": 1.314188e11, "unit": u.sec},
"log.initial.rr.EnvK2": {"value": 0.500000},
"log.initial.rr.OblTimeEqtide": {"value": -1.000000},
"log.initial.rr.PowerEqtide": {"value": 1.284046e22, "unit": u.W},
"log.initial.rr.SurfEnFluxEqtide": {
"value": 2.324895e04,
"unit": u.kg / u.sec ** 3,
},
"log.initial.rr.SurfWaterMass": {"value": 0.000000, "unit": u.kg},
"log.initial.rr.EnvelopeMass": {"value": 1.000000, "unit": u.Mearth},
"log.initial.rr.OxygenMass": {"value": 0.000000, "unit": u.kg},
"log.initial.rr.RGLimit": {"value": 3.099115e09, "unit": u.m},
"log.initial.rr.XO": {"value": 0.000000},
"log.initial.rr.EtaO": {"value": 0.000000},
"log.initial.rr.PlanetRadius": {"value": 32.869442, "unit": u.Rearth},
"log.initial.rr.OxygenMantleMass": {"value": 0.000000, "unit": u.kg},
"log.initial.rr.RadXUV": {"value": -1.000000, "unit": u.m},
"log.initial.rr.RadSolid": {"value": -1.000000, "unit": u.m},
"log.initial.rr.PresXUV": {"value": 5.000000},
"log.initial.rr.ScaleHeight": {"value": -1.000000, "unit": u.m},
"log.initial.rr.ThermTemp": {"value": 400.000000, "unit": u.K},
"log.initial.rr.AtmGasConst": {"value": 4124.000000},
"log.initial.rr.PresSurf": {"value": -1.000000, "unit": u.Pa},
"log.initial.rr.DEnvMassDt": {"value": -1.119308e08, "unit": u.kg / u.sec},
"log.initial.rr.FXUV": {"value": 0.069788, "unit": u.W / u.m ** 2},
"log.initial.rr.AtmXAbsEffH2O": {"value": 0.300000},
"log.initial.rr.RocheRadius": {"value": 1.885546e08, "unit": u.m},
"log.initial.rr.BondiRadius": {"value": 7.899468e08, "unit": u.m},
"log.initial.rr.HEscapeRegime": {"value": 6.000000},
"log.initial.rr.RRCriticalFlux": {"value": 0.000139, "unit": u.W / u.m ** 2},
"log.initial.rr.KTide": {"value": 1.000000},
"log.initial.rr.RGDuration": {"value": 1.00000e06, "unit": u.yr},
"log.final.system.Age": {"value": 6.311520e13, "unit": u.sec, "rtol": 1e-4},
"log.final.system.Time": {"value": 3.155760e13, "unit": u.sec, "rtol": 1e-4},
"log.final.system.TotAngMom": {
"value": 5.425277e40,
"unit": (u.kg * u.m ** 2) / u.sec,
"rtol": 1e-4,
},
"log.final.system.TotEnergy": {
"value": -2.482441e43,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.system.PotEnergy": {
"value": -2.482440e43,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.system.KinEnergy": {
"value": 5.347271e34,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.system.DeltaTime": {
"value": 4.863245e08,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.star.Mass": {"value": 1.988416e30, "unit": u.kg, "rtol": 1e-4},
"log.final.star.Obliquity": {"value": 0.000000, "unit": u.rad, "rtol": 1e-4},
"log.final.star.PrecA": {"value": 0.000000, "unit": u.rad, "rtol": 1e-4},
"log.final.star.Xobl": {"value": 0.000000, "rtol": 1e-4},
"log.final.star.Yobl": {"value": 0.000000, "rtol": 1e-4},
"log.final.star.Zobl": {"value": 1.000000, "rtol": 1e-4},
"log.final.star.Radius": {"value": 6.378100e06, "unit": u.m, "rtol": 1e-4},
"log.final.star.RadGyra": {"value": 0.500000, "rtol": 1e-4},
"log.final.star.RotAngMom": {
"value": 1.470605e39,
"unit": (u.kg * u.m ** 2) / u.sec,
"rtol": 1e-4,
},
"log.final.star.RotKinEnergy": {
"value": 5.347271e34,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.star.RotVel": {
"value": 463.828520,
"unit": u.m / u.sec,
"rtol": 1e-4,
},
"log.final.star.BodyType": {"value": 0.000000, "rtol": 1e-4},
"log.final.star.RotRate": {
"value": 7.272205e-05,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.star.RotPer": {"value": 8.640000e04, "unit": u.sec, "rtol": 1e-4},
"log.final.star.Density": {
"value": 1.829552e09,
"unit": u.kg / u.m ** 3,
"rtol": 1e-4,
},
"log.final.star.SurfEnFluxTotal": {
"value": 3.025764e-12,
"unit": u.kg / u.sec ** 3,
"rtol": 1e-4,
},
"log.final.star.TidalQ": {"value": 1.000000e06, "rtol": 1e-4},
"log.final.star.ImK2": {"value": -5.000000e-07, "rtol": 1e-4},
"log.final.star.K2": {"value": 0.500000, "rtol": 1e-4},
"log.final.star.K2Man": {"value": 0.010000, "rtol": 1e-4},
"log.final.star.Imk2Man": {"value": 0.000000, "rtol": 1e-4},
"log.final.star.TidalQMantle": {"value": 100.000000, "rtol": 1e-4},
"log.final.star.HEcc": {"value": 0.000000, "rtol": 1e-4},
"log.final.star.HZLimitDryRunaway": {
"value": 3.036202e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.star.HZLimRecVenus": {
"value": 2.502002e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.star.HZLimRunaway": {
"value": 3.267138e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.star.HZLimMoistGreenhouse": {
"value": 3.310536e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.star.HZLimMaxGreenhouse": {
"value": 5.611497e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.star.HZLimEarlyMars": {
"value": 6.122597e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.star.Instellation": {
"value": -1.000000,
"unit": u.kg / u.sec ** 3,
"rtol": 1e-4,
},
"log.final.star.KEcc": {"value": 0.000000, "rtol": 1e-4},
"log.final.star.Eccentricity": {"value": -1.000000, "rtol": 1e-4},
"log.final.star.OrbEnergy": {"value": 0.000000, "unit": u.Joule, "rtol": 1e-4},
"log.final.star.MeanMotion": {
"value": -1.000000,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.star.OrbPeriod": {"value": -1.000000, "unit": u.sec, "rtol": 1e-4},
"log.final.star.SemiMajorAxis": {"value": -1.000000, "unit": u.m, "rtol": 1e-4},
"log.final.star.CriticalSemiMajorAxis": {
"value": -1.000000,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.star.COPP": {"value": 0.000000, "rtol": 1e-4},
"log.final.star.OrbAngMom": {
"value": 0.000000,
"unit": (u.kg * u.m ** 2) / u.sec,
"rtol": 1e-4,
},
"log.final.star.LongP": {"value": 0.000000, "unit": u.rad, "rtol": 1e-4},
"log.final.star.LXUVTot": {
"value": 1.923000e20,
"unit": u.kg / u.sec ** 3,
"rtol": 1e-4,
},
"log.final.star.TotOrbEnergy": {
"value": -1.740032e35,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.star.OrbPotEnergy": {
"value": -1.000000,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.star.LostEnergy": {
"value": 2.550749e26,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.star.LostAngMom": {
"value": 3.507532e30,
"unit": (u.kg * u.m ** 2) / u.sec,
"rtol": 1e-4,
},
"log.final.star.LockTime": {"value": -1.000000, "unit": u.sec, "rtol": 1e-4},
"log.final.star.BodyDsemiDtEqtide": {"value": -1.000000, "rtol": 1e-4},
"log.final.star.BodyDeccDt": {"value": -1.000000, "rtol": 1e-4},
"log.final.star.DOblDtEqtide": {
"value": 0.000000,
"unit": u.rad / u.sec,
"rtol": 1e-4,
},
"log.final.star.DRotPerDtEqtide": {"value": 1.380178e-27, "rtol": 1e-4},
"log.final.star.DRotRateDtEqtide": {
"value": -1.161683e-36,
"unit": 1 / u.sec ** 2,
"rtol": 1e-4,
},
"log.final.star.EqRotRateDiscrete": {
"value": 6.510710e-06,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.star.EqRotPerDiscrete": {
"value": 9.650538e05,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.star.EqRotRateCont": {
"value": 7.554427e-06,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.star.EqRotPerCont": {
"value": 8.317222e05,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.star.EqRotPer": {"value": 9.650538e05, "unit": u.sec, "rtol": 1e-4},
"log.final.star.EqTidePower": {
"value": 0.000000,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.star.GammaRot": {"value": -1.000000, "unit": u.sec, "rtol": 1e-4},
"log.final.star.GammaOrb": {"value": -1.000000, "unit": u.sec, "rtol": 1e-4},
"log.final.star.OceanK2": {"value": 0.010000, "rtol": 1e-4},
"log.final.star.EnvTidalQ": {"value": -1.000000, "rtol": 1e-4},
"log.final.star.OceanTidalQ": {"value": -1.000000, "rtol": 1e-4},
"log.final.star.TideLock": {"value": 0.000000, "rtol": 1e-4},
"log.final.star.RotTimeEqtide": {
"value": 2.047960e-37,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.star.EnvK2": {"value": 0.010000, "rtol": 1e-4},
"log.final.star.OblTimeEqtide": {"value": -1.000000, "rtol": 1e-4},
"log.final.star.PowerEqtide": {"value": 1546.776661, "unit": u.W, "rtol": 1e-4},
"log.final.star.SurfEnFluxEqtide": {
"value": 3.025764e-12,
"unit": u.kg / u.sec ** 3,
"rtol": 1e-4,
},
"log.final.star.Luminosity": {"value": 1.923000e23, "unit": u.W, "rtol": 1e-4},
"log.final.star.LXUVStellar": {"value": 1.923000e20, "unit": u.W, "rtol": 1e-4},
"log.final.star.Temperature": {"value": 5778.000000, "unit": u.K, "rtol": 1e-4},
"log.final.star.LXUVFrac": {"value": 0.001000, "rtol": 1e-4},
"log.final.star.RossbyNumber": {"value": 0.078260, "rtol": 1e-4},
"log.final.star.DRotPerDtStellar": {"value": 6.530034e-18, "rtol": 1e-4},
"log.final.auto.Mass": {"value": 1.411359, "unit": u.Mearth, "rtol": 1e-4},
"log.final.auto.Obliquity": {"value": 0.000000, "unit": u.rad, "rtol": 1e-4},
"log.final.auto.PrecA": {"value": 0.000000, "unit": u.rad, "rtol": 1e-4},
"log.final.auto.Xobl": {"value": 1.570471e-162, "rtol": 1e-4},
"log.final.auto.Yobl": {"value": 0.000000, "rtol": 1e-4},
"log.final.auto.Zobl": {"value": 1.000002, "rtol": 1e-4},
"log.final.auto.Radius": {"value": 1.630278e08, "unit": u.m, "rtol": 1e-4},
"log.final.auto.RadGyra": {"value": 0.400000, "rtol": 1e-4},
"log.final.auto.RotAngMom": {
"value": 2.333687e35,
"unit": (u.kg * u.m ** 2) / u.sec,
"rtol": 1e-4,
},
"log.final.auto.RotKinEnergy": {
"value": 7.596980e29,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.auto.RotVel": {
"value": 1061.426957,
"unit": u.m / u.sec,
"rtol": 1e-4,
},
"log.final.auto.BodyType": {"value": 0.000000, "rtol": 1e-4},
"log.final.auto.RotRate": {
"value": 6.510710e-06,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.auto.RotPer": {"value": 11.169604, "unit": u.day, "rtol": 1e-4},
"log.final.auto.Density": {
"value": 0.464405,
"unit": u.kg / u.m ** 3,
"rtol": 1e-4,
},
"log.final.auto.SurfEnFluxTotal": {
"value": 52.542587,
"unit": u.kg / u.sec ** 3,
"rtol": 1e-4,
},
"log.final.auto.TidalQ": {"value": -1.000000e05, "rtol": 1e-4},
"log.final.auto.ImK2": {"value": -5.000000e-06, "rtol": 1e-4},
"log.final.auto.K2": {"value": 0.500000, "rtol": 1e-4},
"log.final.auto.K2Man": {"value": 0.300000, "rtol": 1e-4},
"log.final.auto.Imk2Man": {"value": -0.003000, "rtol": 1e-4},
"log.final.auto.TidalQMantle": {"value": 100.000000, "rtol": 1e-4},
"log.final.auto.HEcc": {"value": 0.000000, "rtol": 1e-4},
"log.final.auto.HZLimitDryRunaway": {
"value": 3.062148e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.auto.HZLimRecVenus": {
"value": 2.502002e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.auto.HZLimRunaway": {
"value": 3.267138e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.auto.HZLimMoistGreenhouse": {
"value": 3.310536e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.auto.HZLimMaxGreenhouse": {
"value": 5.611497e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.auto.HZLimEarlyMars": {
"value": 6.122597e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.auto.Instellation": {
"value": 72.115242,
"unit": u.kg / u.sec ** 3,
"rtol": 1e-4,
},
"log.final.auto.KEcc": {"value": 0.129902, "rtol": 1e-4},
"log.final.auto.Eccentricity": {"value": 0.129902, "rtol": 1e-4},
"log.final.auto.OrbEnergy": {
"value": -3.823257e34,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.auto.MeanMotion": {
"value": 6.510710e-06,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.auto.OrbPeriod": {"value": 9.650538e05, "unit": u.sec, "rtol": 1e-4},
"log.final.auto.SemiMajorAxis": {"value": 0.097790, "unit": u.au, "rtol": 1e-4},
"log.final.auto.CriticalSemiMajorAxis": {
"value": -1.000000,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.auto.COPP": {"value": 0.000000, "rtol": 1e-4},
"log.final.auto.OrbAngMom": {
"value": 1.164500e40,
"unit": (u.kg * u.m ** 2) / u.sec,
"rtol": 1e-4,
},
"log.final.auto.LongP": {"value": 0.000000, "unit": u.rad, "rtol": 1e-4},
"log.final.auto.LXUVTot": {
"value": -1.000000,
"unit": u.kg / u.sec ** 3,
"rtol": 1e-4,
},
"log.final.auto.TotOrbEnergy": {
"value": -1.740032e35,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.auto.OrbPotEnergy": {
"value": -7.646514e34,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.auto.LostEnergy": {
"value": 1.264526e33,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.auto.TidalRadius": {"value": 1.630278e08, "unit": u.m, "rtol": 1e-4},
"log.final.auto.DsemiDtEqtide": {
"value": -6.842553e-06,
"unit": u.m / u.sec,
"rtol": 1e-4,
},
"log.final.auto.DeccDtEqtide": {
"value": -1.800337e-15,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.auto.DMeanMotionDtEqtide": {
"value": 4.567924e-21,
"unit": 1 / u.sec ** 2,
"rtol": 1e-4,
},
"log.final.auto.DOrbPerDtEqtide": {"value": -6.770832e-10, "rtol": 1e-4},
"log.final.auto.EccTimeEqtide": {
"value": 7.215422e13,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.auto.SemiTimeEqtide": {
"value": 2.137966e15,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.auto.DHEccDtEqtide": {
"value": -0.000000,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.auto.DKEccDtEqtide": {
"value": -1.800337e-15,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.auto.DXoblDtEqtide": {
"value": 0.000000,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.auto.DYoblDtEqtide": {
"value": 0.000000,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.auto.DZoblDtEqtide": {
"value": 0.000000,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.auto.LockTime": {"value": 2.398144e11, "unit": u.sec, "rtol": 1e-4},
"log.final.auto.BodyDsemiDtEqtide": {"value": -1.000000, "rtol": 1e-4},
"log.final.auto.BodyDeccDt": {"value": -1.000000, "rtol": 1e-4},
"log.final.auto.DOblDtEqtide": {
"value": 0.000000,
"unit": u.rad / u.sec,
"rtol": 1e-4,
},
"log.final.auto.DRotPerDtEqtide": {"value": -8.245322e-298, "rtol": 1e-4},
"log.final.auto.DRotRateDtEqtide": {
"value": 5.562685e-309,
"unit": 1 / u.sec ** 2,
"rtol": 1e-4,
},
"log.final.auto.EqRotRateDiscrete": {
"value": 6.510710e-06,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.auto.EqRotPerDiscrete": {
"value": 9.650538e05,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.auto.EqRotRateCont": {
"value": 7.554427e-06,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.auto.EqRotPerCont": {
"value": 8.317222e05,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.auto.EqRotPer": {"value": 9.650538e05, "unit": u.sec, "rtol": 1e-4},
"log.final.auto.EqTidePower": {
"value": 0.000000,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.auto.GammaRot": {"value": -1.000000, "unit": u.sec, "rtol": 1e-4},
"log.final.auto.GammaOrb": {"value": -1.000000, "unit": u.sec, "rtol": 1e-4},
"log.final.auto.OceanK2": {"value": 0.010000, "rtol": 1e-4},
"log.final.auto.EnvTidalQ": {"value": -1.000000, "rtol": 1e-4},
"log.final.auto.OceanTidalQ": {"value": -1.000000, "rtol": 1e-4},
"log.final.auto.TideLock": {"value": 1.000000, "rtol": 1e-4},
"log.final.auto.RotTimeEqtide": {
"value": 1.170426e303,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.auto.EnvK2": {"value": 0.500000, "rtol": 1e-4},
"log.final.auto.OblTimeEqtide": {"value": -1.000000, "rtol": 1e-4},
"log.final.auto.PowerEqtide": {"value": 1.788269e19, "unit": u.W, "rtol": 1e-4},
"log.final.auto.SurfEnFluxEqtide": {
"value": 53.542587,
"unit": u.kg / u.sec ** 3,
"rtol": 1e-4,
},
"log.final.auto.SurfWaterMass": {"value": 0.000000, "unit": u.kg, "rtol": 1e-4},
"log.final.auto.EnvelopeMass": {
"value": 0.411359,
"unit": u.Mearth,
"rtol": 1e-4,
},
"log.final.auto.OxygenMass": {"value": 0.000000, "unit": u.kg, "rtol": 1e-4},
"log.final.auto.RGLimit": {"value": 3.141989e09, "unit": u.m, "rtol": 1e-4},
"log.final.auto.XO": {"value": 0.000000, "rtol": 1e-4},
"log.final.auto.EtaO": {"value": 0.000000, "rtol": 1e-4},
"log.final.auto.PlanetRadius": {
"value": 25.560564,
"unit": u.Rearth,
"rtol": 1e-4,
},
"log.final.auto.OxygenMantleMass": {
"value": 0.000000,
"unit": u.kg,
"rtol": 1e-4,
},
"log.final.auto.RadXUV": {"value": -1.000000, "unit": u.m, "rtol": 1e-4},
"log.final.auto.RadSolid": {"value": -1.000000, "unit": u.m, "rtol": 1e-4},
"log.final.auto.PresXUV": {"value": 5.000000, "rtol": 1e-4},
"log.final.auto.ScaleHeight": {"value": -1.000000, "unit": u.m, "rtol": 1e-4},
"log.final.auto.ThermTemp": {"value": 400.000000, "unit": u.K, "rtol": 1e-4},
"log.final.auto.AtmGasConst": {"value": 4124.000000, "rtol": 1e-4},
"log.final.auto.PresSurf": {"value": -1.000000, "unit": u.Pa, "rtol": 1e-4},
"log.final.auto.DEnvMassDt": {
"value": -7.802590e07,
"unit": u.kg / u.sec,
"rtol": 1e-4,
},
"log.final.auto.FXUV": {
"value": 0.072115,
"unit": u.W / u.m ** 2,
"rtol": 1e-4,
},
"log.final.auto.AtmXAbsEffH2O": {"value": 0.300000, "rtol": 1e-4},
"log.final.auto.RocheRadius": {"value": 1.641596e08, "unit": u.m, "rtol": 1e-4},
"log.final.auto.BondiRadius": {"value": 5.637137e08, "unit": u.m, "rtol": 1e-4},
"log.final.auto.HEscapeRegime": {"value": 6.000000, "rtol": 1e-4},
"log.final.auto.RRCriticalFlux": {
"value": 1.470664e-06,
"unit": u.W / u.m ** 2,
"rtol": 1e-4,
},
"log.final.auto.KTide": {"value": 0.100000, "rtol": 1e-4},
"log.final.auto.RGDuration": {"value": 1.00000e06, "unit": u.yr, "rtol": 1e-4},
"log.final.bondi.Mass": {"value": 1.000000, "unit": u.Mearth, "rtol": 1e-4},
"log.final.bondi.Obliquity": {
"value": 2.992105e-54,
"unit": u.rad,
"rtol": 1e-4,
},
"log.final.bondi.PrecA": {"value": 0.000000, "unit": u.rad, "rtol": 1e-4},
"log.final.bondi.Xobl": {"value": 2.992105e-54, "rtol": 1e-4},
"log.final.bondi.Yobl": {"value": 0.000000, "rtol": 1e-4},
"log.final.bondi.Zobl": {"value": 1.000000, "rtol": 1e-4},
"log.final.bondi.Radius": {"value": 6.378100e06, "unit": u.m, "rtol": 1e-4},
"log.final.bondi.RadGyra": {"value": 0.400000, "rtol": 1e-4},
"log.final.bondi.RotAngMom": {
"value": 2.447267e32,
"unit": (u.kg * u.m ** 2) / u.sec,
"rtol": 1e-4,
},
"log.final.bondi.RotKinEnergy": {
"value": 7.703654e26,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.bondi.RotVel": {
"value": 40.154732,
"unit": u.m / u.sec,
"rtol": 1e-4,
},
"log.final.bondi.BodyType": {"value": 0.000000, "rtol": 1e-4},
"log.final.bondi.RotRate": {
"value": 6.295720e-06,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.bondi.RotPer": {"value": 11.551030, "unit": u.day, "rtol": 1e-4},
"log.final.bondi.Density": {
"value": 5495.038549,
"unit": u.kg / u.m ** 3,
"rtol": 1e-4,
},
"log.final.bondi.SurfEnFluxTotal": {
"value": 3.855526,
"unit": u.kg / u.sec ** 3,
"rtol": 1e-4,
},
"log.final.bondi.TidalQ": {"value": -100.000000, "rtol": 1e-4},
"log.final.bondi.ImK2": {"value": -0.003000, "rtol": 1e-4},
"log.final.bondi.K2": {"value": 0.500000, "rtol": 1e-4},
"log.final.bondi.K2Man": {"value": 0.300000, "rtol": 1e-4},
"log.final.bondi.Imk2Man": {"value": -0.003000, "rtol": 1e-4},
"log.final.bondi.TidalQMantle": {"value": 100.000000, "rtol": 1e-4},
"log.final.bondi.HEcc": {"value": 0.000000, "rtol": 1e-4},
"log.final.bondi.HZLimitDryRunaway": {
"value": 3.098815e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.bondi.HZLimRecVenus": {
"value": 2.502002e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.bondi.HZLimRunaway": {
"value": 3.267138e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.bondi.HZLimMoistGreenhouse": {
"value": 3.310536e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.bondi.HZLimMaxGreenhouse": {
"value": 5.611497e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.bondi.HZLimEarlyMars": {
"value": 6.122597e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.bondi.Instellation": {
"value": 69.783534,
"unit": u.kg / u.sec ** 3,
"rtol": 1e-4,
},
"log.final.bondi.KEcc": {"value": 0.200007, "rtol": 1e-4},
"log.final.bondi.Eccentricity": {"value": 0.200007, "rtol": 1e-4},
"log.final.bondi.OrbEnergy": {
"value": -2.648953e34,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.bondi.MeanMotion": {
"value": 6.295720e-06,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.bondi.OrbPeriod": {
"value": 9.980090e05,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.bondi.SemiMajorAxis": {
"value": 0.100004,
"unit": u.au,
"rtol": 1e-4,
},
"log.final.bondi.CriticalSemiMajorAxis": {
"value": -1.000000,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.bondi.COPP": {"value": 0.000000, "rtol": 1e-4},
"log.final.bondi.OrbAngMom": {
"value": 8.245061e39,
"unit": (u.kg * u.m ** 2) / u.sec,
"rtol": 1e-4,
},
"log.final.bondi.LongP": {"value": 0.000000, "unit": u.rad, "rtol": 1e-4},
"log.final.bondi.LXUVTot": {
"value": -1.000000,
"unit": u.kg / u.sec ** 3,
"rtol": 1e-4,
},
"log.final.bondi.TotOrbEnergy": {
"value": -1.740032e35,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.bondi.OrbPotEnergy": {
"value": -5.297906e34,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.bondi.LostEnergy": {
"value": 3.022858e31,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.bondi.TidalRadius": {
"value": 6.378100e06,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.bondi.DsemiDtEqtide": {
"value": -1.113122e-09,
"unit": u.m / u.sec,
"rtol": 1e-4,
},
"log.final.bondi.DeccDtEqtide": {
"value": -1.860062e-19,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.bondi.DMeanMotionDtEqtide": {
"value": 7.026492e-25,
"unit": 1 / u.sec ** 2,
"rtol": 1e-4,
},
"log.final.bondi.DOrbPerDtEqtide": {"value": -1.113852e-13, "rtol": 1e-4},
"log.final.bondi.EccTimeEqtide": {
"value": 1.075269e18,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.bondi.SemiTimeEqtide": {
"value": 1.343996e19,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.bondi.DHEccDtEqtide": {
"value": -0.000000,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.bondi.DKEccDtEqtide": {
"value": -1.860062e-19,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.bondi.DXoblDtEqtide": {
"value": -1.366908e-65,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.bondi.DYoblDtEqtide": {
"value": 0.000000,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.bondi.DZoblDtEqtide": {
"value": 4.089932e-119,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.bondi.LockTime": {"value": 4.919130e12, "unit": u.sec, "rtol": 1e-4},
"log.final.bondi.BodyDsemiDtEqtide": {"value": -1.000000, "rtol": 1e-4},
"log.final.bondi.BodyDeccDt": {"value": -1.000000, "rtol": 1e-4},
"log.final.bondi.DOblDtEqtide": {
"value": -1.366908e-65,
"unit": u.rad / u.sec,
"rtol": 1e-4,
},
"log.final.bondi.DRotPerDtEqtide": {"value": -8.818069e-298, "rtol": 1e-4},
"log.final.bondi.DRotRateDtEqtide": {
"value": 5.562685e-309,
"unit": 1 / u.sec ** 2,
"rtol": 1e-4,
},
"log.final.bondi.EqRotRateDiscrete": {
"value": 6.295720e-06,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.bondi.EqRotPerDiscrete": {
"value": 9.980090e05,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.bondi.EqRotRateCont": {
"value": 8.688253e-06,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.bondi.EqRotPerCont": {
"value": 7.231816e05,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.bondi.EqRotPer": {"value": 9.980090e05, "unit": u.sec, "rtol": 1e-4},
"log.final.bondi.EqTidePower": {
"value": 0.000000,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.bondi.GammaRot": {"value": -1.000000, "unit": u.sec, "rtol": 1e-4},
"log.final.bondi.GammaOrb": {"value": -1.000000, "unit": u.sec, "rtol": 1e-4},
"log.final.bondi.OceanK2": {"value": 0.010000, "rtol": 1e-4},
"log.final.bondi.EnvTidalQ": {"value": -1.000000, "rtol": 1e-4},
"log.final.bondi.OceanTidalQ": {"value": -1.000000, "rtol": 1e-4},
"log.final.bondi.TideLock": {"value": 1.000000, "rtol": 1e-4},
"log.final.bondi.RotTimeEqtide": {
"value": 1.131777e303,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.bondi.EnvK2": {"value": 0.500000, "rtol": 1e-4},
"log.final.bondi.OblTimeEqtide": {"value": -1.000000, "rtol": 1e-4},
"log.final.bondi.PowerEqtide": {
"value": 1.970952e15,
"unit": u.W,
"rtol": 1e-4,
},
"log.final.bondi.SurfEnFluxEqtide": {
"value": 3.855526,
"unit": u.kg / u.sec ** 3,
"rtol": 1e-4,
},
"log.final.bondi.SurfWaterMass": {
"value": 0.000000,
"unit": u.kg,
"rtol": 1e-4,
},
"log.final.bondi.EnvelopeMass": {
"value": 0.000000,
"unit": u.Mearth,
"rtol": 1e-4,
},
"log.final.bondi.OxygenMass": {"value": 0.000000, "unit": u.kg, "rtol": 1e-4},
"log.final.bondi.RGLimit": {"value": 3.147864e09, "unit": u.m, "rtol": 1e-4},
"log.final.bondi.XO": {"value": 0.000000, "rtol": 1e-4},
"log.final.bondi.EtaO": {"value": 0.000000, "rtol": 1e-4},
"log.final.bondi.PlanetRadius": {
"value": 1.000000,
"unit": u.Rearth,
"rtol": 1e-4,
},
"log.final.bondi.OxygenMantleMass": {
"value": 0.000000,
"unit": u.kg,
"rtol": 1e-4,
},
"log.final.bondi.RadXUV": {"value": -1.000000, "unit": u.m, "rtol": 1e-4},
"log.final.bondi.RadSolid": {"value": -1.000000, "unit": u.m, "rtol": 1e-4},
"log.final.bondi.PresXUV": {"value": 5.000000, "rtol": 1e-4},
"log.final.bondi.ScaleHeight": {"value": -1.000000, "unit": u.m, "rtol": 1e-4},
"log.final.bondi.ThermTemp": {"value": 400.000000, "unit": u.K, "rtol": 1e-4},
"log.final.bondi.AtmGasConst": {"value": 4124.000000, "rtol": 1e-4},
"log.final.bondi.PresSurf": {"value": -1.000000, "unit": u.Pa, "rtol": 1e-4},
"log.final.bondi.DEnvMassDt": {
"value": 5.562685e-309,
"unit": u.kg / u.sec,
"rtol": 1e-4,
},
"log.final.bondi.FXUV": {
"value": 0.069784,
"unit": u.W / u.m ** 2,
"rtol": 1e-4,
},
"log.final.bondi.AtmXAbsEffH2O": {"value": 0.300000, "rtol": 1e-4},
"log.final.bondi.RocheRadius": {
"value": 1.496611e08,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.bondi.BondiRadius": {
"value": 3.949665e08,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.bondi.HEscapeRegime": {"value": 8.000000, "rtol": 1e-4},
"log.final.bondi.RRCriticalFlux": {
"value": 1.080455,
"unit": u.W / u.m ** 2,
"rtol": 1e-4,
},
"log.final.bondi.KTide": {"value": 0.936113, "rtol": 1e-4},
"log.final.bondi.RGDuration": {"value": 1.00000e06, "unit": u.yr, "rtol": 1e-4},
"log.final.el.Mass": {"value": 1.986370, "unit": u.Mearth, "rtol": 1e-4},
"log.final.el.Obliquity": {"value": 0.000000, "unit": u.rad, "rtol": 1e-4},
"log.final.el.PrecA": {"value": 0.000000, "unit": u.rad, "rtol": 1e-4},
"log.final.el.Xobl": {"value": 1.563609e-162, "rtol": 1e-4},
"log.final.el.Yobl": {"value": 0.000000, "rtol": 1e-4},
"log.final.el.Zobl": {"value": 1.000000, "rtol": 1e-4},
"log.final.el.Radius": {"value": 2.084572e08, "unit": u.m, "rtol": 1e-4},
"log.final.el.RadGyra": {"value": 0.400000, "rtol": 1e-4},
"log.final.el.RotAngMom": {
"value": 5.470333e35,
"unit": (u.kg * u.m ** 2) / u.sec,
"rtol": 1e-4,
},
"log.final.el.RotKinEnergy": {
"value": 1.814054e30,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.el.RotVel": {
"value": 1382.558099,
"unit": u.m / u.sec,
"rtol": 1e-4,
},
"log.final.el.BodyType": {"value": 0.000000, "rtol": 1e-4},
"log.final.el.RotRate": {
"value": 6.632335e-06,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.el.RotPer": {"value": 10.964773, "unit": u.day, "rtol": 1e-4},
"log.final.el.Density": {
"value": 0.312647,
"unit": u.kg / u.m ** 3,
"rtol": 1e-4,
},
"log.final.el.SurfEnFluxTotal": {
"value": 33.098849,
"unit": u.kg / u.sec ** 3,
"rtol": 1e-4,
},
"log.final.el.TidalQ": {"value": -1.000000e05, "rtol": 1e-4},
"log.final.el.ImK2": {"value": -5.000000e-06, "rtol": 1e-4},
"log.final.el.K2": {"value": 0.500000, "rtol": 1e-4},
"log.final.el.K2Man": {"value": 0.300000, "rtol": 1e-4},
"log.final.el.Imk2Man": {"value": -0.003000, "rtol": 1e-4},
"log.final.el.TidalQMantle": {"value": 100.000000, "rtol": 1e-4},
"log.final.el.HEcc": {"value": 0.000000, "rtol": 1e-4},
"log.final.el.HZLimitDryRunaway": {
"value": 3.043341e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.el.HZLimRecVenus": {"value": 2.502002e09, "unit": u.m, "rtol": 1e-4},
"log.final.el.HZLimRunaway": {"value": 3.267138e09, "unit": u.m, "rtol": 1e-4},
"log.final.el.HZLimMoistGreenhouse": {
"value": 3.310536e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.el.HZLimMaxGreenhouse": {
"value": 5.611497e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.el.HZLimEarlyMars": {
"value": 6.122597e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.el.Instellation": {
"value": 73.462978,
"unit": u.kg / u.sec ** 3,
"rtol": 1e-4,
},
"log.final.el.KEcc": {"value": 0.068456, "rtol": 1e-4},
"log.final.el.Eccentricity": {"value": 0.068456, "rtol": 1e-4},
"log.final.el.OrbEnergy": {
"value": -5.447720e34,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.el.MeanMotion": {
"value": 6.632335e-06,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.el.OrbPeriod": {"value": 9.473564e05, "unit": u.sec, "rtol": 1e-4},
"log.final.el.SemiMajorAxis": {"value": 0.096591, "unit": u.au, "rtol": 1e-4},
"log.final.el.CriticalSemiMajorAxis": {
"value": -1.000000,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.el.COPP": {"value": 0.000000, "rtol": 1e-4},
"log.final.el.OrbAngMom": {
"value": 1.638922e40,
"unit": (u.kg * u.m ** 2) / u.sec,
"rtol": 1e-4,
},
"log.final.el.LongP": {"value": 0.000000, "unit": u.rad, "rtol": 1e-4},
"log.final.el.LXUVTot": {
"value": -1.000000,
"unit": u.kg / u.sec ** 3,
"rtol": 1e-4,
},
"log.final.el.TotOrbEnergy": {
"value": -1.740032e35,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.el.OrbPotEnergy": {
"value": -1.089544e35,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.el.LostEnergy": {
"value": 2.085924e33,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.el.TidalRadius": {"value": 2.084572e08, "unit": u.m, "rtol": 1e-4},
"log.final.el.DsemiDtEqtide": {
"value": -4.938887e-06,
"unit": u.m / u.sec,
"rtol": 1e-4,
},
"log.final.el.DeccDtEqtide": {
"value": -2.496493e-15,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.el.DMeanMotionDtEqtide": {
"value": 3.400372e-21,
"unit": 1 / u.sec ** 2,
"rtol": 1e-4,
},
"log.final.el.DOrbPerDtEqtide": {"value": -4.857058e-10, "rtol": 1e-4},
"log.final.el.EccTimeEqtide": {
"value": 2.742067e13,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.el.SemiTimeEqtide": {
"value": 2.925710e15,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.el.DHEccDtEqtide": {
"value": -0.000000,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.el.DKEccDtEqtide": {
"value": -2.496493e-15,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.el.DXoblDtEqtide": {
"value": 0.000000,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.el.DYoblDtEqtide": {
"value": 0.000000,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.el.DZoblDtEqtide": {
"value": 0.000000,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.el.LockTime": {"value": 8.193554e10, "unit": u.sec, "rtol": 1e-4},
"log.final.el.BodyDsemiDtEqtide": {"value": -1.000000, "rtol": 1e-4},
"log.final.el.BodyDeccDt": {"value": -1.000000, "rtol": 1e-4},
"log.final.el.DOblDtEqtide": {
"value": 0.000000,
"unit": u.rad / u.sec,
"rtol": 1e-4,
},
"log.final.el.DRotPerDtEqtide": {"value": -7.945685e-298, "rtol": 1e-4},
"log.final.el.DRotRateDtEqtide": {
"value": 5.562685e-309,
"unit": 1 / u.sec ** 2,
"rtol": 1e-4,
},
"log.final.el.EqRotRateDiscrete": {
"value": 6.632335e-06,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.el.EqRotPerDiscrete": {
"value": 9.473564e05,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.el.EqRotRateCont": {
"value": 6.927597e-06,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.el.EqRotPerCont": {
"value": 9.069791e05,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.el.EqRotPer": {"value": 9.473564e05, "unit": u.sec, "rtol": 1e-4},
"log.final.el.EqTidePower": {
"value": 0.000000,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.el.GammaRot": {"value": -1.000000, "unit": u.sec, "rtol": 1e-4},
"log.final.el.GammaOrb": {"value": -1.000000, "unit": u.sec, "rtol": 1e-4},
"log.final.el.OceanK2": {"value": 0.010000, "rtol": 1e-4},
"log.final.el.EnvTidalQ": {"value": -1.000000, "rtol": 1e-4},
"log.final.el.OceanTidalQ": {"value": -1.000000, "rtol": 1e-4},
"log.final.el.TideLock": {"value": 1.000000, "rtol": 1e-4},
"log.final.el.RotTimeEqtide": {
"value": 1.192290e303,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.el.EnvK2": {"value": 0.500000, "rtol": 1e-4},
"log.final.el.OblTimeEqtide": {"value": -1.000000, "rtol": 1e-4},
"log.final.el.PowerEqtide": {"value": 1.862016e19, "unit": u.W, "rtol": 1e-4},
"log.final.el.SurfEnFluxEqtide": {
"value": 34.098849,
"unit": u.kg / u.sec ** 3,
"rtol": 1e-4,
},
"log.final.el.SurfWaterMass": {"value": 0.000000, "unit": u.kg, "rtol": 1e-4},
"log.final.el.EnvelopeMass": {
"value": 0.986370,
"unit": u.Mearth,
"rtol": 1e-4,
},
"log.final.el.OxygenMass": {"value": 0.000000, "unit": u.kg, "rtol": 1e-4},
"log.final.el.RGLimit": {"value": 3.127704e09, "unit": u.m, "rtol": 1e-4},
"log.final.el.XO": {"value": 0.000000, "rtol": 1e-4},
"log.final.el.EtaO": {"value": 0.000000, "rtol": 1e-4},
"log.final.el.PlanetRadius": {
"value": 32.683276,
"unit": u.Rearth,
"rtol": 1e-4,
},
"log.final.el.OxygenMantleMass": {
"value": 0.000000,
"unit": u.kg,
"rtol": 1e-4,
},
"log.final.el.RadXUV": {"value": -1.000000, "unit": u.m, "rtol": 1e-4},
"log.final.el.RadSolid": {"value": -1.000000, "unit": u.m, "rtol": 1e-4},
"log.final.el.PresXUV": {"value": 5.000000, "rtol": 1e-4},
"log.final.el.ScaleHeight": {"value": -1.000000, "unit": u.m, "rtol": 1e-4},
"log.final.el.ThermTemp": {"value": 400.000000, "unit": u.K, "rtol": 1e-4},
"log.final.el.AtmGasConst": {"value": 4124.000000, "rtol": 1e-4},
"log.final.el.PresSurf": {"value": -1.000000, "unit": u.Pa, "rtol": 1e-4},
"log.final.el.DEnvMassDt": {
"value": -2.614005e09,
"unit": u.kg / u.sec,
"rtol": 1e-4,
},
"log.final.el.FXUV": {"value": 0.073463, "unit": u.W / u.m ** 2, "rtol": 1e-4},
"log.final.el.AtmXAbsEffH2O": {"value": 0.300000, "rtol": 1e-4},
"log.final.el.RocheRadius": {"value": 1.817114e08, "unit": u.m, "rtol": 1e-4},
"log.final.el.BondiRadius": {"value": 7.982897e08, "unit": u.m, "rtol": 1e-4},
"log.final.el.HEscapeRegime": {"value": 3.000000, "rtol": 1e-4},
"log.final.el.RRCriticalFlux": {
"value": 0.000139,
"unit": u.W / u.m ** 2,
"rtol": 1e-4,
},
"log.final.el.KTide": {"value": 1.000000, "rtol": 1e-4},
"log.final.el.RGDuration": {"value": 1.00000e06, "unit": u.yr, "rtol": 1e-4},
"log.final.rr.Mass": {"value": 1.999399, "unit": u.Mearth, "rtol": 1e-4},
"log.final.rr.Obliquity": {"value": 0.000000, "unit": u.rad, "rtol": 1e-4},
"log.final.rr.PrecA": {"value": 0.000000, "unit": u.rad, "rtol": 1e-4},
"log.final.rr.Xobl": {"value": 1.563665e-162, "rtol": 1e-4},
"log.final.rr.Yobl": {"value": 0.000000, "rtol": 1e-4},
"log.final.rr.Zobl": {"value": 1.000000, "rtol": 1e-4},
"log.final.rr.Radius": {"value": 2.095926e08, "unit": u.m, "rtol": 1e-4},
"log.final.rr.RadGyra": {"value": 0.400000, "rtol": 1e-4},
"log.final.rr.RotAngMom": {
"value": 5.561691e35,
"unit": (u.kg * u.m ** 2) / u.sec,
"rtol": 1e-4,
},
"log.final.rr.RotKinEnergy": {
"value": 1.842803e30,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.rr.RotVel": {
"value": 1388.922578,
"unit": u.m / u.sec,
"rtol": 1e-4,
},
"log.final.rr.BodyType": {"value": 0.000000, "rtol": 1e-4},
"log.final.rr.RotRate": {
"value": 6.626773e-06,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.rr.RotPer": {"value": 10.973977, "unit": u.day, "rtol": 1e-4},
"log.final.rr.Density": {
"value": 0.309611,
"unit": u.kg / u.m ** 3,
"rtol": 1e-4,
},
"log.final.rr.SurfEnFluxTotal": {
"value": 33.332187,
"unit": u.kg / u.sec ** 3,
"rtol": 1e-4,
},
"log.final.rr.TidalQ": {"value": -1.000000e05, "rtol": 1e-4},
"log.final.rr.ImK2": {"value": -5.000000e-06, "rtol": 1e-4},
"log.final.rr.K2": {"value": 0.500000, "rtol": 1e-4},
"log.final.rr.K2Man": {"value": 0.300000, "rtol": 1e-4},
"log.final.rr.Imk2Man": {"value": -0.003000, "rtol": 1e-4},
"log.final.rr.TidalQMantle": {"value": 100.000000, "rtol": 1e-4},
"log.final.rr.HEcc": {"value": 0.000000, "rtol": 1e-4},
"log.final.rr.HZLimitDryRunaway": {
"value": 3.043303e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.rr.HZLimRecVenus": {"value": 2.502002e09, "unit": u.m, "rtol": 1e-4},
"log.final.rr.HZLimRunaway": {"value": 3.267138e09, "unit": u.m, "rtol": 1e-4},
"log.final.rr.HZLimMoistGreenhouse": {
"value": 3.310536e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.rr.HZLimMaxGreenhouse": {
"value": 5.611497e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.rr.HZLimEarlyMars": {
"value": 6.122597e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.rr.Instellation": {
"value": 73.379922,
"unit": u.kg / u.sec ** 3,
"rtol": 1e-4,
},
"log.final.rr.KEcc": {"value": 0.068275, "rtol": 1e-4},
"log.final.rr.Eccentricity": {"value": 0.068275, "rtol": 1e-4},
"log.final.rr.OrbEnergy": {
"value": -5.480386e34,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.rr.MeanMotion": {
"value": 6.626773e-06,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.rr.OrbPeriod": {"value": 9.481516e05, "unit": u.sec, "rtol": 1e-4},
"log.final.rr.SemiMajorAxis": {"value": 0.096645, "unit": u.au, "rtol": 1e-4},
"log.final.rr.CriticalSemiMajorAxis": {
"value": -1.000000,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.rr.COPP": {"value": 0.000000, "rtol": 1e-4},
"log.final.rr.OrbAngMom": {
"value": 1.650154e40,
"unit": (u.kg * u.m ** 2) / u.sec,
"rtol": 1e-4,
},
"log.final.rr.LongP": {"value": 0.000000, "unit": u.rad, "rtol": 1e-4},
"log.final.rr.LXUVTot": {
"value": -1.000000,
"unit": u.kg / u.sec ** 3,
"rtol": 1e-4,
},
"log.final.rr.TotOrbEnergy": {
"value": -1.740032e35,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.rr.OrbPotEnergy": {
"value": -1.096077e35,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.rr.LostEnergy": {
"value": 2.725921e33,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.rr.TidalRadius": {"value": 2.095926e08, "unit": u.m, "rtol": 1e-4},
"log.final.rr.DsemiDtEqtide": {
"value": -4.999833e-06,
"unit": u.m / u.sec,
"rtol": 1e-4,
},
"log.final.rr.DeccDtEqtide": {
"value": -2.532564e-15,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.rr.DMeanMotionDtEqtide": {
"value": 3.437522e-21,
"unit": 1 / u.sec ** 2,
"rtol": 1e-4,
},
"log.final.rr.DOrbPerDtEqtide": {"value": -4.918370e-10, "rtol": 1e-4},
"log.final.rr.EccTimeEqtide": {
"value": 2.695885e13,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.rr.SemiTimeEqtide": {
"value": 2.891664e15,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.rr.DHEccDtEqtide": {
"value": -0.000000,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.rr.DKEccDtEqtide": {
"value": -2.532564e-15,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.rr.DXoblDtEqtide": {
"value": 0.000000,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.rr.DYoblDtEqtide": {
"value": 0.000000,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.rr.DZoblDtEqtide": {
"value": 0.000000,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.rr.LockTime": {"value": 1.711407e11, "unit": u.sec, "rtol": 1e-4},
"log.final.rr.BodyDsemiDtEqtide": {"value": -1.000000, "rtol": 1e-4},
"log.final.rr.BodyDeccDt": {"value": -1.000000, "rtol": 1e-4},
"log.final.rr.DOblDtEqtide": {
"value": 0.000000,
"unit": u.rad / u.sec,
"rtol": 1e-4,
},
"log.final.rr.DRotPerDtEqtide": {"value": -7.959031e-298, "rtol": 1e-4},
"log.final.rr.DRotRateDtEqtide": {
"value": 5.562685e-309,
"unit": 1 / u.sec ** 2,
"rtol": 1e-4,
},
"log.final.rr.EqRotRateDiscrete": {
"value": 6.626773e-06,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.rr.EqRotPerDiscrete": {
"value": 9.481516e05,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.rr.EqRotRateCont": {
"value": 6.920233e-06,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.rr.EqRotPerCont": {
"value": 9.079442e05,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.rr.EqRotPer": {"value": 9.481516e05, "unit": u.sec, "rtol": 1e-4},
"log.final.rr.EqTidePower": {
"value": 0.000000,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.rr.GammaRot": {"value": -1.000000, "unit": u.sec, "rtol": 1e-4},
"log.final.rr.GammaOrb": {"value": -1.000000, "unit": u.sec, "rtol": 1e-4},
"log.final.rr.OceanK2": {"value": 0.010000, "rtol": 1e-4},
"log.final.rr.EnvTidalQ": {"value": -1.000000, "rtol": 1e-4},
"log.final.rr.OceanTidalQ": {"value": -1.000000, "rtol": 1e-4},
"log.final.rr.TideLock": {"value": 1.000000, "rtol": 1e-4},
"log.final.rr.RotTimeEqtide": {
"value": 1.191290e303,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.rr.EnvK2": {"value": 0.500000, "rtol": 1e-4},
"log.final.rr.OblTimeEqtide": {"value": -1.000000, "rtol": 1e-4},
"log.final.rr.PowerEqtide": {"value": 1.895236e19, "unit": u.W, "rtol": 1e-4},
"log.final.rr.SurfEnFluxEqtide": {
"value": 34.332187,
"unit": u.kg / u.sec ** 3,
"rtol": 1e-4,
},
"log.final.rr.SurfWaterMass": {"value": 0.000000, "unit": u.kg, "rtol": 1e-4},
"log.final.rr.EnvelopeMass": {
"value": 0.999399,
"unit": u.Mearth,
"rtol": 1e-4,
},
"log.final.rr.OxygenMass": {"value": 0.000000, "unit": u.kg, "rtol": 1e-4},
"log.final.rr.RGLimit": {"value": 3.127270e09, "unit": u.m, "rtol": 1e-4},
"log.final.rr.XO": {"value": 0.000000, "rtol": 1e-4},
"log.final.rr.EtaO": {"value": 0.000000, "rtol": 1e-4},
"log.final.rr.PlanetRadius": {
"value": 32.861293,
"unit": u.Rearth,
"rtol": 1e-4,
},
"log.final.rr.OxygenMantleMass": {
"value": 0.000000,
"unit": u.kg,
"rtol": 1e-4,
},
"log.final.rr.RadXUV": {"value": -1.000000, "unit": u.m, "rtol": 1e-4},
"log.final.rr.RadSolid": {"value": -1.000000, "unit": u.m, "rtol": 1e-4},
"log.final.rr.PresXUV": {"value": 5.000000, "rtol": 1e-4},
"log.final.rr.ScaleHeight": {"value": -1.000000, "unit": u.m, "rtol": 1e-4},
"log.final.rr.ThermTemp": {"value": 400.000000, "unit": u.K, "rtol": 1e-4},
"log.final.rr.AtmGasConst": {"value": 4124.000000, "rtol": 1e-4},
"log.final.rr.PresSurf": {"value": -1.000000, "unit": u.Pa, "rtol": 1e-4},
"log.final.rr.DEnvMassDt": {
"value": -1.147322e08,
"unit": u.kg / u.sec,
"rtol": 1e-4,
},
"log.final.rr.FXUV": {"value": 0.073380, "unit": u.W / u.m ** 2, "rtol": 1e-4},
"log.final.rr.AtmXAbsEffH2O": {"value": 0.300000, "rtol": 1e-4},
"log.final.rr.RocheRadius": {"value": 1.822097e08, "unit": u.m, "rtol": 1e-4},
"log.final.rr.BondiRadius": {"value": 8.033012e08, "unit": u.m, "rtol": 1e-4},
"log.final.rr.HEscapeRegime": {"value": 6.000000, "rtol": 1e-4},
"log.final.rr.RRCriticalFlux": {
"value": 0.000139,
"unit": u.W / u.m ** 2,
"rtol": 1e-4,
},
"log.final.rr.KTide": {"value": 1.000000, "rtol": 1e-4},
"log.final.rr.RGDuration": {"value": 1.00000e06, "unit": u.yr, "rtol": 1e-4},
}
)
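
# NOTE: the class originally decorated by the @benchmark(...) call above was
# filtered out of this dump, leaving a dangling decorator. A minimal sketch
# restoring a syntactically valid file follows; the name "TestEqtideAtmEsc"
# is a hypothetical placeholder, not the original identifier.
class TestEqtideAtmEsc(Benchmark):
    pass
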
import astropy.units as u
import pytest
from benchmark import Benchmark, benchmark
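
# The dictionary passed to @benchmark below appears to map VPLanet log
# entries ("log.<stage>.<body>.<Variable>") to expected values, optional
# astropy units, and an optional relative tolerance ("rtol") that the
# imported Benchmark machinery presumably uses when checking a fresh
# simulation run against these reference numbers.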
@benchmark(
{
"log.initial.system.Age": {"value": 3.155760e13, "unit": u.sec},
"log.initial.system.Time": {"value": 0.000000, "unit": u.sec},
"log.initial.system.TotAngMom": {
"value": 6.747268e40,
"unit": (u.kg * u.m ** 2) / u.sec,
},
"log.initial.system.TotEnergy": {"value": -2.482441e43, "unit": u.Joule},
"log.initial.system.PotEnergy": {"value": -2.482440e43, "unit": u.Joule},
"log.initial.system.KinEnergy": {"value": 5.347271e34, "unit": u.Joule},
"log.initial.system.DeltaTime": {"value": 0.000000, "unit": u.sec},
"log.initial.star.Mass": {"value": 1.988416e30, "unit": u.kg},
"log.initial.star.Obliquity": {"value": 0.000000, "unit": u.rad},
"log.initial.star.PrecA": {"value": 0.000000, "unit": u.rad},
"log.initial.star.Xobl": {"value": 0.000000},
"log.initial.star.Yobl": {"value": 0.000000},
"log.initial.star.Zobl": {"value": 1.000000},
"log.initial.star.Radius": {"value": 6.378100e06, "unit": u.m},
"log.initial.star.RadGyra": {"value": 0.500000},
"log.initial.star.RotAngMom": {
"value": 1.470605e39,
"unit": (u.kg * u.m ** 2) / u.sec,
},
"log.initial.star.RotKinEnergy": {"value": 5.347271e34, "unit": u.Joule},
"log.initial.star.RotVel": {"value": 463.828521, "unit": u.m / u.sec},
"log.initial.star.BodyType": {"value": 0.000000},
"log.initial.star.RotRate": {"value": 7.272205e-05, "unit": 1 / u.sec},
"log.initial.star.RotPer": {"value": 8.640000e04, "unit": u.sec},
"log.initial.star.Density": {"value": 1.829552e09, "unit": u.kg / u.m ** 3},
"log.initial.star.SurfEnFluxTotal": {
"value": 4.474499e-12,
"unit": u.kg / u.sec ** 3,
},
"log.initial.star.TidalQ": {"value": 1.000000e06},
"log.initial.star.ImK2": {"value": -5.000000e-07},
"log.initial.star.K2": {"value": 0.500000},
"log.initial.star.K2Man": {"value": 0.010000},
"log.initial.star.Imk2Man": {"value": 0.000000},
"log.initial.star.TidalQMantle": {"value": 100.000000},
"log.initial.star.HEcc": {"value": 0.000000},
"log.initial.star.HZLimitDryRunaway": {"value": 3.036202e09, "unit": u.m},
"log.initial.star.HZLimRecVenus": {"value": 2.502002e09, "unit": u.m},
"log.initial.star.HZLimRunaway": {"value": 3.267138e09, "unit": u.m},
"log.initial.star.HZLimMoistGreenhouse": {"value": 3.310536e09, "unit": u.m},
"log.initial.star.HZLimMaxGreenhouse": {"value": 5.611497e09, "unit": u.m},
"log.initial.star.HZLimEarlyMars": {"value": 6.122597e09, "unit": u.m},
"log.initial.star.Instellation": {
"value": -1.000000,
"unit": u.kg / u.sec ** 3,
},
"log.initial.star.KEcc": {"value": 0.000000},
"log.initial.star.Eccentricity": {"value": -1.000000},
"log.initial.star.OrbEnergy": {"value": 0.000000, "unit": u.Joule},
"log.initial.star.MeanMotion": {"value": -1.000000, "unit": 1 / u.sec},
"log.initial.star.OrbPeriod": {"value": -1.000000, "unit": u.sec},
"log.initial.star.SemiMajorAxis": {"value": -1.000000, "unit": u.m},
"log.initial.star.CriticalSemiMajorAxis": {"value": -1.000000, "unit": u.m},
"log.initial.star.COPP": {"value": 0.000000},
"log.initial.star.OrbAngMom": {
"value": 0.000000,
"unit": (u.kg * u.m ** 2) / u.sec,
},
"log.initial.star.LongP": {"value": 0.000000, "unit": u.rad},
"log.initial.star.LXUVTot": {"value": 1.923000e20, "unit": u.kg / u.sec ** 3},
"log.initial.star.TotOrbEnergy": {"value": -2.119237e35, "unit": u.Joule},
"log.initial.star.OrbPotEnergy": {"value": -1.000000, "unit": u.Joule},
"log.initial.star.LostEnergy": {"value": 5.562685e-309, "unit": u.Joule},
"log.initial.star.LostAngMom": {
"value": 5.562685e-309,
"unit": (u.kg * u.m ** 2) / u.sec,
},
"log.initial.star.LockTime": {"value": -1.000000, "unit": u.sec},
"log.initial.star.BodyDsemiDtEqtide": {"value": -1.000000},
"log.initial.star.BodyDeccDt": {"value": -1.000000},
"log.initial.star.DOblDtEqtide": {"value": 0.000000, "unit": u.rad / u.sec},
"log.initial.star.DRotPerDtEqtide": {"value": 2.054554e-27},
"log.initial.star.DRotRateDtEqtide": {
"value": -1.729298e-36,
"unit": 1 / u.sec ** 2,
},
"log.initial.star.EqRotRateDiscrete": {
"value": 6.296062e-06,
"unit": 1 / u.sec,
},
"log.initial.star.EqRotPerDiscrete": {"value": 9.979547e05, "unit": u.sec},
"log.initial.star.EqRotRateCont": {"value": 8.688566e-06, "unit": 1 / u.sec},
"log.initial.star.EqRotPerCont": {"value": 7.231556e05, "unit": u.sec},
"log.initial.star.EqRotPer": {"value": 9.979547e05, "unit": u.sec},
"log.initial.star.EqTidePower": {"value": 0.000000, "unit": 1 / u.sec},
"log.initial.star.GammaRot": {"value": -1.000000, "unit": u.sec},
"log.initial.star.GammaOrb": {"value": -1.000000, "unit": u.sec},
"log.initial.star.OceanK2": {"value": 0.010000},
"log.initial.star.EnvTidalQ": {"value": -1.000000},
"log.initial.star.OceanTidalQ": {"value": -1.000000},
"log.initial.star.TideLock": {"value": 0.000000},
"log.initial.star.RotTimeEqtide": {"value": 0.000000, "unit": u.sec},
"log.initial.star.EnvK2": {"value": 0.010000},
"log.initial.star.OblTimeEqtide": {"value": -1.000000},
"log.initial.star.PowerEqtide": {"value": 2287.372458, "unit": u.W},
"log.initial.star.SurfEnFluxEqtide": {
"value": 4.474499e-12,
"unit": u.kg / u.sec ** 3,
},
"log.initial.star.Luminosity": {"value": 1.923000e23, "unit": u.W},
"log.initial.star.LXUVStellar": {"value": 1.923000e20, "unit": u.W},
"log.initial.star.Temperature": {"value": 5778.000000, "unit": u.K},
"log.initial.star.LXUVFrac": {"value": 0.001000},
"log.initial.star.RossbyNumber": {"value": 0.078260},
"log.initial.star.DRotPerDtStellar": {"value": 6.530034e-18},
"log.initial.auto.Mass": {"value": 2.000000, "unit": u.Mearth},
"log.initial.auto.Obliquity": {"value": 0.785398, "unit": u.rad},
"log.initial.auto.PrecA": {"value": 0.000000, "unit": u.rad},
"log.initial.auto.Xobl": {"value": 0.707107},
"log.initial.auto.Yobl": {"value": 0.000000},
"log.initial.auto.Zobl": {"value": 0.707107},
"log.initial.auto.Radius": {"value": 2.096446e08, "unit": u.m},
"log.initial.auto.RadGyra": {"value": 0.400000},
"log.initial.auto.RotAngMom": {
"value": 1.221650e37,
"unit": (u.kg * u.m ** 2) / u.sec,
},
"log.initial.auto.RotKinEnergy": {"value": 8.884088e32, "unit": u.Joule},
"log.initial.auto.RotVel": {"value": 3.049157e04, "unit": u.m / u.sec},
"log.initial.auto.BodyType": {"value": 0.000000},
"log.initial.auto.RotRate": {"value": 0.000145, "unit": 1 / u.sec},
"log.initial.auto.RotPer": {"value": 0.500000, "unit": u.day},
"log.initial.auto.Density": {"value": 0.309474, "unit": u.kg / u.m ** 3},
"log.initial.auto.SurfEnFluxTotal": {
"value": 2.324795e04,
"unit": u.kg / u.sec ** 3,
},
"log.initial.auto.TidalQ": {"value": -1.000000e05},
"log.initial.auto.ImK2": {"value": -5.000000e-06},
"log.initial.auto.K2": {"value": 0.500000},
"log.initial.auto.K2Man": {"value": 0.300000},
"log.initial.auto.Imk2Man": {"value": -0.003000},
"log.initial.auto.TidalQMantle": {"value": 100.000000},
"log.initial.auto.HEcc": {"value": 0.000000},
"log.initial.auto.HZLimitDryRunaway": {"value": 3.098811e09, "unit": u.m},
"log.initial.auto.HZLimRecVenus": {"value": 2.502002e09, "unit": u.m},
"log.initial.auto.HZLimRunaway": {"value": 3.267138e09, "unit": u.m},
"log.initial.auto.HZLimMoistGreenhouse": {"value": 3.310536e09, "unit": u.m},
"log.initial.auto.HZLimMaxGreenhouse": {"value": 5.611497e09, "unit": u.m},
"log.initial.auto.HZLimEarlyMars": {"value": 6.122597e09, "unit": u.m},
"log.initial.auto.Instellation": {
"value": 69.788358,
"unit": u.kg / u.sec ** 3,
},
"log.initial.auto.KEcc": {"value": 0.200000},
"log.initial.auto.Eccentricity": {"value": 0.200000},
"log.initial.auto.OrbEnergy": {"value": -5.298093e34, "unit": u.Joule},
"log.initial.auto.MeanMotion": {"value": 6.296062e-06, "unit": 1 / u.sec},
"log.initial.auto.OrbPeriod": {"value": 9.979547e05, "unit": u.sec},
"log.initial.auto.SemiMajorAxis": {"value": 0.100000, "unit": u.au},
"log.initial.auto.CriticalSemiMajorAxis": {"value": -1.000000, "unit": u.m},
"log.initial.auto.COPP": {"value": 0.000000},
"log.initial.auto.OrbAngMom": {
"value": 1.648983e40,
"unit": (u.kg * u.m ** 2) / u.sec,
},
"log.initial.auto.LongP": {"value": 0.000000, "unit": u.rad},
"log.initial.auto.LXUVTot": {"value": -1.000000, "unit": u.kg / u.sec ** 3},
"log.initial.auto.TotOrbEnergy": {"value": -2.119237e35, "unit": u.Joule},
"log.initial.auto.OrbPotEnergy": {"value": -1.059619e35, "unit": u.Joule},
"log.initial.auto.LostEnergy": {"value": 5.562685e-309, "unit": u.Joule},
"log.initial.auto.TidalRadius": {"value": 2.096446e08, "unit": u.m},
"log.initial.auto.DsemiDtEqtide": {"value": 0.000192, "unit": u.m / u.sec},
"log.initial.auto.DeccDtEqtide": {"value": 4.036352e-15, "unit": 1 / u.sec},
"log.initial.auto.DMeanMotionDtEqtide": {
"value": -1.211805e-19,
"unit": 1 / u.sec ** 2,
},
"log.initial.auto.DOrbPerDtEqtide": {"value": 1.920766e-08},
"log.initial.auto.EccTimeEqtide": {"value": 4.954969e13, "unit": u.sec},
"log.initial.auto.SemiTimeEqtide": {"value": 7.793412e13, "unit": u.sec},
"log.initial.auto.DHEccDtEqtide": {"value": 0.000000, "unit": 1 / u.sec},
"log.initial.auto.DKEccDtEqtide": {"value": 4.036352e-15, "unit": 1 / u.sec},
"log.initial.auto.DXoblDtEqtide": {"value": 1.462258e-12, "unit": 1 / u.sec},
"log.initial.auto.DYoblDtEqtide": {"value": 0.000000, "unit": 1 / u.sec},
"log.initial.auto.DZoblDtEqtide": {"value": -1.462258e-12, "unit": 1 / u.sec},
"log.initial.auto.LockTime": {"value": -1.000000, "unit": u.sec},
"log.initial.auto.BodyDsemiDtEqtide": {"value": -1.000000},
"log.initial.auto.BodyDeccDt": {"value": -1.000000},
"log.initial.auto.DOblDtEqtide": {"value": 2.067945e-12, "unit": u.rad / u.sec},
"log.initial.auto.DRotPerDtEqtide": {"value": 3.287202e-07},
"log.initial.auto.DRotRateDtEqtide": {
"value": -1.106722e-15,
"unit": 1 / u.sec ** 2,
},
"log.initial.auto.EqRotRateDiscrete": {
"value": 6.296062e-06,
"unit": 1 / u.sec,
},
"log.initial.auto.EqRotPerDiscrete": {"value": 9.979547e05, "unit": u.sec},
"log.initial.auto.EqRotRateCont": {"value": 8.688566e-06, "unit": 1 / u.sec},
"log.initial.auto.EqRotPerCont": {"value": 7.231556e05, "unit": u.sec},
"log.initial.auto.EqRotPer": {"value": 9.979547e05, "unit": u.sec},
"log.initial.auto.EqTidePower": {"value": 0.000000, "unit": 1 / u.sec},
"log.initial.auto.GammaRot": {"value": -1.000000, "unit": u.sec},
"log.initial.auto.GammaOrb": {"value": -1.000000, "unit": u.sec},
"log.initial.auto.OceanK2": {"value": 0.010000},
"log.initial.auto.EnvTidalQ": {"value": -1.000000},
"log.initial.auto.OceanTidalQ": {"value": -1.000000},
"log.initial.auto.TideLock": {"value": 0.000000},
"log.initial.auto.RotTimeEqtide": {"value": 1.314188e11, "unit": u.sec},
"log.initial.auto.EnvK2": {"value": 0.500000},
"log.initial.auto.OblTimeEqtide": {"value": -1.000000},
"log.initial.auto.PowerEqtide": {"value": 1.284046e22, "unit": u.W},
"log.initial.auto.SurfEnFluxEqtide": {
"value": 2.324895e04,
"unit": u.kg / u.sec ** 3,
},
"log.initial.auto.SurfWaterMass": {"value": 0.000000, "unit": u.kg},
"log.initial.auto.EnvelopeMass": {"value": 1.000000, "unit": u.Mearth},
"log.initial.auto.OxygenMass": {"value": 0.000000, "unit": u.kg},
"log.initial.auto.RGLimit": {"value": 3.099115e09, "unit": u.m},
"log.initial.auto.XO": {"value": 0.000000},
"log.initial.auto.EtaO": {"value": 0.000000},
"log.initial.auto.PlanetRadius": {"value": 32.869442, "unit": u.Rearth},
"log.initial.auto.OxygenMantleMass": {"value": 0.000000, "unit": u.kg},
"log.initial.auto.RadXUV": {"value": -1.000000, "unit": u.m},
"log.initial.auto.RadSolid": {"value": -1.000000, "unit": u.m},
"log.initial.auto.PresXUV": {"value": 5.000000},
"log.initial.auto.ScaleHeight": {"value": -1.000000, "unit": u.m},
"log.initial.auto.ThermTemp": {"value": 400.000000, "unit": u.K},
"log.initial.auto.AtmGasConst": {"value": 4124.000000},
"log.initial.auto.PresSurf": {"value": -1.000000, "unit": u.Pa},
"log.initial.auto.DEnvMassDt": {"value": -2.508715e09, "unit": u.kg / u.sec},
"log.initial.auto.FXUV": {"value": 0.069788, "unit": u.W / u.m ** 2},
"log.initial.auto.AtmXAbsEffH2O": {"value": 0.300000},
"log.initial.auto.RocheRadius": {"value": 1.885546e08, "unit": u.m},
"log.initial.auto.BondiRadius": {"value": 7.899468e08, "unit": u.m},
"log.initial.auto.HEscapeRegime": {"value": 3.000000},
"log.initial.auto.RRCriticalFlux": {"value": 0.000139, "unit": u.W / u.m ** 2},
"log.initial.auto.KTide": {"value": 1.000000},
"log.initial.auto.RGDuration": {"value": 1.00000e06, "unit": u.yr},
"log.initial.bondi.Mass": {"value": 2.000000, "unit": u.Mearth},
"log.initial.bondi.Obliquity": {"value": 0.785398, "unit": u.rad},
"log.initial.bondi.PrecA": {"value": 0.000000, "unit": u.rad},
"log.initial.bondi.Xobl": {"value": 0.707107},
"log.initial.bondi.Yobl": {"value": 0.000000},
"log.initial.bondi.Zobl": {"value": 0.707107},
"log.initial.bondi.Radius": {"value": 2.096446e08, "unit": u.m},
"log.initial.bondi.RadGyra": {"value": 0.400000},
"log.initial.bondi.RotAngMom": {
"value": 1.221650e37,
"unit": (u.kg * u.m ** 2) / u.sec,
},
"log.initial.bondi.RotKinEnergy": {"value": 8.884088e32, "unit": u.Joule},
"log.initial.bondi.RotVel": {"value": 3.049157e04, "unit": u.m / u.sec},
"log.initial.bondi.BodyType": {"value": 0.000000},
"log.initial.bondi.RotRate": {"value": 0.000145, "unit": 1 / u.sec},
"log.initial.bondi.RotPer": {"value": 0.500000, "unit": u.day},
"log.initial.bondi.Density": {"value": 0.309474, "unit": u.kg / u.m ** 3},
"log.initial.bondi.SurfEnFluxTotal": {
"value": 2.324795e04,
"unit": u.kg / u.sec ** 3,
},
"log.initial.bondi.TidalQ": {"value": -1.000000e05},
"log.initial.bondi.ImK2": {"value": -5.000000e-06},
"log.initial.bondi.K2": {"value": 0.500000},
"log.initial.bondi.K2Man": {"value": 0.300000},
"log.initial.bondi.Imk2Man": {"value": -0.003000},
"log.initial.bondi.TidalQMantle": {"value": 100.000000},
"log.initial.bondi.HEcc": {"value": 0.000000},
"log.initial.bondi.HZLimitDryRunaway": {"value": 3.098811e09, "unit": u.m},
"log.initial.bondi.HZLimRecVenus": {"value": 2.502002e09, "unit": u.m},
"log.initial.bondi.HZLimRunaway": {"value": 3.267138e09, "unit": u.m},
"log.initial.bondi.HZLimMoistGreenhouse": {"value": 3.310536e09, "unit": u.m},
"log.initial.bondi.HZLimMaxGreenhouse": {"value": 5.611497e09, "unit": u.m},
"log.initial.bondi.HZLimEarlyMars": {"value": 6.122597e09, "unit": u.m},
"log.initial.bondi.Instellation": {
"value": 69.788358,
"unit": u.kg / u.sec ** 3,
},
"log.initial.bondi.KEcc": {"value": 0.200000},
"log.initial.bondi.Eccentricity": {"value": 0.200000},
"log.initial.bondi.OrbEnergy": {"value": -5.298093e34, "unit": u.Joule},
"log.initial.bondi.MeanMotion": {"value": 6.296062e-06, "unit": 1 / u.sec},
"log.initial.bondi.OrbPeriod": {"value": 9.979547e05, "unit": u.sec},
"log.initial.bondi.SemiMajorAxis": {"value": 0.100000, "unit": u.au},
"log.initial.bondi.CriticalSemiMajorAxis": {"value": -1.000000, "unit": u.m},
"log.initial.bondi.COPP": {"value": 0.000000},
"log.initial.bondi.OrbAngMom": {
"value": 1.648983e40,
"unit": (u.kg * u.m ** 2) / u.sec,
},
"log.initial.bondi.LongP": {"value": 0.000000, "unit": u.rad},
"log.initial.bondi.LXUVTot": {"value": -1.000000, "unit": u.kg / u.sec ** 3},
"log.initial.bondi.TotOrbEnergy": {"value": -2.119237e35, "unit": u.Joule},
"log.initial.bondi.OrbPotEnergy": {"value": -1.059619e35, "unit": u.Joule},
"log.initial.bondi.LostEnergy": {"value": 5.562685e-309, "unit": u.Joule},
"log.initial.bondi.TidalRadius": {"value": 2.096446e08, "unit": u.m},
"log.initial.bondi.DsemiDtEqtide": {"value": 0.000192, "unit": u.m / u.sec},
"log.initial.bondi.DeccDtEqtide": {"value": 4.036352e-15, "unit": 1 / u.sec},
"log.initial.bondi.DMeanMotionDtEqtide": {
"value": -1.211805e-19,
"unit": 1 / u.sec ** 2,
},
"log.initial.bondi.DOrbPerDtEqtide": {"value": 1.920766e-08},
"log.initial.bondi.EccTimeEqtide": {"value": 4.954969e13, "unit": u.sec},
"log.initial.bondi.SemiTimeEqtide": {"value": 7.793412e13, "unit": u.sec},
"log.initial.bondi.DHEccDtEqtide": {"value": 0.000000, "unit": 1 / u.sec},
"log.initial.bondi.DKEccDtEqtide": {"value": 4.036352e-15, "unit": 1 / u.sec},
"log.initial.bondi.DXoblDtEqtide": {"value": 1.462258e-12, "unit": 1 / u.sec},
"log.initial.bondi.DYoblDtEqtide": {"value": 0.000000, "unit": 1 / u.sec},
"log.initial.bondi.DZoblDtEqtide": {"value": -1.462258e-12, "unit": 1 / u.sec},
"log.initial.bondi.LockTime": {"value": -1.000000, "unit": u.sec},
"log.initial.bondi.BodyDsemiDtEqtide": {"value": -1.000000},
"log.initial.bondi.BodyDeccDt": {"value": -1.000000},
"log.initial.bondi.DOblDtEqtide": {
"value": 2.067945e-12,
"unit": u.rad / u.sec,
},
"log.initial.bondi.DRotPerDtEqtide": {"value": 3.287202e-07},
"log.initial.bondi.DRotRateDtEqtide": {
"value": -1.106722e-15,
"unit": 1 / u.sec ** 2,
},
"log.initial.bondi.EqRotRateDiscrete": {
"value": 6.296062e-06,
"unit": 1 / u.sec,
},
"log.initial.bondi.EqRotPerDiscrete": {"value": 9.979547e05, "unit": u.sec},
"log.initial.bondi.EqRotRateCont": {"value": 8.688566e-06, "unit": 1 / u.sec},
"log.initial.bondi.EqRotPerCont": {"value": 7.231556e05, "unit": u.sec},
"log.initial.bondi.EqRotPer": {"value": 9.979547e05, "unit": u.sec},
"log.initial.bondi.EqTidePower": {"value": 0.000000, "unit": 1 / u.sec},
"log.initial.bondi.GammaRot": {"value": -1.000000, "unit": u.sec},
"log.initial.bondi.GammaOrb": {"value": -1.000000, "unit": u.sec},
"log.initial.bondi.OceanK2": {"value": 0.010000},
"log.initial.bondi.EnvTidalQ": {"value": -1.000000},
"log.initial.bondi.OceanTidalQ": {"value": -1.000000},
"log.initial.bondi.TideLock": {"value": 0.000000},
"log.initial.bondi.RotTimeEqtide": {"value": 1.314188e11, "unit": u.sec},
"log.initial.bondi.EnvK2": {"value": 0.500000},
"log.initial.bondi.OblTimeEqtide": {"value": -1.000000},
"log.initial.bondi.PowerEqtide": {"value": 1.284046e22, "unit": u.W},
"log.initial.bondi.SurfEnFluxEqtide": {
"value": 2.324895e04,
"unit": u.kg / u.sec ** 3,
},
"log.initial.bondi.SurfWaterMass": {"value": 0.000000, "unit": u.kg},
"log.initial.bondi.EnvelopeMass": {"value": 1.000000, "unit": u.Mearth},
"log.initial.bondi.OxygenMass": {"value": 0.000000, "unit": u.kg},
"log.initial.bondi.RGLimit": {"value": 3.099115e09, "unit": u.m},
"log.initial.bondi.XO": {"value": 0.000000},
"log.initial.bondi.EtaO": {"value": 0.000000},
"log.initial.bondi.PlanetRadius": {"value": 32.869442, "unit": u.Rearth},
"log.initial.bondi.OxygenMantleMass": {"value": 0.000000, "unit": u.kg},
"log.initial.bondi.RadXUV": {"value": -1.000000, "unit": u.m},
"log.initial.bondi.RadSolid": {"value": -1.000000, "unit": u.m},
"log.initial.bondi.PresXUV": {"value": 5.000000},
"log.initial.bondi.ScaleHeight": {"value": -1.000000, "unit": u.m},
"log.initial.bondi.ThermTemp": {"value": 400.000000, "unit": u.K},
"log.initial.bondi.AtmGasConst": {"value": 4124.000000},
"log.initial.bondi.PresSurf": {"value": -1.000000, "unit": u.Pa},
"log.initial.bondi.DEnvMassDt": {"value": -1.230386e15, "unit": u.kg / u.sec},
"log.initial.bondi.FXUV": {"value": 0.069788, "unit": u.W / u.m ** 2},
"log.initial.bondi.AtmXAbsEffH2O": {"value": 0.300000},
"log.initial.bondi.RocheRadius": {"value": 1.885546e08, "unit": u.m},
"log.initial.bondi.BondiRadius": {"value": 7.899468e08, "unit": u.m},
"log.initial.bondi.HEscapeRegime": {"value": 5.000000},
"log.initial.bondi.RRCriticalFlux": {"value": 0.000139, "unit": u.W / u.m ** 2},
"log.initial.bondi.KTide": {"value": 1.000000},
"log.initial.bondi.RGDuration": {"value": 1.00000e06, "unit": u.yr},
"log.initial.el.Mass": {"value": 2.000000, "unit": u.Mearth},
"log.initial.el.Obliquity": {"value": 0.410152, "unit": u.rad},
"log.initial.el.PrecA": {"value": 0.000000, "unit": u.rad},
"log.initial.el.Xobl": {"value": 0.398749},
"log.initial.el.Yobl": {"value": 0.000000},
"log.initial.el.Zobl": {"value": 0.917060},
"log.initial.el.Radius": {"value": 2.096446e08, "unit": u.m},
"log.initial.el.RadGyra": {"value": 0.400000},
"log.initial.el.RotAngMom": {
"value": 6.108249e36,
"unit": (u.kg * u.m ** 2) / u.sec,
},
"log.initial.el.RotKinEnergy": {"value": 2.221022e32, "unit": u.Joule},
"log.initial.el.RotVel": {"value": 1.524578e04, "unit": u.m / u.sec},
"log.initial.el.BodyType": {"value": 0.000000},
"log.initial.el.RotRate": {"value": 7.272205e-05, "unit": 1 / u.sec},
"log.initial.el.RotPer": {"value": 1.000000, "unit": u.day},
"log.initial.el.Density": {"value": 0.309474, "unit": u.kg / u.m ** 3},
"log.initial.el.SurfEnFluxTotal": {
"value": 1.100803e04,
"unit": u.kg / u.sec ** 3,
},
"log.initial.el.TidalQ": {"value": -1.000000e05},
"log.initial.el.ImK2": {"value": -5.000000e-06},
"log.initial.el.K2": {"value": 0.500000},
"log.initial.el.K2Man": {"value": 0.300000},
"log.initial.el.Imk2Man": {"value": -0.003000},
"log.initial.el.TidalQMantle": {"value": 100.000000},
"log.initial.el.HEcc": {"value": 0.000000},
"log.initial.el.HZLimitDryRunaway": {"value": 3.098811e09, "unit": u.m},
"log.initial.el.HZLimRecVenus": {"value": 2.502002e09, "unit": u.m},
"log.initial.el.HZLimRunaway": {"value": 3.267138e09, "unit": u.m},
"log.initial.el.HZLimMoistGreenhouse": {"value": 3.310536e09, "unit": u.m},
"log.initial.el.HZLimMaxGreenhouse": {"value": 5.611497e09, "unit": u.m},
"log.initial.el.HZLimEarlyMars": {"value": 6.122597e09, "unit": u.m},
"log.initial.el.Instellation": {"value": 69.788358, "unit": u.kg / u.sec ** 3},
"log.initial.el.KEcc": {"value": 0.200000},
"log.initial.el.Eccentricity": {"value": 0.200000},
"log.initial.el.OrbEnergy": {"value": -5.298093e34, "unit": u.Joule},
"log.initial.el.MeanMotion": {"value": 6.296062e-06, "unit": 1 / u.sec},
"log.initial.el.OrbPeriod": {"value": 9.979547e05, "unit": u.sec},
"log.initial.el.SemiMajorAxis": {"value": 0.100000, "unit": u.au},
"log.initial.el.CriticalSemiMajorAxis": {"value": -1.000000, "unit": u.m},
"log.initial.el.COPP": {"value": 0.000000},
"log.initial.el.OrbAngMom": {
"value": 1.648983e40,
"unit": (u.kg * u.m ** 2) / u.sec,
},
"log.initial.el.LongP": {"value": 0.000000, "unit": u.rad},
"log.initial.el.LXUVTot": {"value": -1.000000, "unit": u.kg / u.sec ** 3},
"log.initial.el.TotOrbEnergy": {"value": -2.119237e35, "unit": u.Joule},
"log.initial.el.OrbPotEnergy": {"value": -1.059619e35, "unit": u.Joule},
"log.initial.el.LostEnergy": {"value": 5.562685e-309, "unit": u.Joule},
"log.initial.el.TidalRadius": {"value": 2.096446e08, "unit": u.m},
"log.initial.el.DsemiDtEqtide": {"value": 0.000192, "unit": u.m / u.sec},
"log.initial.el.DeccDtEqtide": {"value": 4.036352e-15, "unit": 1 / u.sec},
"log.initial.el.DMeanMotionDtEqtide": {
"value": -1.211805e-19,
"unit": 1 / u.sec ** 2,
},
"log.initial.el.DOrbPerDtEqtide": {"value": 1.920766e-08},
"log.initial.el.EccTimeEqtide": {"value": 4.954969e13, "unit": u.sec},
"log.initial.el.SemiTimeEqtide": {"value": 7.793412e13, "unit": u.sec},
"log.initial.el.DHEccDtEqtide": {"value": 0.000000, "unit": 1 / u.sec},
"log.initial.el.DKEccDtEqtide": {"value": 4.036352e-15, "unit": 1 / u.sec},
"log.initial.el.DXoblDtEqtide": {"value": 2.139632e-12, "unit": 1 / u.sec},
"log.initial.el.DYoblDtEqtide": {"value": 0.000000, "unit": 1 / u.sec},
"log.initial.el.DZoblDtEqtide": {"value": -9.303384e-13, "unit": 1 / u.sec},
"log.initial.el.LockTime": {"value": -1.000000, "unit": u.sec},
"log.initial.el.BodyDsemiDtEqtide": {"value": -1.000000},
"log.initial.el.BodyDeccDt": {"value": -1.000000},
"log.initial.el.DOblDtEqtide": {"value": 2.333143e-12, "unit": u.rad / u.sec},
"log.initial.el.DRotPerDtEqtide": {"value": 1.314881e-06},
"log.initial.el.DRotRateDtEqtide": {
"value": -1.106722e-15,
"unit": 1 / u.sec ** 2,
},
"log.initial.el.EqRotRateDiscrete": {"value": 6.296062e-06, "unit": 1 / u.sec},
"log.initial.el.EqRotPerDiscrete": {"value": 9.979547e05, "unit": u.sec},
"log.initial.el.EqRotRateCont": {"value": 8.688566e-06, "unit": 1 / u.sec},
"log.initial.el.EqRotPerCont": {"value": 7.231556e05, "unit": u.sec},
"log.initial.el.EqRotPer": {"value": 9.979547e05, "unit": u.sec},
"log.initial.el.EqTidePower": {"value": 0.000000, "unit": 1 / u.sec},
"log.initial.el.GammaRot": {"value": -1.000000, "unit": u.sec},
"log.initial.el.GammaOrb": {"value": -1.000000, "unit": u.sec},
"log.initial.el.OceanK2": {"value": 0.010000},
"log.initial.el.EnvTidalQ": {"value": -1.000000},
"log.initial.el.OceanTidalQ": {"value": -1.000000},
"log.initial.el.TideLock": {"value": 0.000000},
"log.initial.el.RotTimeEqtide": {"value": 6.570938e10, "unit": u.sec},
"log.initial.el.EnvK2": {"value": 0.500000},
"log.initial.el.OblTimeEqtide": {"value": -1.000000},
"log.initial.el.PowerEqtide": {"value": 6.080320e21, "unit": u.W},
"log.initial.el.SurfEnFluxEqtide": {
"value": 1.100903e04,
"unit": u.kg / u.sec ** 3,
},
"log.initial.el.SurfWaterMass": {"value": 0.000000, "unit": u.kg},
"log.initial.el.EnvelopeMass": {"value": 1.000000, "unit": u.Mearth},
"log.initial.el.OxygenMass": {"value": 0.000000, "unit": u.kg},
"log.initial.el.RGLimit": {"value": 3.099115e09, "unit": u.m},
"log.initial.el.XO": {"value": 0.000000},
"log.initial.el.EtaO": {"value": 0.000000},
"log.initial.el.PlanetRadius": {"value": 32.869442, "unit": u.Rearth},
"log.initial.el.OxygenMantleMass": {"value": 0.000000, "unit": u.kg},
"log.initial.el.RadXUV": {"value": -1.000000, "unit": u.m},
"log.initial.el.RadSolid": {"value": -1.000000, "unit": u.m},
"log.initial.el.PresXUV": {"value": 5.000000},
"log.initial.el.ScaleHeight": {"value": -1.000000, "unit": u.m},
"log.initial.el.ThermTemp": {"value": 400.000000, "unit": u.K},
"log.initial.el.AtmGasConst": {"value": 4124.000000},
"log.initial.el.PresSurf": {"value": -1.000000, "unit": u.Pa},
"log.initial.el.DEnvMassDt": {"value": -2.508715e09, "unit": u.kg / u.sec},
"log.initial.el.FXUV": {"value": 0.069788, "unit": u.W / u.m ** 2},
"log.initial.el.AtmXAbsEffH2O": {"value": 0.300000},
"log.initial.el.RocheRadius": {"value": 1.885546e08, "unit": u.m},
"log.initial.el.BondiRadius": {"value": 7.899468e08, "unit": u.m},
"log.initial.el.HEscapeRegime": {"value": 3.000000},
"log.initial.el.RRCriticalFlux": {"value": 0.000139, "unit": u.W / u.m ** 2},
"log.initial.el.KTide": {"value": 1.000000},
"log.initial.el.RGDuration": {"value": 1.00000e06, "unit": u.yr},
"log.initial.rr.Mass": {"value": 2.000000, "unit": u.Mearth},
"log.initial.rr.Obliquity": {"value": 0.785398, "unit": u.rad},
"log.initial.rr.PrecA": {"value": 0.000000, "unit": u.rad},
"log.initial.rr.Xobl": {"value": 0.707107},
"log.initial.rr.Yobl": {"value": 0.000000},
"log.initial.rr.Zobl": {"value": 0.707107},
"log.initial.rr.Radius": {"value": 2.096446e08, "unit": u.m},
"log.initial.rr.RadGyra": {"value": 0.400000},
"log.initial.rr.RotAngMom": {
"value": 1.221650e37,
"unit": (u.kg * u.m ** 2) / u.sec,
},
"log.initial.rr.RotKinEnergy": {"value": 8.884088e32, "unit": u.Joule},
"log.initial.rr.RotVel": {"value": 3.049157e04, "unit": u.m / u.sec},
"log.initial.rr.BodyType": {"value": 0.000000},
"log.initial.rr.RotRate": {"value": 0.000145, "unit": 1 / u.sec},
"log.initial.rr.RotPer": {"value": 0.500000, "unit": u.day},
"log.initial.rr.Density": {"value": 0.309474, "unit": u.kg / u.m ** 3},
"log.initial.rr.SurfEnFluxTotal": {
"value": 2.324795e04,
"unit": u.kg / u.sec ** 3,
},
"log.initial.rr.TidalQ": {"value": -1.000000e05},
"log.initial.rr.ImK2": {"value": -5.000000e-06},
"log.initial.rr.K2": {"value": 0.500000},
"log.initial.rr.K2Man": {"value": 0.300000},
"log.initial.rr.Imk2Man": {"value": -0.003000},
"log.initial.rr.TidalQMantle": {"value": 100.000000},
"log.initial.rr.HEcc": {"value": 0.000000},
"log.initial.rr.HZLimitDryRunaway": {"value": 3.098811e09, "unit": u.m},
"log.initial.rr.HZLimRecVenus": {"value": 2.502002e09, "unit": u.m},
"log.initial.rr.HZLimRunaway": {"value": 3.267138e09, "unit": u.m},
"log.initial.rr.HZLimMoistGreenhouse": {"value": 3.310536e09, "unit": u.m},
"log.initial.rr.HZLimMaxGreenhouse": {"value": 5.611497e09, "unit": u.m},
"log.initial.rr.HZLimEarlyMars": {"value": 6.122597e09, "unit": u.m},
"log.initial.rr.Instellation": {"value": 69.788358, "unit": u.kg / u.sec ** 3},
"log.initial.rr.KEcc": {"value": 0.200000},
"log.initial.rr.Eccentricity": {"value": 0.200000},
"log.initial.rr.OrbEnergy": {"value": -5.298093e34, "unit": u.Joule},
"log.initial.rr.MeanMotion": {"value": 6.296062e-06, "unit": 1 / u.sec},
"log.initial.rr.OrbPeriod": {"value": 9.979547e05, "unit": u.sec},
"log.initial.rr.SemiMajorAxis": {"value": 0.100000, "unit": u.au},
"log.initial.rr.CriticalSemiMajorAxis": {"value": -1.000000, "unit": u.m},
"log.initial.rr.COPP": {"value": 0.000000},
"log.initial.rr.OrbAngMom": {
"value": 1.648983e40,
"unit": (u.kg * u.m ** 2) / u.sec,
},
"log.initial.rr.LongP": {"value": 0.000000, "unit": u.rad},
"log.initial.rr.LXUVTot": {"value": -1.000000, "unit": u.kg / u.sec ** 3},
"log.initial.rr.TotOrbEnergy": {"value": -2.119237e35, "unit": u.Joule},
"log.initial.rr.OrbPotEnergy": {"value": -1.059619e35, "unit": u.Joule},
"log.initial.rr.LostEnergy": {"value": 5.562685e-309, "unit": u.Joule},
"log.initial.rr.TidalRadius": {"value": 2.096446e08, "unit": u.m},
"log.initial.rr.DsemiDtEqtide": {"value": 0.000192, "unit": u.m / u.sec},
"log.initial.rr.DeccDtEqtide": {"value": 4.036352e-15, "unit": 1 / u.sec},
"log.initial.rr.DMeanMotionDtEqtide": {
"value": -1.211805e-19,
"unit": 1 / u.sec ** 2,
},
"log.initial.rr.DOrbPerDtEqtide": {"value": 1.920766e-08},
"log.initial.rr.EccTimeEqtide": {"value": 4.954969e13, "unit": u.sec},
"log.initial.rr.SemiTimeEqtide": {"value": 7.793412e13, "unit": u.sec},
"log.initial.rr.DHEccDtEqtide": {"value": 0.000000, "unit": 1 / u.sec},
"log.initial.rr.DKEccDtEqtide": {"value": 4.036352e-15, "unit": 1 / u.sec},
"log.initial.rr.DXoblDtEqtide": {"value": 1.462258e-12, "unit": 1 / u.sec},
"log.initial.rr.DYoblDtEqtide": {"value": 0.000000, "unit": 1 / u.sec},
"log.initial.rr.DZoblDtEqtide": {"value": -1.462258e-12, "unit": 1 / u.sec},
"log.initial.rr.LockTime": {"value": -1.000000, "unit": u.sec},
"log.initial.rr.BodyDsemiDtEqtide": {"value": -1.000000},
"log.initial.rr.BodyDeccDt": {"value": -1.000000},
"log.initial.rr.DOblDtEqtide": {"value": 2.067945e-12, "unit": u.rad / u.sec},
"log.initial.rr.DRotPerDtEqtide": {"value": 3.287202e-07},
"log.initial.rr.DRotRateDtEqtide": {
"value": -1.106722e-15,
"unit": 1 / u.sec ** 2,
},
"log.initial.rr.EqRotRateDiscrete": {"value": 6.296062e-06, "unit": 1 / u.sec},
"log.initial.rr.EqRotPerDiscrete": {"value": 9.979547e05, "unit": u.sec},
"log.initial.rr.EqRotRateCont": {"value": 8.688566e-06, "unit": 1 / u.sec},
"log.initial.rr.EqRotPerCont": {"value": 7.231556e05, "unit": u.sec},
"log.initial.rr.EqRotPer": {"value": 9.979547e05, "unit": u.sec},
"log.initial.rr.EqTidePower": {"value": 0.000000, "unit": 1 / u.sec},
"log.initial.rr.GammaRot": {"value": -1.000000, "unit": u.sec},
"log.initial.rr.GammaOrb": {"value": -1.000000, "unit": u.sec},
"log.initial.rr.OceanK2": {"value": 0.010000},
"log.initial.rr.EnvTidalQ": {"value": -1.000000},
"log.initial.rr.OceanTidalQ": {"value": -1.000000},
"log.initial.rr.TideLock": {"value": 0.000000},
"log.initial.rr.RotTimeEqtide": {"value": 1.314188e11, "unit": u.sec},
"log.initial.rr.EnvK2": {"value": 0.500000},
"log.initial.rr.OblTimeEqtide": {"value": -1.000000},
"log.initial.rr.PowerEqtide": {"value": 1.284046e22, "unit": u.W},
"log.initial.rr.SurfEnFluxEqtide": {
"value": 2.324895e04,
"unit": u.kg / u.sec ** 3,
},
"log.initial.rr.SurfWaterMass": {"value": 0.000000, "unit": u.kg},
"log.initial.rr.EnvelopeMass": {"value": 1.000000, "unit": u.Mearth},
"log.initial.rr.OxygenMass": {"value": 0.000000, "unit": u.kg},
"log.initial.rr.RGLimit": {"value": 3.099115e09, "unit": u.m},
"log.initial.rr.XO": {"value": 0.000000},
"log.initial.rr.EtaO": {"value": 0.000000},
"log.initial.rr.PlanetRadius": {"value": 32.869442, "unit": u.Rearth},
"log.initial.rr.OxygenMantleMass": {"value": 0.000000, "unit": u.kg},
"log.initial.rr.RadXUV": {"value": -1.000000, "unit": u.m},
"log.initial.rr.RadSolid": {"value": -1.000000, "unit": u.m},
"log.initial.rr.PresXUV": {"value": 5.000000},
"log.initial.rr.ScaleHeight": {"value": -1.000000, "unit": u.m},
"log.initial.rr.ThermTemp": {"value": 400.000000, "unit": u.K},
"log.initial.rr.AtmGasConst": {"value": 4124.000000},
"log.initial.rr.PresSurf": {"value": -1.000000, "unit": u.Pa},
"log.initial.rr.DEnvMassDt": {"value": -1.119308e08, "unit": u.kg / u.sec},
"log.initial.rr.FXUV": {"value": 0.069788, "unit": u.W / u.m ** 2},
"log.initial.rr.AtmXAbsEffH2O": {"value": 0.300000},
"log.initial.rr.RocheRadius": {"value": 1.885546e08, "unit": u.m},
"log.initial.rr.BondiRadius": {"value": 7.899468e08, "unit": u.m},
"log.initial.rr.HEscapeRegime": {"value": 6.000000},
"log.initial.rr.RRCriticalFlux": {"value": 0.000139, "unit": u.W / u.m ** 2},
"log.initial.rr.KTide": {"value": 1.000000},
"log.initial.rr.RGDuration": {"value": 1.00000e06, "unit": u.yr},
"log.final.system.Age": {"value": 6.311520e13, "unit": u.sec, "rtol": 1e-4},
"log.final.system.Time": {"value": 3.155760e13, "unit": u.sec, "rtol": 1e-4},
"log.final.system.TotAngMom": {
"value": 5.425277e40,
"unit": (u.kg * u.m ** 2) / u.sec,
"rtol": 1e-4,
},
"log.final.system.TotEnergy": {
"value": -2.482441e43,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.system.PotEnergy": {
"value": -2.482440e43,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.system.KinEnergy": {
"value": 5.347271e34,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.system.DeltaTime": {
"value": 4.863245e08,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.star.Mass": {"value": 1.988416e30, "unit": u.kg, "rtol": 1e-4},
"log.final.star.Obliquity": {"value": 0.000000, "unit": u.rad, "rtol": 1e-4},
"log.final.star.PrecA": {"value": 0.000000, "unit": u.rad, "rtol": 1e-4},
"log.final.star.Xobl": {"value": 0.000000, "rtol": 1e-4},
"log.final.star.Yobl": {"value": 0.000000, "rtol": 1e-4},
"log.final.star.Zobl": {"value": 1.000000, "rtol": 1e-4},
"log.final.star.Radius": {"value": 6.378100e06, "unit": u.m, "rtol": 1e-4},
"log.final.star.RadGyra": {"value": 0.500000, "rtol": 1e-4},
"log.final.star.RotAngMom": {
"value": 1.470605e39,
"unit": (u.kg * u.m ** 2) / u.sec,
"rtol": 1e-4,
},
"log.final.star.RotKinEnergy": {
"value": 5.347271e34,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.star.RotVel": {
"value": 463.828520,
"unit": u.m / u.sec,
"rtol": 1e-4,
},
"log.final.star.BodyType": {"value": 0.000000, "rtol": 1e-4},
"log.final.star.RotRate": {
"value": 7.272205e-05,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.star.RotPer": {"value": 8.640000e04, "unit": u.sec, "rtol": 1e-4},
"log.final.star.Density": {
"value": 1.829552e09,
"unit": u.kg / u.m ** 3,
"rtol": 1e-4,
},
"log.final.star.SurfEnFluxTotal": {
"value": 3.025764e-12,
"unit": u.kg / u.sec ** 3,
"rtol": 1e-4,
},
"log.final.star.TidalQ": {"value": 1.000000e06, "rtol": 1e-4},
"log.final.star.ImK2": {"value": -5.000000e-07, "rtol": 1e-4},
"log.final.star.K2": {"value": 0.500000, "rtol": 1e-4},
"log.final.star.K2Man": {"value": 0.010000, "rtol": 1e-4},
"log.final.star.Imk2Man": {"value": 0.000000, "rtol": 1e-4},
"log.final.star.TidalQMantle": {"value": 100.000000, "rtol": 1e-4},
"log.final.star.HEcc": {"value": 0.000000, "rtol": 1e-4},
"log.final.star.HZLimitDryRunaway": {
"value": 3.036202e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.star.HZLimRecVenus": {
"value": 2.502002e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.star.HZLimRunaway": {
"value": 3.267138e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.star.HZLimMoistGreenhouse": {
"value": 3.310536e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.star.HZLimMaxGreenhouse": {
"value": 5.611497e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.star.HZLimEarlyMars": {
"value": 6.122597e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.star.Instellation": {
"value": -1.000000,
"unit": u.kg / u.sec ** 3,
"rtol": 1e-4,
},
"log.final.star.KEcc": {"value": 0.000000, "rtol": 1e-4},
"log.final.star.Eccentricity": {"value": -1.000000, "rtol": 1e-4},
"log.final.star.OrbEnergy": {"value": 0.000000, "unit": u.Joule, "rtol": 1e-4},
"log.final.star.MeanMotion": {
"value": -1.000000,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.star.OrbPeriod": {"value": -1.000000, "unit": u.sec, "rtol": 1e-4},
"log.final.star.SemiMajorAxis": {"value": -1.000000, "unit": u.m, "rtol": 1e-4},
"log.final.star.CriticalSemiMajorAxis": {
"value": -1.000000,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.star.COPP": {"value": 0.000000, "rtol": 1e-4},
"log.final.star.OrbAngMom": {
"value": 0.000000,
"unit": (u.kg * u.m ** 2) / u.sec,
"rtol": 1e-4,
},
"log.final.star.LongP": {"value": 0.000000, "unit": u.rad, "rtol": 1e-4},
"log.final.star.LXUVTot": {
"value": 1.923000e20,
"unit": u.kg / u.sec ** 3,
"rtol": 1e-4,
},
"log.final.star.TotOrbEnergy": {
"value": -1.740032e35,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.star.OrbPotEnergy": {
"value": -1.000000,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.star.LostEnergy": {
"value": 2.550749e26,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.star.LostAngMom": {
"value": 3.507532e30,
"unit": (u.kg * u.m ** 2) / u.sec,
"rtol": 1e-4,
},
"log.final.star.LockTime": {"value": -1.000000, "unit": u.sec, "rtol": 1e-4},
"log.final.star.BodyDsemiDtEqtide": {"value": -1.000000, "rtol": 1e-4},
"log.final.star.BodyDeccDt": {"value": -1.000000, "rtol": 1e-4},
"log.final.star.DOblDtEqtide": {
"value": 0.000000,
"unit": u.rad / u.sec,
"rtol": 1e-4,
},
"log.final.star.DRotPerDtEqtide": {"value": 1.380178e-27, "rtol": 1e-4},
"log.final.star.DRotRateDtEqtide": {
"value": -1.161683e-36,
"unit": 1 / u.sec ** 2,
"rtol": 1e-4,
},
"log.final.star.EqRotRateDiscrete": {
"value": 6.510710e-06,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.star.EqRotPerDiscrete": {
"value": 9.650538e05,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.star.EqRotRateCont": {
"value": 7.554427e-06,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.star.EqRotPerCont": {
"value": 8.317222e05,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.star.EqRotPer": {"value": 9.650538e05, "unit": u.sec, "rtol": 1e-4},
"log.final.star.EqTidePower": {
"value": 0.000000,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.star.GammaRot": {"value": -1.000000, "unit": u.sec, "rtol": 1e-4},
"log.final.star.GammaOrb": {"value": -1.000000, "unit": u.sec, "rtol": 1e-4},
"log.final.star.OceanK2": {"value": 0.010000, "rtol": 1e-4},
"log.final.star.EnvTidalQ": {"value": -1.000000, "rtol": 1e-4},
"log.final.star.OceanTidalQ": {"value": -1.000000, "rtol": 1e-4},
"log.final.star.TideLock": {"value": 0.000000, "rtol": 1e-4},
"log.final.star.RotTimeEqtide": {
"value": 2.047960e-37,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.star.EnvK2": {"value": 0.010000, "rtol": 1e-4},
"log.final.star.OblTimeEqtide": {"value": -1.000000, "rtol": 1e-4},
"log.final.star.PowerEqtide": {"value": 1546.776661, "unit": u.W, "rtol": 1e-4},
"log.final.star.SurfEnFluxEqtide": {
"value": 3.025764e-12,
"unit": u.kg / u.sec ** 3,
"rtol": 1e-4,
},
"log.final.star.Luminosity": {"value": 1.923000e23, "unit": u.W, "rtol": 1e-4},
"log.final.star.LXUVStellar": {"value": 1.923000e20, "unit": u.W, "rtol": 1e-4},
"log.final.star.Temperature": {"value": 5778.000000, "unit": u.K, "rtol": 1e-4},
"log.final.star.LXUVFrac": {"value": 0.001000, "rtol": 1e-4},
"log.final.star.RossbyNumber": {"value": 0.078260, "rtol": 1e-4},
"log.final.star.DRotPerDtStellar": {"value": 6.530034e-18, "rtol": 1e-4},
"log.final.auto.Mass": {"value": 1.411359, "unit": u.Mearth, "rtol": 1e-4},
"log.final.auto.Obliquity": {"value": 0.000000, "unit": u.rad, "rtol": 1e-4},
"log.final.auto.PrecA": {"value": 0.000000, "unit": u.rad, "rtol": 1e-4},
"log.final.auto.Xobl": {"value": 1.570471e-162, "rtol": 1e-4},
"log.final.auto.Yobl": {"value": 0.000000, "rtol": 1e-4},
"log.final.auto.Zobl": {"value": 1.000002, "rtol": 1e-4},
"log.final.auto.Radius": {"value": 1.630278e08, "unit": u.m, "rtol": 1e-4},
"log.final.auto.RadGyra": {"value": 0.400000, "rtol": 1e-4},
"log.final.auto.RotAngMom": {
"value": 2.333687e35,
"unit": (u.kg * u.m ** 2) / u.sec,
"rtol": 1e-4,
},
"log.final.auto.RotKinEnergy": {
"value": 7.596980e29,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.auto.RotVel": {
"value": 1061.426957,
"unit": u.m / u.sec,
"rtol": 1e-4,
},
"log.final.auto.BodyType": {"value": 0.000000, "rtol": 1e-4},
"log.final.auto.RotRate": {
"value": 6.510710e-06,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.auto.RotPer": {"value": 11.169604, "unit": u.day, "rtol": 1e-4},
"log.final.auto.Density": {
"value": 0.464405,
"unit": u.kg / u.m ** 3,
"rtol": 1e-4,
},
"log.final.auto.SurfEnFluxTotal": {
"value": 52.542587,
"unit": u.kg / u.sec ** 3,
"rtol": 1e-4,
},
"log.final.auto.TidalQ": {"value": -1.000000e05, "rtol": 1e-4},
"log.final.auto.ImK2": {"value": -5.000000e-06, "rtol": 1e-4},
"log.final.auto.K2": {"value": 0.500000, "rtol": 1e-4},
"log.final.auto.K2Man": {"value": 0.300000, "rtol": 1e-4},
"log.final.auto.Imk2Man": {"value": -0.003000, "rtol": 1e-4},
"log.final.auto.TidalQMantle": {"value": 100.000000, "rtol": 1e-4},
"log.final.auto.HEcc": {"value": 0.000000, "rtol": 1e-4},
"log.final.auto.HZLimitDryRunaway": {
"value": 3.062148e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.auto.HZLimRecVenus": {
"value": 2.502002e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.auto.HZLimRunaway": {
"value": 3.267138e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.auto.HZLimMoistGreenhouse": {
"value": 3.310536e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.auto.HZLimMaxGreenhouse": {
"value": 5.611497e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.auto.HZLimEarlyMars": {
"value": 6.122597e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.auto.Instellation": {
"value": 72.115242,
"unit": u.kg / u.sec ** 3,
"rtol": 1e-4,
},
"log.final.auto.KEcc": {"value": 0.129902, "rtol": 1e-4},
"log.final.auto.Eccentricity": {"value": 0.129902, "rtol": 1e-4},
"log.final.auto.OrbEnergy": {
"value": -3.823257e34,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.auto.MeanMotion": {
"value": 6.510710e-06,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.auto.OrbPeriod": {"value": 9.650538e05, "unit": u.sec, "rtol": 1e-4},
"log.final.auto.SemiMajorAxis": {"value": 0.097790, "unit": u.au, "rtol": 1e-4},
"log.final.auto.CriticalSemiMajorAxis": {
"value": -1.000000,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.auto.COPP": {"value": 0.000000, "rtol": 1e-4},
"log.final.auto.OrbAngMom": {
"value": 1.164500e40,
"unit": (u.kg * u.m ** 2) / u.sec,
"rtol": 1e-4,
},
"log.final.auto.LongP": {"value": 0.000000, "unit": u.rad, "rtol": 1e-4},
"log.final.auto.LXUVTot": {
"value": -1.000000,
"unit": u.kg / u.sec ** 3,
"rtol": 1e-4,
},
"log.final.auto.TotOrbEnergy": {
"value": -1.740032e35,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.auto.OrbPotEnergy": {
"value": -7.646514e34,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.auto.LostEnergy": {
"value": 1.264526e33,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.auto.TidalRadius": {"value": 1.630278e08, "unit": u.m, "rtol": 1e-4},
"log.final.auto.DsemiDtEqtide": {
"value": -6.842553e-06,
"unit": u.m / u.sec,
"rtol": 1e-4,
},
"log.final.auto.DeccDtEqtide": {
"value": -1.800337e-15,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.auto.DMeanMotionDtEqtide": {
"value": 4.567924e-21,
"unit": 1 / u.sec ** 2,
"rtol": 1e-4,
},
"log.final.auto.DOrbPerDtEqtide": {"value": -6.770832e-10, "rtol": 1e-4},
"log.final.auto.EccTimeEqtide": {
"value": 7.215422e13,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.auto.SemiTimeEqtide": {
"value": 2.137966e15,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.auto.DHEccDtEqtide": {
"value": -0.000000,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.auto.DKEccDtEqtide": {
"value": -1.800337e-15,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.auto.DXoblDtEqtide": {
"value": 0.000000,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.auto.DYoblDtEqtide": {
"value": 0.000000,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.auto.DZoblDtEqtide": {
"value": 0.000000,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.auto.LockTime": {"value": 2.398144e11, "unit": u.sec, "rtol": 1e-4},
"log.final.auto.BodyDsemiDtEqtide": {"value": -1.000000, "rtol": 1e-4},
"log.final.auto.BodyDeccDt": {"value": -1.000000, "rtol": 1e-4},
"log.final.auto.DOblDtEqtide": {
"value": 0.000000,
"unit": u.rad / u.sec,
"rtol": 1e-4,
},
"log.final.auto.DRotPerDtEqtide": {"value": -8.245322e-298, "rtol": 1e-4},
"log.final.auto.DRotRateDtEqtide": {
"value": 5.562685e-309,
"unit": 1 / u.sec ** 2,
"rtol": 1e-4,
},
"log.final.auto.EqRotRateDiscrete": {
"value": 6.510710e-06,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.auto.EqRotPerDiscrete": {
"value": 9.650538e05,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.auto.EqRotRateCont": {
"value": 7.554427e-06,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.auto.EqRotPerCont": {
"value": 8.317222e05,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.auto.EqRotPer": {"value": 9.650538e05, "unit": u.sec, "rtol": 1e-4},
"log.final.auto.EqTidePower": {
"value": 0.000000,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.auto.GammaRot": {"value": -1.000000, "unit": u.sec, "rtol": 1e-4},
"log.final.auto.GammaOrb": {"value": -1.000000, "unit": u.sec, "rtol": 1e-4},
"log.final.auto.OceanK2": {"value": 0.010000, "rtol": 1e-4},
"log.final.auto.EnvTidalQ": {"value": -1.000000, "rtol": 1e-4},
"log.final.auto.OceanTidalQ": {"value": -1.000000, "rtol": 1e-4},
"log.final.auto.TideLock": {"value": 1.000000, "rtol": 1e-4},
"log.final.auto.RotTimeEqtide": {
"value": 1.170426e303,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.auto.EnvK2": {"value": 0.500000, "rtol": 1e-4},
"log.final.auto.OblTimeEqtide": {"value": -1.000000, "rtol": 1e-4},
"log.final.auto.PowerEqtide": {"value": 1.788269e19, "unit": u.W, "rtol": 1e-4},
"log.final.auto.SurfEnFluxEqtide": {
"value": 53.542587,
"unit": u.kg / u.sec ** 3,
"rtol": 1e-4,
},
"log.final.auto.SurfWaterMass": {"value": 0.000000, "unit": u.kg, "rtol": 1e-4},
"log.final.auto.EnvelopeMass": {
"value": 0.411359,
"unit": u.Mearth,
"rtol": 1e-4,
},
"log.final.auto.OxygenMass": {"value": 0.000000, "unit": u.kg, "rtol": 1e-4},
"log.final.auto.RGLimit": {"value": 3.141989e09, "unit": u.m, "rtol": 1e-4},
"log.final.auto.XO": {"value": 0.000000, "rtol": 1e-4},
"log.final.auto.EtaO": {"value": 0.000000, "rtol": 1e-4},
"log.final.auto.PlanetRadius": {
"value": 25.560564,
"unit": u.Rearth,
"rtol": 1e-4,
},
"log.final.auto.OxygenMantleMass": {
"value": 0.000000,
"unit": u.kg,
"rtol": 1e-4,
},
"log.final.auto.RadXUV": {"value": -1.000000, "unit": u.m, "rtol": 1e-4},
"log.final.auto.RadSolid": {"value": -1.000000, "unit": u.m, "rtol": 1e-4},
"log.final.auto.PresXUV": {"value": 5.000000, "rtol": 1e-4},
"log.final.auto.ScaleHeight": {"value": -1.000000, "unit": u.m, "rtol": 1e-4},
"log.final.auto.ThermTemp": {"value": 400.000000, "unit": u.K, "rtol": 1e-4},
"log.final.auto.AtmGasConst": {"value": 4124.000000, "rtol": 1e-4},
"log.final.auto.PresSurf": {"value": -1.000000, "unit": u.Pa, "rtol": 1e-4},
"log.final.auto.DEnvMassDt": {
"value": -7.802590e07,
"unit": u.kg / u.sec,
"rtol": 1e-4,
},
"log.final.auto.FXUV": {
"value": 0.072115,
"unit": u.W / u.m ** 2,
"rtol": 1e-4,
},
"log.final.auto.AtmXAbsEffH2O": {"value": 0.300000, "rtol": 1e-4},
"log.final.auto.RocheRadius": {"value": 1.641596e08, "unit": u.m, "rtol": 1e-4},
"log.final.auto.BondiRadius": {"value": 5.637137e08, "unit": u.m, "rtol": 1e-4},
"log.final.auto.HEscapeRegime": {"value": 6.000000, "rtol": 1e-4},
"log.final.auto.RRCriticalFlux": {
"value": 1.470664e-06,
"unit": u.W / u.m ** 2,
"rtol": 1e-4,
},
"log.final.auto.KTide": {"value": 0.100000, "rtol": 1e-4},
"log.final.auto.RGDuration": {"value": 1.00000e06, "unit": u.yr, "rtol": 1e-4},
"log.final.bondi.Mass": {"value": 1.000000, "unit": u.Mearth, "rtol": 1e-4},
"log.final.bondi.Obliquity": {
"value": 2.992105e-54,
"unit": u.rad,
"rtol": 1e-4,
},
"log.final.bondi.PrecA": {"value": 0.000000, "unit": u.rad, "rtol": 1e-4},
"log.final.bondi.Xobl": {"value": 2.992105e-54, "rtol": 1e-4},
"log.final.bondi.Yobl": {"value": 0.000000, "rtol": 1e-4},
"log.final.bondi.Zobl": {"value": 1.000000, "rtol": 1e-4},
"log.final.bondi.Radius": {"value": 6.378100e06, "unit": u.m, "rtol": 1e-4},
"log.final.bondi.RadGyra": {"value": 0.400000, "rtol": 1e-4},
"log.final.bondi.RotAngMom": {
"value": 2.447267e32,
"unit": (u.kg * u.m ** 2) / u.sec,
"rtol": 1e-4,
},
"log.final.bondi.RotKinEnergy": {
"value": 7.703654e26,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.bondi.RotVel": {
"value": 40.154732,
"unit": u.m / u.sec,
"rtol": 1e-4,
},
"log.final.bondi.BodyType": {"value": 0.000000, "rtol": 1e-4},
"log.final.bondi.RotRate": {
"value": 6.295720e-06,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.bondi.RotPer": {"value": 11.551030, "unit": u.day, "rtol": 1e-4},
"log.final.bondi.Density": {
"value": 5495.038549,
"unit": u.kg / u.m ** 3,
"rtol": 1e-4,
},
"log.final.bondi.SurfEnFluxTotal": {
"value": 3.855526,
"unit": u.kg / u.sec ** 3,
"rtol": 1e-4,
},
"log.final.bondi.TidalQ": {"value": -100.000000, "rtol": 1e-4},
"log.final.bondi.ImK2": {"value": -0.003000, "rtol": 1e-4},
"log.final.bondi.K2": {"value": 0.500000, "rtol": 1e-4},
"log.final.bondi.K2Man": {"value": 0.300000, "rtol": 1e-4},
"log.final.bondi.Imk2Man": {"value": -0.003000, "rtol": 1e-4},
"log.final.bondi.TidalQMantle": {"value": 100.000000, "rtol": 1e-4},
"log.final.bondi.HEcc": {"value": 0.000000, "rtol": 1e-4},
"log.final.bondi.HZLimitDryRunaway": {
"value": 3.098815e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.bondi.HZLimRecVenus": {
"value": 2.502002e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.bondi.HZLimRunaway": {
"value": 3.267138e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.bondi.HZLimMoistGreenhouse": {
"value": 3.310536e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.bondi.HZLimMaxGreenhouse": {
"value": 5.611497e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.bondi.HZLimEarlyMars": {
"value": 6.122597e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.bondi.Instellation": {
"value": 69.783534,
"unit": u.kg / u.sec ** 3,
"rtol": 1e-4,
},
"log.final.bondi.KEcc": {"value": 0.200007, "rtol": 1e-4},
"log.final.bondi.Eccentricity": {"value": 0.200007, "rtol": 1e-4},
"log.final.bondi.OrbEnergy": {
"value": -2.648953e34,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.bondi.MeanMotion": {
"value": 6.295720e-06,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.bondi.OrbPeriod": {
"value": 9.980090e05,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.bondi.SemiMajorAxis": {
"value": 0.100004,
"unit": u.au,
"rtol": 1e-4,
},
"log.final.bondi.CriticalSemiMajorAxis": {
"value": -1.000000,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.bondi.COPP": {"value": 0.000000, "rtol": 1e-4},
"log.final.bondi.OrbAngMom": {
"value": 8.245061e39,
"unit": (u.kg * u.m ** 2) / u.sec,
"rtol": 1e-4,
},
"log.final.bondi.LongP": {"value": 0.000000, "unit": u.rad, "rtol": 1e-4},
"log.final.bondi.LXUVTot": {
"value": -1.000000,
"unit": u.kg / u.sec ** 3,
"rtol": 1e-4,
},
"log.final.bondi.TotOrbEnergy": {
"value": -1.740032e35,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.bondi.OrbPotEnergy": {
"value": -5.297906e34,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.bondi.LostEnergy": {
"value": 3.022858e31,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.bondi.TidalRadius": {
"value": 6.378100e06,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.bondi.DsemiDtEqtide": {
"value": -1.113122e-09,
"unit": u.m / u.sec,
"rtol": 1e-4,
},
"log.final.bondi.DeccDtEqtide": {
"value": -1.860062e-19,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.bondi.DMeanMotionDtEqtide": {
"value": 7.026492e-25,
"unit": 1 / u.sec ** 2,
"rtol": 1e-4,
},
"log.final.bondi.DOrbPerDtEqtide": {"value": -1.113852e-13, "rtol": 1e-4},
"log.final.bondi.EccTimeEqtide": {
"value": 1.075269e18,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.bondi.SemiTimeEqtide": {
"value": 1.343996e19,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.bondi.DHEccDtEqtide": {
"value": -0.000000,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.bondi.DKEccDtEqtide": {
"value": -1.860062e-19,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.bondi.DXoblDtEqtide": {
"value": -1.366908e-65,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.bondi.DYoblDtEqtide": {
"value": 0.000000,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.bondi.DZoblDtEqtide": {
"value": 4.089932e-119,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.bondi.LockTime": {"value": 4.919130e12, "unit": u.sec, "rtol": 1e-4},
"log.final.bondi.BodyDsemiDtEqtide": {"value": -1.000000, "rtol": 1e-4},
"log.final.bondi.BodyDeccDt": {"value": -1.000000, "rtol": 1e-4},
"log.final.bondi.DOblDtEqtide": {
"value": -1.366908e-65,
"unit": u.rad / u.sec,
"rtol": 1e-4,
},
"log.final.bondi.DRotPerDtEqtide": {"value": -8.818069e-298, "rtol": 1e-4},
"log.final.bondi.DRotRateDtEqtide": {
"value": 5.562685e-309,
"unit": 1 / u.sec ** 2,
"rtol": 1e-4,
},
"log.final.bondi.EqRotRateDiscrete": {
"value": 6.295720e-06,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.bondi.EqRotPerDiscrete": {
"value": 9.980090e05,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.bondi.EqRotRateCont": {
"value": 8.688253e-06,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.bondi.EqRotPerCont": {
"value": 7.231816e05,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.bondi.EqRotPer": {"value": 9.980090e05, "unit": u.sec, "rtol": 1e-4},
"log.final.bondi.EqTidePower": {
"value": 0.000000,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.bondi.GammaRot": {"value": -1.000000, "unit": u.sec, "rtol": 1e-4},
"log.final.bondi.GammaOrb": {"value": -1.000000, "unit": u.sec, "rtol": 1e-4},
"log.final.bondi.OceanK2": {"value": 0.010000, "rtol": 1e-4},
"log.final.bondi.EnvTidalQ": {"value": -1.000000, "rtol": 1e-4},
"log.final.bondi.OceanTidalQ": {"value": -1.000000, "rtol": 1e-4},
"log.final.bondi.TideLock": {"value": 1.000000, "rtol": 1e-4},
"log.final.bondi.RotTimeEqtide": {
"value": 1.131777e303,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.bondi.EnvK2": {"value": 0.500000, "rtol": 1e-4},
"log.final.bondi.OblTimeEqtide": {"value": -1.000000, "rtol": 1e-4},
"log.final.bondi.PowerEqtide": {
"value": 1.970952e15,
"unit": u.W,
"rtol": 1e-4,
},
"log.final.bondi.SurfEnFluxEqtide": {
"value": 3.855526,
"unit": u.kg / u.sec ** 3,
"rtol": 1e-4,
},
"log.final.bondi.SurfWaterMass": {
"value": 0.000000,
"unit": u.kg,
"rtol": 1e-4,
},
"log.final.bondi.EnvelopeMass": {
"value": 0.000000,
"unit": u.Mearth,
"rtol": 1e-4,
},
"log.final.bondi.OxygenMass": {"value": 0.000000, "unit": u.kg, "rtol": 1e-4},
"log.final.bondi.RGLimit": {"value": 3.147864e09, "unit": u.m, "rtol": 1e-4},
"log.final.bondi.XO": {"value": 0.000000, "rtol": 1e-4},
"log.final.bondi.EtaO": {"value": 0.000000, "rtol": 1e-4},
"log.final.bondi.PlanetRadius": {
"value": 1.000000,
"unit": u.Rearth,
"rtol": 1e-4,
},
"log.final.bondi.OxygenMantleMass": {
"value": 0.000000,
"unit": u.kg,
"rtol": 1e-4,
},
"log.final.bondi.RadXUV": {"value": -1.000000, "unit": u.m, "rtol": 1e-4},
"log.final.bondi.RadSolid": {"value": -1.000000, "unit": u.m, "rtol": 1e-4},
"log.final.bondi.PresXUV": {"value": 5.000000, "rtol": 1e-4},
"log.final.bondi.ScaleHeight": {"value": -1.000000, "unit": u.m, "rtol": 1e-4},
"log.final.bondi.ThermTemp": {"value": 400.000000, "unit": u.K, "rtol": 1e-4},
"log.final.bondi.AtmGasConst": {"value": 4124.000000, "rtol": 1e-4},
"log.final.bondi.PresSurf": {"value": -1.000000, "unit": u.Pa, "rtol": 1e-4},
"log.final.bondi.DEnvMassDt": {
"value": 5.562685e-309,
"unit": u.kg / u.sec,
"rtol": 1e-4,
},
"log.final.bondi.FXUV": {
"value": 0.069784,
"unit": u.W / u.m ** 2,
"rtol": 1e-4,
},
"log.final.bondi.AtmXAbsEffH2O": {"value": 0.300000, "rtol": 1e-4},
"log.final.bondi.RocheRadius": {
"value": 1.496611e08,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.bondi.BondiRadius": {
"value": 3.949665e08,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.bondi.HEscapeRegime": {"value": 8.000000, "rtol": 1e-4},
"log.final.bondi.RRCriticalFlux": {
"value": 1.080455,
"unit": u.W / u.m ** 2,
"rtol": 1e-4,
},
"log.final.bondi.KTide": {"value": 0.936113, "rtol": 1e-4},
"log.final.bondi.RGDuration": {"value": 1.00000e06, "unit": u.yr, "rtol": 1e-4},
"log.final.el.Mass": {"value": 1.986370, "unit": u.Mearth, "rtol": 1e-4},
"log.final.el.Obliquity": {"value": 0.000000, "unit": u.rad, "rtol": 1e-4},
"log.final.el.PrecA": {"value": 0.000000, "unit": u.rad, "rtol": 1e-4},
"log.final.el.Xobl": {"value": 1.563609e-162, "rtol": 1e-4},
"log.final.el.Yobl": {"value": 0.000000, "rtol": 1e-4},
"log.final.el.Zobl": {"value": 1.000000, "rtol": 1e-4},
"log.final.el.Radius": {"value": 2.084572e08, "unit": u.m, "rtol": 1e-4},
"log.final.el.RadGyra": {"value": 0.400000, "rtol": 1e-4},
"log.final.el.RotAngMom": {
"value": 5.470333e35,
"unit": (u.kg * u.m ** 2) / u.sec,
"rtol": 1e-4,
},
"log.final.el.RotKinEnergy": {
"value": 1.814054e30,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.el.RotVel": {
"value": 1382.558099,
"unit": u.m / u.sec,
"rtol": 1e-4,
},
"log.final.el.BodyType": {"value": 0.000000, "rtol": 1e-4},
"log.final.el.RotRate": {
"value": 6.632335e-06,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.el.RotPer": {"value": 10.964773, "unit": u.day, "rtol": 1e-4},
"log.final.el.Density": {
"value": 0.312647,
"unit": u.kg / u.m ** 3,
"rtol": 1e-4,
},
"log.final.el.SurfEnFluxTotal": {
"value": 33.098849,
"unit": u.kg / u.sec ** 3,
"rtol": 1e-4,
},
"log.final.el.TidalQ": {"value": -1.000000e05, "rtol": 1e-4},
"log.final.el.ImK2": {"value": -5.000000e-06, "rtol": 1e-4},
"log.final.el.K2": {"value": 0.500000, "rtol": 1e-4},
"log.final.el.K2Man": {"value": 0.300000, "rtol": 1e-4},
"log.final.el.Imk2Man": {"value": -0.003000, "rtol": 1e-4},
"log.final.el.TidalQMantle": {"value": 100.000000, "rtol": 1e-4},
"log.final.el.HEcc": {"value": 0.000000, "rtol": 1e-4},
"log.final.el.HZLimitDryRunaway": {
"value": 3.043341e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.el.HZLimRecVenus": {"value": 2.502002e09, "unit": u.m, "rtol": 1e-4},
"log.final.el.HZLimRunaway": {"value": 3.267138e09, "unit": u.m, "rtol": 1e-4},
"log.final.el.HZLimMoistGreenhouse": {
"value": 3.310536e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.el.HZLimMaxGreenhouse": {
"value": 5.611497e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.el.HZLimEarlyMars": {
"value": 6.122597e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.el.Instellation": {
"value": 73.462978,
"unit": u.kg / u.sec ** 3,
"rtol": 1e-4,
},
"log.final.el.KEcc": {"value": 0.068456, "rtol": 1e-4},
"log.final.el.Eccentricity": {"value": 0.068456, "rtol": 1e-4},
"log.final.el.OrbEnergy": {
"value": -5.447720e34,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.el.MeanMotion": {
"value": 6.632335e-06,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.el.OrbPeriod": {"value": 9.473564e05, "unit": u.sec, "rtol": 1e-4},
"log.final.el.SemiMajorAxis": {"value": 0.096591, "unit": u.au, "rtol": 1e-4},
"log.final.el.CriticalSemiMajorAxis": {
"value": -1.000000,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.el.COPP": {"value": 0.000000, "rtol": 1e-4},
"log.final.el.OrbAngMom": {
"value": 1.638922e40,
"unit": (u.kg * u.m ** 2) / u.sec,
"rtol": 1e-4,
},
"log.final.el.LongP": {"value": 0.000000, "unit": u.rad, "rtol": 1e-4},
"log.final.el.LXUVTot": {
"value": -1.000000,
"unit": u.kg / u.sec ** 3,
"rtol": 1e-4,
},
"log.final.el.TotOrbEnergy": {
"value": -1.740032e35,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.el.OrbPotEnergy": {
"value": -1.089544e35,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.el.LostEnergy": {
"value": 2.085924e33,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.el.TidalRadius": {"value": 2.084572e08, "unit": u.m, "rtol": 1e-4},
"log.final.el.DsemiDtEqtide": {
"value": -4.938887e-06,
"unit": u.m / u.sec,
"rtol": 1e-4,
},
"log.final.el.DeccDtEqtide": {
"value": -2.496493e-15,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.el.DMeanMotionDtEqtide": {
"value": 3.400372e-21,
"unit": 1 / u.sec ** 2,
"rtol": 1e-4,
},
"log.final.el.DOrbPerDtEqtide": {"value": -4.857058e-10, "rtol": 1e-4},
"log.final.el.EccTimeEqtide": {
"value": 2.742067e13,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.el.SemiTimeEqtide": {
"value": 2.925710e15,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.el.DHEccDtEqtide": {
"value": -0.000000,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.el.DKEccDtEqtide": {
"value": -2.496493e-15,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.el.DXoblDtEqtide": {
"value": 0.000000,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.el.DYoblDtEqtide": {
"value": 0.000000,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.el.DZoblDtEqtide": {
"value": 0.000000,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.el.LockTime": {"value": 8.193554e10, "unit": u.sec, "rtol": 1e-4},
"log.final.el.BodyDsemiDtEqtide": {"value": -1.000000, "rtol": 1e-4},
"log.final.el.BodyDeccDt": {"value": -1.000000, "rtol": 1e-4},
"log.final.el.DOblDtEqtide": {
"value": 0.000000,
"unit": u.rad / u.sec,
"rtol": 1e-4,
},
"log.final.el.DRotPerDtEqtide": {"value": -7.945685e-298, "rtol": 1e-4},
"log.final.el.DRotRateDtEqtide": {
"value": 5.562685e-309,
"unit": 1 / u.sec ** 2,
"rtol": 1e-4,
},
"log.final.el.EqRotRateDiscrete": {
"value": 6.632335e-06,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.el.EqRotPerDiscrete": {
"value": 9.473564e05,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.el.EqRotRateCont": {
"value": 6.927597e-06,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.el.EqRotPerCont": {
"value": 9.069791e05,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.el.EqRotPer": {"value": 9.473564e05, "unit": u.sec, "rtol": 1e-4},
"log.final.el.EqTidePower": {
"value": 0.000000,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.el.GammaRot": {"value": -1.000000, "unit": u.sec, "rtol": 1e-4},
"log.final.el.GammaOrb": {"value": -1.000000, "unit": u.sec, "rtol": 1e-4},
"log.final.el.OceanK2": {"value": 0.010000, "rtol": 1e-4},
"log.final.el.EnvTidalQ": {"value": -1.000000, "rtol": 1e-4},
"log.final.el.OceanTidalQ": {"value": -1.000000, "rtol": 1e-4},
"log.final.el.TideLock": {"value": 1.000000, "rtol": 1e-4},
"log.final.el.RotTimeEqtide": {
"value": 1.192290e303,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.el.EnvK2": {"value": 0.500000, "rtol": 1e-4},
"log.final.el.OblTimeEqtide": {"value": -1.000000, "rtol": 1e-4},
"log.final.el.PowerEqtide": {"value": 1.862016e19, "unit": u.W, "rtol": 1e-4},
"log.final.el.SurfEnFluxEqtide": {
"value": 34.098849,
"unit": u.kg / u.sec ** 3,
"rtol": 1e-4,
},
"log.final.el.SurfWaterMass": {"value": 0.000000, "unit": u.kg, "rtol": 1e-4},
"log.final.el.EnvelopeMass": {
"value": 0.986370,
"unit": u.Mearth,
"rtol": 1e-4,
},
"log.final.el.OxygenMass": {"value": 0.000000, "unit": u.kg, "rtol": 1e-4},
"log.final.el.RGLimit": {"value": 3.127704e09, "unit": u.m, "rtol": 1e-4},
"log.final.el.XO": {"value": 0.000000, "rtol": 1e-4},
"log.final.el.EtaO": {"value": 0.000000, "rtol": 1e-4},
"log.final.el.PlanetRadius": {
"value": 32.683276,
"unit": u.Rearth,
"rtol": 1e-4,
},
"log.final.el.OxygenMantleMass": {
"value": 0.000000,
"unit": u.kg,
"rtol": 1e-4,
},
"log.final.el.RadXUV": {"value": -1.000000, "unit": u.m, "rtol": 1e-4},
"log.final.el.RadSolid": {"value": -1.000000, "unit": u.m, "rtol": 1e-4},
"log.final.el.PresXUV": {"value": 5.000000, "rtol": 1e-4},
"log.final.el.ScaleHeight": {"value": -1.000000, "unit": u.m, "rtol": 1e-4},
"log.final.el.ThermTemp": {"value": 400.000000, "unit": u.K, "rtol": 1e-4},
"log.final.el.AtmGasConst": {"value": 4124.000000, "rtol": 1e-4},
"log.final.el.PresSurf": {"value": -1.000000, "unit": u.Pa, "rtol": 1e-4},
"log.final.el.DEnvMassDt": {
"value": -2.614005e09,
"unit": u.kg / u.sec,
"rtol": 1e-4,
},
"log.final.el.FXUV": {"value": 0.073463, "unit": u.W / u.m ** 2, "rtol": 1e-4},
"log.final.el.AtmXAbsEffH2O": {"value": 0.300000, "rtol": 1e-4},
"log.final.el.RocheRadius": {"value": 1.817114e08, "unit": u.m, "rtol": 1e-4},
"log.final.el.BondiRadius": {"value": 7.982897e08, "unit": u.m, "rtol": 1e-4},
"log.final.el.HEscapeRegime": {"value": 3.000000, "rtol": 1e-4},
"log.final.el.RRCriticalFlux": {
"value": 0.000139,
"unit": u.W / u.m ** 2,
"rtol": 1e-4,
},
"log.final.el.KTide": {"value": 1.000000, "rtol": 1e-4},
"log.final.el.RGDuration": {"value": 1.00000e06, "unit": u.yr, "rtol": 1e-4},
"log.final.rr.Mass": {"value": 1.999399, "unit": u.Mearth, "rtol": 1e-4},
"log.final.rr.Obliquity": {"value": 0.000000, "unit": u.rad, "rtol": 1e-4},
"log.final.rr.PrecA": {"value": 0.000000, "unit": u.rad, "rtol": 1e-4},
"log.final.rr.Xobl": {"value": 1.563665e-162, "rtol": 1e-4},
"log.final.rr.Yobl": {"value": 0.000000, "rtol": 1e-4},
"log.final.rr.Zobl": {"value": 1.000000, "rtol": 1e-4},
"log.final.rr.Radius": {"value": 2.095926e08, "unit": u.m, "rtol": 1e-4},
"log.final.rr.RadGyra": {"value": 0.400000, "rtol": 1e-4},
"log.final.rr.RotAngMom": {
"value": 5.561691e35,
"unit": (u.kg * u.m ** 2) / u.sec,
"rtol": 1e-4,
},
"log.final.rr.RotKinEnergy": {
"value": 1.842803e30,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.rr.RotVel": {
"value": 1388.922578,
"unit": u.m / u.sec,
"rtol": 1e-4,
},
"log.final.rr.BodyType": {"value": 0.000000, "rtol": 1e-4},
"log.final.rr.RotRate": {
"value": 6.626773e-06,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.rr.RotPer": {"value": 10.973977, "unit": u.day, "rtol": 1e-4},
"log.final.rr.Density": {
"value": 0.309611,
"unit": u.kg / u.m ** 3,
"rtol": 1e-4,
},
"log.final.rr.SurfEnFluxTotal": {
"value": 33.332187,
"unit": u.kg / u.sec ** 3,
"rtol": 1e-4,
},
"log.final.rr.TidalQ": {"value": -1.000000e05, "rtol": 1e-4},
"log.final.rr.ImK2": {"value": -5.000000e-06, "rtol": 1e-4},
"log.final.rr.K2": {"value": 0.500000, "rtol": 1e-4},
"log.final.rr.K2Man": {"value": 0.300000, "rtol": 1e-4},
"log.final.rr.Imk2Man": {"value": -0.003000, "rtol": 1e-4},
"log.final.rr.TidalQMantle": {"value": 100.000000, "rtol": 1e-4},
"log.final.rr.HEcc": {"value": 0.000000, "rtol": 1e-4},
"log.final.rr.HZLimitDryRunaway": {
"value": 3.043303e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.rr.HZLimRecVenus": {"value": 2.502002e09, "unit": u.m, "rtol": 1e-4},
"log.final.rr.HZLimRunaway": {"value": 3.267138e09, "unit": u.m, "rtol": 1e-4},
"log.final.rr.HZLimMoistGreenhouse": {
"value": 3.310536e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.rr.HZLimMaxGreenhouse": {
"value": 5.611497e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.rr.HZLimEarlyMars": {
"value": 6.122597e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.rr.Instellation": {
"value": 73.379922,
"unit": u.kg / u.sec ** 3,
"rtol": 1e-4,
},
"log.final.rr.KEcc": {"value": 0.068275, "rtol": 1e-4},
"log.final.rr.Eccentricity": {"value": 0.068275, "rtol": 1e-4},
"log.final.rr.OrbEnergy": {
"value": -5.480386e34,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.rr.MeanMotion": {
"value": 6.626773e-06,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.rr.OrbPeriod": {"value": 9.481516e05, "unit": u.sec, "rtol": 1e-4},
"log.final.rr.SemiMajorAxis": {"value": 0.096645, "unit": u.au, "rtol": 1e-4},
"log.final.rr.CriticalSemiMajorAxis": {
"value": -1.000000,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.rr.COPP": {"value": 0.000000, "rtol": 1e-4},
"log.final.rr.OrbAngMom": {
"value": 1.650154e40,
"unit": (u.kg * u.m ** 2) / u.sec,
"rtol": 1e-4,
},
"log.final.rr.LongP": {"value": 0.000000, "unit": u.rad, "rtol": 1e-4},
"log.final.rr.LXUVTot": {
"value": -1.000000,
"unit": u.kg / u.sec ** 3,
"rtol": 1e-4,
},
"log.final.rr.TotOrbEnergy": {
"value": -1.740032e35,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.rr.OrbPotEnergy": {
"value": -1.096077e35,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.rr.LostEnergy": {
"value": 2.725921e33,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.rr.TidalRadius": {"value": 2.095926e08, "unit": u.m, "rtol": 1e-4},
"log.final.rr.DsemiDtEqtide": {
"value": -4.999833e-06,
"unit": u.m / u.sec,
"rtol": 1e-4,
},
"log.final.rr.DeccDtEqtide": {
"value": -2.532564e-15,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.rr.DMeanMotionDtEqtide": {
"value": 3.437522e-21,
"unit": 1 / u.sec ** 2,
"rtol": 1e-4,
},
"log.final.rr.DOrbPerDtEqtide": {"value": -4.918370e-10, "rtol": 1e-4},
"log.final.rr.EccTimeEqtide": {
"value": 2.695885e13,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.rr.SemiTimeEqtide": {
"value": 2.891664e15,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.rr.DHEccDtEqtide": {
"value": -0.000000,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.rr.DKEccDtEqtide": {
"value": -2.532564e-15,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.rr.DXoblDtEqtide": {
"value": 0.000000,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.rr.DYoblDtEqtide": {
"value": 0.000000,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.rr.DZoblDtEqtide": {
"value": 0.000000,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.rr.LockTime": {"value": 1.711407e11, "unit": u.sec, "rtol": 1e-4},
"log.final.rr.BodyDsemiDtEqtide": {"value": -1.000000, "rtol": 1e-4},
"log.final.rr.BodyDeccDt": {"value": -1.000000, "rtol": 1e-4},
"log.final.rr.DOblDtEqtide": {
"value": 0.000000,
"unit": u.rad / u.sec,
"rtol": 1e-4,
},
"log.final.rr.DRotPerDtEqtide": {"value": -7.959031e-298, "rtol": 1e-4},
"log.final.rr.DRotRateDtEqtide": {
"value": 5.562685e-309,
"unit": 1 / u.sec ** 2,
"rtol": 1e-4,
},
"log.final.rr.EqRotRateDiscrete": {
"value": 6.626773e-06,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.rr.EqRotPerDiscrete": {
"value": 9.481516e05,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.rr.EqRotRateCont": {
"value": 6.920233e-06,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.rr.EqRotPerCont": {
"value": 9.079442e05,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.rr.EqRotPer": {"value": 9.481516e05, "unit": u.sec, "rtol": 1e-4},
"log.final.rr.EqTidePower": {
"value": 0.000000,
"unit": 1 / u.sec,
"rtol": 1e-4,
},
"log.final.rr.GammaRot": {"value": -1.000000, "unit": u.sec, "rtol": 1e-4},
"log.final.rr.GammaOrb": {"value": -1.000000, "unit": u.sec, "rtol": 1e-4},
"log.final.rr.OceanK2": {"value": 0.010000, "rtol": 1e-4},
"log.final.rr.EnvTidalQ": {"value": -1.000000, "rtol": 1e-4},
"log.final.rr.OceanTidalQ": {"value": -1.000000, "rtol": 1e-4},
"log.final.rr.TideLock": {"value": 1.000000, "rtol": 1e-4},
"log.final.rr.RotTimeEqtide": {
"value": 1.191290e303,
"unit": u.sec,
"rtol": 1e-4,
},
"log.final.rr.EnvK2": {"value": 0.500000, "rtol": 1e-4},
"log.final.rr.OblTimeEqtide": {"value": -1.000000, "rtol": 1e-4},
"log.final.rr.PowerEqtide": {"value": 1.895236e19, "unit": u.W, "rtol": 1e-4},
"log.final.rr.SurfEnFluxEqtide": {
"value": 34.332187,
"unit": u.kg / u.sec ** 3,
"rtol": 1e-4,
},
"log.final.rr.SurfWaterMass": {"value": 0.000000, "unit": u.kg, "rtol": 1e-4},
"log.final.rr.EnvelopeMass": {
"value": 0.999399,
"unit": u.Mearth,
"rtol": 1e-4,
},
"log.final.rr.OxygenMass": {"value": 0.000000, "unit": u.kg, "rtol": 1e-4},
"log.final.rr.RGLimit": {"value": 3.127270e09, "unit": u.m, "rtol": 1e-4},
"log.final.rr.XO": {"value": 0.000000, "rtol": 1e-4},
"log.final.rr.EtaO": {"value": 0.000000, "rtol": 1e-4},
"log.final.rr.PlanetRadius": {
"value": 32.861293,
"unit": u.Rearth,
"rtol": 1e-4,
},
"log.final.rr.OxygenMantleMass": {
"value": 0.000000,
"unit": u.kg,
"rtol": 1e-4,
},
"log.final.rr.RadXUV": {"value": -1.000000, "unit": u.m, "rtol": 1e-4},
"log.final.rr.RadSolid": {"value": -1.000000, "unit": u.m, "rtol": 1e-4},
"log.final.rr.PresXUV": {"value": 5.000000, "rtol": 1e-4},
"log.final.rr.ScaleHeight": {"value": -1.000000, "unit": u.m, "rtol": 1e-4},
"log.final.rr.ThermTemp": {"value": 400.000000, "unit": u.K, "rtol": 1e-4},
"log.final.rr.AtmGasConst": {"value": 4124.000000, "rtol": 1e-4},
"log.final.rr.PresSurf": {"value": -1.000000, "unit": u.Pa, "rtol": 1e-4},
"log.final.rr.DEnvMassDt": {
"value": -1.147322e08,
"unit": u.kg / u.sec,
"rtol": 1e-4,
},
"log.final.rr.FXUV": {"value": 0.073380, "unit": u.W / u.m ** 2, "rtol": 1e-4},
"log.final.rr.AtmXAbsEffH2O": {"value": 0.300000, "rtol": 1e-4},
"log.final.rr.RocheRadius": {"value": 1.822097e08, "unit": u.m, "rtol": 1e-4},
"log.final.rr.BondiRadius": {"value": 8.033012e08, "unit": u.m, "rtol": 1e-4},
"log.final.rr.HEscapeRegime": {"value": 6.000000, "rtol": 1e-4},
"log.final.rr.RRCriticalFlux": {
"value": 0.000139,
"unit": u.W / u.m ** 2,
"rtol": 1e-4,
},
"log.final.rr.KTide": {"value": 1.000000, "rtol": 1e-4},
"log.final.rr.RGDuration": {"value": 1.00000e06, "unit": u.yr, "rtol": 1e-4},
}
)
class TestLopez12CPL(Benchmark):
pass
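# A minimal, standalone illustration (not the Benchmark harness itself) of how
# entries like the ones above could be checked: each expected value carries an
# optional astropy-style unit plus a relative tolerance. The `u` used in the
# dict above is assumed to be an astropy-compatible units module; plain astropy
# is used here under a different alias to keep the sketch self-contained.
import numpy as np
import astropy.units as apu

def check_entry(actual, expected):
    # Convert the simulated quantity into the benchmark's unit (if any),
    # then compare using the entry's relative tolerance (default 1e-4).
    unit = expected.get("unit")
    if unit is not None:
        actual = actual.to(unit).value
    assert np.isclose(actual, expected["value"], rtol=expected.get("rtol", 1e-4))

# Hypothetical usage with a plain astropy quantity:
check_entry(9.650538e05 * apu.s, {"value": 9.650538e05, "unit": apu.s, "rtol": 1e-4})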
| 0
| 20
| 22
|
35b4a123604f3ad39f518c8cd7cd58c05193a395
| 7,579
|
py
|
Python
|
common-python/rest_wrappers/oc/oc/upload_storage_object.py
|
LaudateCorpus1/atg-commerce-iaas
|
f1ae31657fc0111a5c019d46a28a3c81aae1acb2
|
[
"MIT"
] | 28
|
2016-11-07T14:03:25.000Z
|
2022-02-01T08:46:52.000Z
|
common-python/rest_wrappers/oc/oc/upload_storage_object.py
|
LaudateCorpus1/atg-commerce-iaas
|
f1ae31657fc0111a5c019d46a28a3c81aae1acb2
|
[
"MIT"
] | 3
|
2016-11-09T13:23:03.000Z
|
2018-04-05T15:49:22.000Z
|
common-python/rest_wrappers/oc/oc/upload_storage_object.py
|
LaudateCorpus1/atg-commerce-iaas
|
f1ae31657fc0111a5c019d46a28a3c81aae1acb2
|
[
"MIT"
] | 13
|
2016-10-27T17:59:38.000Z
|
2022-02-18T04:38:38.000Z
|
#!/usr/bin/python
# Copyright (c) 2013, 2014-2017 Oracle and/or its affiliates. All rights reserved.
"""Provide Module Description
"""
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
__author__ = "Andrew Hopkinson (Oracle Cloud Solutions A-Team)"
__copyright__ = "Copyright (c) 2013, 2014-2017 Oracle and/or its affiliates. All rights reserved."
__ekitversion__ = "@VERSION@"
__ekitrelease__ = "@RELEASE@"
__version__ = "1.0.0.0"
__date__ = "@BUILDDATE@"
__status__ = "Development"
__module__ = "upload_storage_object"
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
import datetime
import getopt
import hashlib
import json
import locale
import logging
import multiprocessing
import operator
import os
import requests
import shutil
import subprocess
import sys
import tempfile
from contextlib import closing
# Import utility methods
from oscsutils import callRESTApi
from oscsutils import getPassword
from oscsutils import printJSON
from authenticate_oscs import authenticate
from oc_exceptions import REST401Exception
# Define methods
# Read Module Arguments
# Main processing function
# Main function to kick off processing
if __name__ == "__main__":
main(sys.argv[1:])
| 35.919431
| 197
| 0.613933
|
#!/usr/bin/python
# Copyright (c) 2013, 2014-2017 Oracle and/or its affiliates. All rights reserved.
"""Provide Module Description
"""
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
__author__ = "Andrew Hopkinson (Oracle Cloud Solutions A-Team)"
__copyright__ = "Copyright (c) 2013, 2014-2017 Oracle and/or its affiliates. All rights reserved."
__ekitversion__ = "@VERSION@"
__ekitrelease__ = "@RELEASE@"
__version__ = "1.0.0.0"
__date__ = "@BUILDDATE@"
__status__ = "Development"
__module__ = "upload_storage_object"
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
import datetime
import getopt
import hashlib
import json
import locale
import logging
import multiprocessing
import operator
import os
import requests
import shutil
import subprocess
import sys
import tempfile
from contextlib import closing
# Import utility methods
from oscsutils import callRESTApi
from oscsutils import getPassword
from oscsutils import printJSON
from authenticate_oscs import authenticate
from oc_exceptions import REST401Exception
# Define methods
def md5(fname, readbuf=104857600, **kwargs):
hash_md5 = hashlib.md5()
cnt = 1
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(readbuf), b""):
hash_md5.update(chunk)
#print('Chunk: '+str(cnt))
cnt +=1
return hash_md5.hexdigest()
def getsplitprefix(filename):
return os.path.split(filename)[-1] + '-'
def getsplitdir(filename):
return filename + '.split'
def splitfile(filename, size='5GB', **kwargs):
files = []
if filename is not None:
splitdir = getsplitdir(filename)
os.makedirs(splitdir)
prefix = os.path.join(splitdir, getsplitprefix(filename))
cmd = ['split', '-b', size, filename, prefix]
cmdEnv = dict(os.environ)
outputLines = []
with closing(tempfile.TemporaryFile()) as fout:
try:
outputLines = subprocess.check_output(cmd, env=cmdEnv, stderr=fout).splitlines()
except subprocess.CalledProcessError as e:
fout.flush()
fout.seek(0)
print(fout.read())
print('\n'.join(outputLines))
raise e
return [os.path.join(splitdir, fn) for fn in os.listdir(splitdir)]
def uploadfile(args):
    # unpack the per-segment work item built in uploadStorageObject below
    # (the original tuple-parameter signature is Python 2-only syntax)
    (endpoint, basepath, authtoken, filename, authendpoint, user, password,
     headers, params) = args
print('Uploading : ' + filename)
files = None
resourcename = os.path.split(filename)[-1]
try:
with closing(open(filename, 'rb')) as f:
response = callRESTApi(endpoint, basepath, resourcename, method='PUT', authtoken=authtoken, headers=headers, params=params, data=f, files=files)
except REST401Exception as e:
# Reauthenticate and retry
if authendpoint is not None and user is not None and password is not None:
authtoken, endpoint = authenticate(authendpoint, user, password)
with closing(open(filename, 'rb')) as f:
response = callRESTApi(endpoint, basepath, resourcename, method='PUT', authtoken=authtoken, headers=headers, params=params, data=f, files=files)
else:
raise
print('Uploaded : ' + filename)
return
def uploadStorageObject(endpoint, container='compute_images', authtoken=None, filename=None, splitsize=4000, poolsize=4, authendpoint=None, user=None, password=None, extractarchive=None, **kwargs):
basepath = container
imgbasepath = basepath
splitbasepath = basepath + '_segments'
headers = None
params = None
if extractarchive is not None:
if params is None:
params = {}
params['extract-archive'] = extractarchive
data = None
files = None
jsonResponse = ''
if filename is not None and os.path.exists(filename):
#md5hash = md5(filename)
filesize = os.path.getsize(filename)
filesize /= (1024 * 1024)
if filesize > splitsize:
print('Splitting : ' + filename)
filelist = splitfile(filename, str(splitsize) + 'MB')
print('Into ' + str(len(filelist)) + ' segments')
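            # Segments are written under "<container>_segments/<filename>/_segment_"
            # so the zero-byte manifest PUT below can reference them all by prefix
            # (the OpenStack Swift dynamic large object convention).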
basepath = splitbasepath + '/' + os.path.split(filename)[-1] + '/_segment_'
pool = multiprocessing.Pool(poolsize)
            # Build tuple list
workerdata = []
for fn in filelist:
workerdata.append([endpoint, basepath, authtoken, fn, authendpoint, user, password, headers, params])
#print(workerdata)
# Start processes
pool.map(uploadfile, workerdata)
# Upload manifest file to point to parts
manifest = basepath + '/' + getsplitprefix(filename)
resourcename = os.path.split(filename)[-1]
headers = {'Content-Length': "0", 'X-Object-Manifest': manifest}
printJSON(headers)
data = None
basepath = imgbasepath
try:
response = callRESTApi(endpoint, basepath, resourcename, method='PUT', authtoken=authtoken, headers=headers, params=params, data=data, files=files)
except REST401Exception as e:
# Reauthenticate and retry
if authendpoint is not None and user is not None and password is not None:
authtoken, endpoint = authenticate(authendpoint, user, password)
response = callRESTApi(endpoint, basepath, resourcename, method='PUT', authtoken=authtoken, headers=headers, params=params, data=data, files=files)
else:
raise
# Remove splitfiles
splitdir = getsplitdir(filename)
shutil.rmtree(splitdir)
else:
# Simple single file upload
basepath = imgbasepath
# Upload file
print('Uploading : ' + filename)
resourcename = os.path.split(filename)[-1]
with closing(open(filename, 'rb')) as f:
response = callRESTApi(endpoint, basepath, resourcename, method='PUT', authtoken=authtoken, headers=headers, params=params, data=f, files=files)
print('Uploaded : ' + filename)
jsonResponse = response.text
return jsonResponse
# Read Module Arguments
def readModuleArgs(opts, args):
moduleArgs = {}
moduleArgs['endpoint'] = None
moduleArgs['user'] = None
moduleArgs['password'] = None
moduleArgs['pwdfile'] = None
# Read Module Command Line Arguments.
for opt, arg in opts:
if opt in ("-e", "--endpoint"):
moduleArgs['endpoint'] = arg
elif opt in ("-u", "--user"):
moduleArgs['user'] = arg
elif opt in ("-p", "--password"):
moduleArgs['password'] = arg
elif opt in ("-P", "--pwdfile"):
moduleArgs['pwdfile'] = arg
return moduleArgs
# Main processing function
def main(argv):
# Configure Parameters and Options
options = 'e:u:p:P:'
longOptions = ['endpoint=', 'user=', 'password=', 'pwdfile=']
# Get Options & Arguments
try:
opts, args = getopt.getopt(argv, options, longOptions)
# Read Module Arguments
moduleArgs = readModuleArgs(opts, args)
except getopt.GetoptError:
usage()
except Exception as e:
        print('Unknown exception, please check the log file')
logging.exception(e)
sys.exit(1)
return
# Main function to kick off processing
if __name__ == "__main__":
main(sys.argv[1:])
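# A standalone sketch of the manifest PUT that uploadStorageObject performs
# after all segments are uploaded, using plain `requests` instead of the
# callRESTApi wrapper above. The endpoint/token handling here is hypothetical
# (X-Auth-Token is assumed to be the Swift-style auth header); only the
# zero-length body and X-Object-Manifest header mirror the code above.
def put_manifest_sketch(endpoint, token, container, objname):
    import requests
    # prefix shared by all uploaded segments, matching the paths built above
    segment_prefix = "{0}_segments/{1}/_segment_/{1}-".format(container, objname)
    url = "{0}/{1}/{2}".format(endpoint, container, objname)
    headers = {
        "X-Auth-Token": token,                # assumed Swift-style auth header
        "Content-Length": "0",                # the manifest itself carries no data
        "X-Object-Manifest": segment_prefix,  # tells Swift where the segments live
    }
    return requests.put(url, headers=headers)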
| 6,118
| 0
| 181
|
70aa394b1e7534f0761f177159418f6363ceeb78
| 14,891
|
py
|
Python
|
snpy/get_osc.py
|
emirkmo/snpy
|
2a0153c84477ba8a30310d7dbca3d5a8f24de3c6
|
[
"MIT"
] | 6
|
2019-01-14T19:40:45.000Z
|
2021-06-05T12:19:39.000Z
|
snpy/get_osc.py
|
emirkmo/snpy
|
2a0153c84477ba8a30310d7dbca3d5a8f24de3c6
|
[
"MIT"
] | 3
|
2017-04-25T20:06:22.000Z
|
2021-06-09T20:46:41.000Z
|
snpy/get_osc.py
|
emirkmo/snpy
|
2a0153c84477ba8a30310d7dbca3d5a8f24de3c6
|
[
"MIT"
] | 8
|
2017-04-25T19:57:57.000Z
|
2021-11-12T11:54:19.000Z
|
'''
Module for SNooPy to download/parse data from the Open Supernova Catalog.
'''
from __future__ import print_function
import six
import json
if six.PY3:
import urllib.request as urllib
else:
import urllib
from astropy.coordinates import Angle
from snpy import sn,lc,fset
from numpy import array,log10
import astropy.units as u
from snpy.filters import spectrum
from snpy.specobj import timespec
# Some well-known publications and their mappings:
pubs = {
'1999AJ....117..707R': # Riess et al. (1999) Standard Photometry
CfAbands,
'2006AJ....131..527J': # Jha et al. (2006) Standard Photometry
CfAbands,
'2009ApJ...700..331H': # Hicken et al. (2009) CfA3 Natural Photometry
CfAbands,
'2012ApJS..200...12H': # Hicken et al. (2012) CfA4 Natural Photometry
CfAbands
}
# telescope,band --> SNooPy filter database
# We do this by matching (band,system,telescope,observatory) info from the
# database to SNooPy filters.
ftrans = {}
ftrans_standard = {}
standard_warnings = {}
for band in ['u','g','r','i','B','V','Y','J','H','K']:
ftrans[(band,"CSP",'',"LCO")] = band
for band in ['U','B','V','R','I']:
ftrans[(band,'','kait2','')] = band+'kait'
for band in ['U','B','V','R','I']:
ftrans[(band,'','kait3','')] = band+'kait'
for band in ['J','H','Ks']:
ftrans[(band,'','PAIRITEL','')] = band+'2m'
for band in ['B','V','R','I']:
ftrans[(band,'','kait4', '')] = band+'kait'
for band in ['U','V','B']:
ftrans[(band, 'Vega','Swift','')] = band+"_UVOT"
for band in ['UVW1','UVW2','UVM2']:
ftrans[(band, 'Vega','Swift','')] = band
for band in ['g','r','i','z']:
ftrans[(band, '', 'PS1','')] = "ps1_"+band
# These are for data in what I'm assuming would be standard filters.
# We will issue a warning, though.
for band in ['U','B','V','R','I']:
ftrans_standard[(band,'','','')] = band+"s"
standard_warnings[band] = "Johnson/Kron/Cousins "
for band in ['u','g','r','i','z']:
ftrans_standard[(band,'','','')] = band+"_40"
standard_warnings[band] = "Sloan (APO) "
for band in ["u'","g'","r'","i'","z'"]:
ftrans_standard[(band,'','','')] = band[0]+"_40"
standard_warnings[band] = "Sloan (USNO-40) "
for band in ["J","H","Ks"]:
ftrans_standard[(band[0],'','','')] = band+"2m"
standard_warnings[band[0]] = "2MASS "
# Our own photometric systems:
def CSP_systems(filt, MJD):
'''Given a filter name and MJD date, output the correct telescope and
system information.'''
if filt == "V":
if MJD < 53748.0:
return (dict(telescope='Swope',instrument='Site2',band='V-3014',
zeropoint="{:.4f}".format(fset['V0'].zp)))
elif MJD < 53759.0:
return (dict(telescope='Swope',instrument='Site2',band='V-3009',
zeropoint="{:.4f}".format(fset['V1'].zp)))
elif MJD < 56566.0:
return (dict(telescope='Swope',instrument='Site2',band='V-9844',
zeropoint="{:.4f}".format(fset['V'].zp)))
else:
return (dict(telescope='Swope',instrument='e2v',band='V-9844',
zeropoint="{:.4f}".format(fset['V2'].zp)))
if filt == "Jrc2":
return (dict(telescope='Swope',instrument='RetroCam',band='J',
zeropoint="{:.4f}".format(fset[filt].zp)))
if filt in ['u','g','r','i','B']:
if MJD < 56566.0:
return (dict(telescope='Swope',instrument='Site2',band=filt,
zeropoint="{:.4f}".format(fset[filt].zp)))
else:
return (dict(telescope='Swope',instrument='e2v',band=filt,
zeropoint="{:.4f}".format(fset[filt+'2'].zp)))
if filt in ['Y','J','H']:
if MJD < 55743.0:
return (dict(telescope='Swope',instrument='RetroCam',band=filt,
zeropoint="{:.4f}".format(fset[filt].zp)))
else:
return (dict(telescope='DuPont',instrument='RetroCam',band=filt,
zeropoint="{:.4f}".format(fset[filt+'d'].zp)))
return({})
MJD_offsets = {
'MJD':0,
'JD':-2400000.5
}
warning_message = {
'upperlims_noerr':'Warning: Data lacking errorbars or with upper-limits not imported',
'upperlims':'Warning: Data with upper-limits not imported',
}
OSC_template = '''https://sne.space/astrocats/astrocats/supernovae/output/json/{}.json'''
def get_obj(url, full_data=True, allow_no_errors=False, missing_error=0.01):
   '''Attempt to build a SNooPy object from an Open Supernova Catalog server
URL.'''
if url.find('osc:') == 0:
# Try to construct a url based only on a name.
url = OSC_template.format(url.split(':')[1])
try:
uf = urllib.urlopen(url)
except:
return None,"Invalid URL"
try:
d = json.load(uf)
except:
uf.close()
      return None,"Failed to decode JSON"
else:
uf.close()
# We now have the JSON data. Get the info we need
d = list(d.values())[0]
name = d['name']
if 'redshift' not in d or 'ra' not in d or 'dec' not in d:
return None,"No redshift, RA, or DEC found"
zhel = float(d['redshift'][0]['value'])
ra = Angle(" ".join([d['ra'][0]['value'],d['ra'][0]['u_value']])).degree
decl = Angle(" ".join([d['dec'][0]['value'],d['dec'][0]['u_value']])).degree
snobj = sn(name, ra=ra, dec=decl, z=zhel)
# All primary sources
all_sources_dict = [item for item in d['sources'] \
if not item.get('secondary',False)]
all_sources_dict2 = [item for item in d['sources'] \
if item.get('secondary',False)]
all_sources = {}
for source in all_sources_dict:
all_sources[source['alias']] = (source.get('bibcode',''),
source.get('reference',''))
all_sources2 = {}
for source in all_sources_dict2:
all_sources2[source['alias']] = (source.get('bibcode',''),
source.get('reference',''))
# Next, the photometry.
used_sources = []
MJD = {}
mags = {}
emags = {}
sids = {}
known_unknowns = []
unknown_unknowns = []
warnings = []
photometry = d.get('photometry', [])
for p in photometry:
if p.get('upperlimit',False):
continue
t = (p.get('band',''),p.get('system',''),p.get('telescope',''),
p.get('observatory',''))
# Deal with source of photometry
ss = p.get('source').split(',')
this_source = None
for s in ss:
if s in all_sources:
this_source = all_sources[s]
break
if this_source is None:
for s in ss:
if s in all_sources2:
this_source = all_sources2[s]
if this_source is None:
print("Warning: no primary source, skipping")
continue
bibcode = this_source[0]
if bibcode in pubs:
b = pubs[bibcode](t[0],float(p['time']))
elif t in ftrans:
b = ftrans[t]
elif t in ftrans_standard:
b = ftrans_standard[t]
if t not in known_unknowns:
known_unknowns.append(t)
print("Warning: no telescope/system info, assuming ", \
standard_warnings[b[0]], b[0])
elif (t[0],"","","") in ftrans_standard:
b = ftrans_standard[(t[0],"","","")]
if t not in known_unknowns:
known_unknowns.append(t)
print("Warning: telescope/system defined by %s/%s/%s not "\
"recognized, assuming %s %s" %\
(t[1],t[2],t[3],standard_warnings[t[0]],t[0]))
else:
# No idea
if t not in unknown_unknowns:
unknown_unknowns.append(t)
print("Warning: telescope/system defined by %s/%s/%s not "\
"recognized and can't figure out the filter %s" % \
(t[1],t[2],t[3],t[0]))
unknown_unknowns.append(t)
continue
if b not in MJD:
MJD[b] = []
mags[b] = []
emags[b] = []
sids[b] = []
if 'time' in p and 'magnitude' in p:
if not allow_no_errors and 'e_magnitude' not in p and\
'e_lower_magnitude' not in p and 'e_upper_magnitude' not in p:
if 'upperlims' not in warnings: warnings.append('upperlims')
continue
MJD[b].append(float(p['time']))
mags[b].append(float(p['magnitude']))
if 'e_magnitude' in p:
emags[b].append(float(p['e_magnitude']))
elif 'e_lower_magnitude' in p and 'e_upper_magnitude' in p:
emags[b].append((float(p['e_lower_magnitude']) +\
float(p['e_upper_magnitude']))/2)
else:
emags[b].append(missing_error)
elif 'time' in p and 'countrate' in p and 'zeropoint' in p:
if not allow_no_errors and 'e_countrate' not in p:
if 'upperlims' not in warnings: warnings.append('upperlims')
continue
if float(p['countrate']) < 0: continue
MJD[b].append(float(p['time']))
mags[b].append(-2.5*log10(float(p['countrate'])) + \
float(p['zeropoint']))
ec = p.get('e_countrate',None)
if ec is not None:
emags[b].append(1.087*float(p['e_countrate'])/float(p['countrate']))
else:
emags[b].append(missing_error)
else:
if 'upperlims_noerr' not in warnings:
warnings.append('upperlims_noerr')
continue
if this_source not in used_sources:
used_sources.append(this_source)
# At this point we're actually using the photometry, so find source
sid = used_sources.index(this_source)
sids[b].append(sid)
for b in MJD:
if len(MJD[b]) > 0:
snobj.data[b] = lc(snobj, b, array(MJD[b]), array(mags[b]),
array(emags[b]), sids=array(sids[b], dtype=int))
snobj.data[b].time_sort()
snobj.sources = used_sources
snobj.get_restbands()
if len(unknown_unknowns) > 0:
unknown_unknowns = list(set(unknown_unknowns))
print("Warning: the following photometry was not recognized by SNooPy")
print("and was not imported:")
for item in unknown_unknowns:
print(item)
if warnings:
for warning in warnings:
print(warning_message[warning])
# lastly, the spectroscopy
if d.get('spectra',None) is not None:
spectra = []
dates = []
sids = []
for s in d['spectra']:
         wu = s.get('u_wavelengths', 'Angstrom')
fu = s.get('u_fluxes', 'Uncalibrated')
try:
wu = u.Unit(wu)
except ValueError:
print("Warning: unrecognized unit for wavelength: {}".format(wu))
print(" assuming Angstroms")
wu = u.Angstrom
if fu == 'Uncalibrated':
fluxed = False
fu = u.dimensionless_unscaled
else:
try:
fu = u.Unit(fu)
fluxed = True
except ValueError:
print("Warning: unrecognized unit for flux: {}".format(fu))
fluxed = False
fu = u.dimensionless_unscaled
tu = s.get('u_time', 'MJD')
t = float(s['time'])
if tu not in MJD_offsets:
print("Warning: unrecognized time unit: {}".format(tu))
if len(s['time'].split('.')[0]) == 7 and s['time'][0] == '2':
print(" assuming JD")
t = t - 2400000.5
elif len(s['time'].split('.')[0]) == 5 and s['time'][0] == '5':
print(" assuming MJD")
else:
print(" skipping this spectrum.")
continue
w = array([float(item[0]) for item in s['data']])*wu
f = array([float(item[1]) for item in s['data']])*fu
dr = s.get('deredshifted', False)
if dr:
w = w*(1+zhel)
# At this point, we should be able to convert to the units we want
w = w.to('Angstrom').value
if fluxed: f = f.to('erg / (s cm2 Angstrom)')
f = f.value
# source reference
srcs = s.get('source','').split(',')
this_source = None
for src in srcs:
if src in all_sources:
this_source = all_sources[src]
break
if this_source is None:
print("Warning: spectrum has no source")
if this_source not in used_sources:
used_sources.append(this_source)
# At this point we're actually using the spectroscopy, so find source
sid = used_sources.index(this_source)
sids.append(sid)
spectra.append(spectrum(wave=w, flux=f, fluxed=fluxed,
name="Spectrum MJD={:.1f}".format(t)))
dates.append(t)
snobj.sdata = timespec(snobj, dates, spectra)
snobj.sdata.sids = sids
if full_data:
# make a dictionary of the remaining OSC meta data and make it a member
# variable
snobj.osc_meta = {}
for key in d.keys():
if key not in ['name','redshift','ra','dec','sources','photometry',
'spectra']:
snobj.osc_meta[key] = d[key]
return(snobj,'Success')
def to_osc(s, ref=None, bibcode=None, source=1):
'''Given a supernova object, s, output to JSON format suitable for upload to
the OSC.'''
data = {s.name:{"name":s.name}}
if ref or bibcode:
sources = [dict(bibcode=bibcode, name=ref, alias=str(source))]
data['sources'] = sources
phot = []
for filt in s.data:
for i in range(len(s.data[filt].MJD)):
datum = dict(survey='CSP', observatory='LCO')
datum.update(CSP_systems(filt, s.data[filt].MJD[i]))
datum['time'] = "{:.3f}".format(s.data[filt].MJD[i])
datum['u_time'] = "MJD"
datum['magnitude'] = "{:.3f}".format(s.data[filt].mag[i])
flux,eflux = s.data[filt].flux[i],s.data[filt].e_flux[i]
datum['flux'] = "{:.5f}".format(flux)
datum['u_flux'] = "s^-1 cm^-2"
datum['e_flux'] = "{:.5f}".format(eflux)
datum['e_upper_magnitude'] = "{:.3f}".format(
-2.5*log10((flux-eflux)/flux))
datum['e_lower_magnitude'] = "{:.3f}".format(
-2.5*log10(flux/(flux+eflux)))
datum['source'] = "{}".format(source)
phot.append(datum)
data['photometry'] = phot
return json.dumps(data, indent=4)
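# Hypothetical usage of get_obj above ("SN2011fe" is just an illustrative OSC
# name): the "osc:" shorthand is expanded through OSC_template, so only the
# catalog name is needed.
if __name__ == "__main__":
   snobj, message = get_obj('osc:SN2011fe')
   if snobj is None:
      print("Download failed:", message)
   else:
      print(list(snobj.data.keys()))  # light curves keyed by SNooPy filter name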
| 36.05569
| 92
| 0.555906
|
'''
Module for SNooPy to download/parse data from the Open Supernova Catalog.
'''
from __future__ import print_function
import six
import json
if six.PY3:
import urllib.request as urllib
else:
import urllib
from astropy.coordinates import Angle
from snpy import sn,lc,fset
from numpy import array,log10
import astropy.units as u
from snpy.filters import spectrum
from snpy.specobj import timespec
def CfAbands(filt, MJD):
if MJD < 51913.0:
return filt[0]+'s' # standard photometry
elif 51913.0 < MJD < 55058:
if filt[0] == 'U': return 'U4sh'
if filt[0] == 'I': return 'I4sh'
if filt[0] == 'R': return 'R4sh'
return filt[0]+'k1' # natural photometry CfA3 + CfA4 period 1
else:
if filt[0] == 'U': return 'U4sh'
if filt[0] == 'I': return 'I4sh'
if filt[0] == 'R': return 'R4sh'
return filt[0]+'k2' # natural photometry CfA4 period 2
# Some well-known publications and their mappings:
pubs = {
'1999AJ....117..707R': # Riess et al. (1999) Standard Photometry
CfAbands,
'2006AJ....131..527J': # Jha et al. (2006) Standard Photometry
CfAbands,
'2009ApJ...700..331H': # Hicken et al. (2009) CfA3 Natural Photometry
CfAbands,
'2012ApJS..200...12H': # Hicken et al. (2012) CfA4 Natural Photometry
CfAbands
}
# telescope,band --> SNooPy filter database
# We do this by matching (band,system,telescope,observatory) info from the
# database to SNooPy filters.
ftrans = {}
ftrans_standard = {}
standard_warnings = {}
for band in ['u','g','r','i','B','V','Y','J','H','K']:
ftrans[(band,"CSP",'',"LCO")] = band
for band in ['U','B','V','R','I']:
ftrans[(band,'','kait2','')] = band+'kait'
for band in ['U','B','V','R','I']:
ftrans[(band,'','kait3','')] = band+'kait'
for band in ['J','H','Ks']:
ftrans[(band,'','PAIRITEL','')] = band+'2m'
for band in ['B','V','R','I']:
ftrans[(band,'','kait4', '')] = band+'kait'
for band in ['U','V','B']:
ftrans[(band, 'Vega','Swift','')] = band+"_UVOT"
for band in ['UVW1','UVW2','UVM2']:
ftrans[(band, 'Vega','Swift','')] = band
for band in ['g','r','i','z']:
ftrans[(band, '', 'PS1','')] = "ps1_"+band
# These are for data in what I'm assuming would be standard filters.
# We will issue a warning, though.
for band in ['U','B','V','R','I']:
ftrans_standard[(band,'','','')] = band+"s"
standard_warnings[band] = "Johnson/Kron/Cousins "
for band in ['u','g','r','i','z']:
ftrans_standard[(band,'','','')] = band+"_40"
standard_warnings[band] = "Sloan (APO) "
for band in ["u'","g'","r'","i'","z'"]:
ftrans_standard[(band,'','','')] = band[0]+"_40"
standard_warnings[band] = "Sloan (USNO-40) "
for band in ["J","H","Ks"]:
ftrans_standard[(band[0],'','','')] = band+"2m"
standard_warnings[band[0]] = "2MASS "
# Our own photometric systems:
def CSP_systems(filt, MJD):
'''Given a filter name and MJD date, output the correct telescope and
system information.'''
if filt == "V":
if MJD < 53748.0:
return (dict(telescope='Swope',instrument='Site2',band='V-3014',
zeropoint="{:.4f}".format(fset['V0'].zp)))
elif MJD < 53759.0:
return (dict(telescope='Swope',instrument='Site2',band='V-3009',
zeropoint="{:.4f}".format(fset['V1'].zp)))
elif MJD < 56566.0:
return (dict(telescope='Swope',instrument='Site2',band='V-9844',
zeropoint="{:.4f}".format(fset['V'].zp)))
else:
return (dict(telescope='Swope',instrument='e2v',band='V-9844',
zeropoint="{:.4f}".format(fset['V2'].zp)))
if filt == "Jrc2":
return (dict(telescope='Swope',instrument='RetroCam',band='J',
zeropoint="{:.4f}".format(fset[filt].zp)))
if filt in ['u','g','r','i','B']:
if MJD < 56566.0:
return (dict(telescope='Swope',instrument='Site2',band=filt,
zeropoint="{:.4f}".format(fset[filt].zp)))
else:
return (dict(telescope='Swope',instrument='e2v',band=filt,
zeropoint="{:.4f}".format(fset[filt+'2'].zp)))
if filt in ['Y','J','H']:
if MJD < 55743.0:
return (dict(telescope='Swope',instrument='RetroCam',band=filt,
zeropoint="{:.4f}".format(fset[filt].zp)))
else:
return (dict(telescope='DuPont',instrument='RetroCam',band=filt,
zeropoint="{:.4f}".format(fset[filt+'d'].zp)))
return({})
MJD_offsets = {
'MJD':0,
'JD':-2400000.5
}
warning_message = {
'upperlims_noerr':'Warning: Data lacking errorbars or with upper-limits not imported',
'upperlims':'Warning: Data with upper-limits not imported',
}
OSC_template = '''https://sne.space/astrocats/astrocats/supernovae/output/json/{}.json'''
def get_obj(url, full_data=True, allow_no_errors=False, missing_error=0.01):
   '''Attempt to build a SNooPy object from an Open Supernova Catalog server
URL.'''
if url.find('osc:') == 0:
# Try to construct a url based only on a name.
url = OSC_template.format(url.split(':')[1])
try:
uf = urllib.urlopen(url)
except:
return None,"Invalid URL"
try:
d = json.load(uf)
except:
uf.close()
      return None,"Failed to decode JSON"
else:
uf.close()
# We now have the JSON data. Get the info we need
d = list(d.values())[0]
name = d['name']
if 'redshift' not in d or 'ra' not in d or 'dec' not in d:
return None,"No redshift, RA, or DEC found"
zhel = float(d['redshift'][0]['value'])
ra = Angle(" ".join([d['ra'][0]['value'],d['ra'][0]['u_value']])).degree
decl = Angle(" ".join([d['dec'][0]['value'],d['dec'][0]['u_value']])).degree
snobj = sn(name, ra=ra, dec=decl, z=zhel)
# All primary sources
all_sources_dict = [item for item in d['sources'] \
if not item.get('secondary',False)]
all_sources_dict2 = [item for item in d['sources'] \
if item.get('secondary',False)]
all_sources = {}
for source in all_sources_dict:
all_sources[source['alias']] = (source.get('bibcode',''),
source.get('reference',''))
all_sources2 = {}
for source in all_sources_dict2:
all_sources2[source['alias']] = (source.get('bibcode',''),
source.get('reference',''))
# Next, the photometry.
used_sources = []
MJD = {}
mags = {}
emags = {}
sids = {}
known_unknowns = []
unknown_unknowns = []
warnings = []
photometry = d.get('photometry', [])
for p in photometry:
if p.get('upperlimit',False):
continue
t = (p.get('band',''),p.get('system',''),p.get('telescope',''),
p.get('observatory',''))
# Deal with source of photometry
ss = p.get('source').split(',')
this_source = None
for s in ss:
if s in all_sources:
this_source = all_sources[s]
break
if this_source is None:
for s in ss:
if s in all_sources2:
this_source = all_sources2[s]
if this_source is None:
print("Warning: no primary source, skipping")
continue
bibcode = this_source[0]
if bibcode in pubs:
b = pubs[bibcode](t[0],float(p['time']))
elif t in ftrans:
b = ftrans[t]
elif t in ftrans_standard:
b = ftrans_standard[t]
if t not in known_unknowns:
known_unknowns.append(t)
print("Warning: no telescope/system info, assuming ", \
standard_warnings[b[0]], b[0])
elif (t[0],"","","") in ftrans_standard:
b = ftrans_standard[(t[0],"","","")]
if t not in known_unknowns:
known_unknowns.append(t)
print("Warning: telescope/system defined by %s/%s/%s not "\
"recognized, assuming %s %s" %\
(t[1],t[2],t[3],standard_warnings[t[0]],t[0]))
else:
# No idea
if t not in unknown_unknowns:
unknown_unknowns.append(t)
print("Warning: telescope/system defined by %s/%s/%s not "\
"recognized and can't figure out the filter %s" % \
(t[1],t[2],t[3],t[0]))
continue
if b not in MJD:
MJD[b] = []
mags[b] = []
emags[b] = []
sids[b] = []
if 'time' in p and 'magnitude' in p:
if not allow_no_errors and 'e_magnitude' not in p and\
'e_lower_magnitude' not in p and 'e_upper_magnitude' not in p:
if 'upperlims' not in warnings: warnings.append('upperlims')
continue
MJD[b].append(float(p['time']))
mags[b].append(float(p['magnitude']))
if 'e_magnitude' in p:
emags[b].append(float(p['e_magnitude']))
elif 'e_lower_magnitude' in p and 'e_upper_magnitude' in p:
emags[b].append((float(p['e_lower_magnitude']) +\
float(p['e_upper_magnitude']))/2)
else:
emags[b].append(missing_error)
elif 'time' in p and 'countrate' in p and 'zeropoint' in p:
if not allow_no_errors and 'e_countrate' not in p:
if 'upperlims' not in warnings: warnings.append('upperlims')
continue
if float(p['countrate']) < 0: continue
MJD[b].append(float(p['time']))
mags[b].append(-2.5*log10(float(p['countrate'])) + \
float(p['zeropoint']))
ec = p.get('e_countrate',None)
if ec is not None:
emags[b].append(1.087*float(p['e_countrate'])/float(p['countrate']))
else:
emags[b].append(missing_error)
else:
if 'upperlims_noerr' not in warnings:
warnings.append('upperlims_noerr')
continue
if this_source not in used_sources:
used_sources.append(this_source)
# At this point we're actually using the photometry, so find source
sid = used_sources.index(this_source)
sids[b].append(sid)
for b in MJD:
if len(MJD[b]) > 0:
snobj.data[b] = lc(snobj, b, array(MJD[b]), array(mags[b]),
array(emags[b]), sids=array(sids[b], dtype=int))
snobj.data[b].time_sort()
snobj.sources = used_sources
snobj.get_restbands()
if len(unknown_unknowns) > 0:
unknown_unknowns = list(set(unknown_unknowns))
print("Warning: the following photometry was not recognized by SNooPy")
print("and was not imported:")
for item in unknown_unknowns:
print(item)
if warnings:
for warning in warnings:
print(warning_message[warning])
# lastly, the spectroscopy
if d.get('spectra',None) is not None:
spectra = []
dates = []
sids = []
for s in d['spectra']:
wu = s.get('u_wavelengths', 'Angstrom')
fu = s.get('u_fluxes', 'Uncalibrated')
try:
wu = u.Unit(wu)
except ValueError:
print("Warning: unrecognized unit for wavelength: {}".format(wu))
print(" assuming Angstroms")
wu = u.Angstrom
if fu == 'Uncalibrated':
fluxed = False
fu = u.dimensionless_unscaled
else:
try:
fu = u.Unit(fu)
fluxed = True
except ValueError:
print("Warning: unrecognized unit for flux: {}".format(fu))
fluxed = False
fu = u.dimensionless_unscaled
tu = s.get('u_time', 'MJD')
t = float(s['time'])
if tu not in MJD_offsets:
print("Warning: unrecognized time unit: {}".format(tu))
if len(s['time'].split('.')[0]) == 7 and s['time'][0] == '2':
print(" assuming JD")
t = t - 2400000.5
elif len(s['time'].split('.')[0]) == 5 and s['time'][0] == '5':
print(" assuming MJD")
else:
print(" skipping this spectrum.")
continue
w = array([float(item[0]) for item in s['data']])*wu
f = array([float(item[1]) for item in s['data']])*fu
dr = s.get('deredshifted', False)
if dr:
w = w*(1+zhel)
# At this point, we should be able to convert to the units we want
w = w.to('Angstrom').value
if fluxed: f = f.to('erg / (s cm2 Angstrom)')
f = f.value
# source reference
srcs = s.get('source','').split(',')
this_source = None
for src in srcs:
if src in all_sources:
this_source = all_sources[src]
break
if this_source is None:
print("Warning: spectrum has no source")
if this_source not in used_sources:
used_sources.append(this_source)
# At this point we're actually using the spectroscopy, so find source
sid = used_sources.index(this_source)
sids.append(sid)
spectra.append(spectrum(wave=w, flux=f, fluxed=fluxed,
name="Spectrum MJD={:.1f}".format(t)))
dates.append(t)
snobj.sdata = timespec(snobj, dates, spectra)
snobj.sdata.sids = sids
if full_data:
# make a dictionary of the remaining OSC meta data and make it a member
# variable
snobj.osc_meta = {}
for key in d.keys():
if key not in ['name','redshift','ra','dec','sources','photometry',
'spectra']:
snobj.osc_meta[key] = d[key]
return(snobj,'Success')
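# Minimal usage sketch for get_obj (the event name is a placeholder; requires
# network access to sne.space):
#
#   snobj, message = get_obj('osc:SN2011fe')
#   if snobj is None:
#       print(message)
#   else:
#       print(list(snobj.data.keys()))   # imported light-curve bands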
def to_osc(s, ref=None, bibcode=None, source=1):
'''Given a supernova object, s, output to JSON format suitable for upload to
the OSC.'''
data = {s.name:{"name":s.name}}
if ref or bibcode:
sources = [dict(bibcode=bibcode, name=ref, alias=str(source))]
data[s.name]['sources'] = sources
phot = []
for filt in s.data:
for i in range(len(s.data[filt].MJD)):
datum = dict(survey='CSP', observatory='LCO')
datum.update(CSP_systems(filt, s.data[filt].MJD[i]))
datum['time'] = "{:.3f}".format(s.data[filt].MJD[i])
datum['u_time'] = "MJD"
datum['magnitude'] = "{:.3f}".format(s.data[filt].mag[i])
flux,eflux = s.data[filt].flux[i],s.data[filt].e_flux[i]
datum['flux'] = "{:.5f}".format(flux)
datum['u_flux'] = "s^-1 cm^-2"
datum['e_flux'] = "{:.5f}".format(eflux)
datum['e_upper_magnitude'] = "{:.3f}".format(
-2.5*log10((flux-eflux)/flux))
datum['e_lower_magnitude'] = "{:.3f}".format(
-2.5*log10(flux/(flux+eflux)))
datum['source'] = "{}".format(source)
phot.append(datum)
data[s.name]['photometry'] = phot
return json.dumps(data, indent=4)
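# And the reverse direction: serialize a SNooPy object back to OSC-style
# JSON (a sketch; the reference and bibcode values are placeholders):
#
#   json_str = to_osc(snobj, ref='Some et al. (2020)', bibcode='2020XX...', source=1)
#   with open(snobj.name + '.json', 'w') as f:
#       f.write(json_str)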
| 476
| 0
| 23
|
b838c3e4fd3bce1a2cc716eb2ba8a849168a9356
| 744
|
py
|
Python
|
Day 15 - OOP/main.py
|
secureterminal/100-Days-of-Code
|
04383ae541938d8a551b5aac9a0dad3348a6ef23
|
[
"MIT"
] | 1
|
2022-01-28T13:55:39.000Z
|
2022-01-28T13:55:39.000Z
|
Day 15 - OOP/main.py
|
secureterminal/100-Days-of-Code
|
04383ae541938d8a551b5aac9a0dad3348a6ef23
|
[
"MIT"
] | 1
|
2022-02-02T00:13:18.000Z
|
2022-02-03T11:32:53.000Z
|
Day 15 - OOP/main.py
|
secureterminal/100-Days-of-Code
|
04383ae541938d8a551b5aac9a0dad3348a6ef23
|
[
"MIT"
] | 2
|
2022-02-07T20:49:36.000Z
|
2022-02-19T21:22:15.000Z
|
from menu import Menu, MenuItem
from coffee_maker import CoffeeMaker
from money_machine import MoneyMachine
money_machine = MoneyMachine()
coffee_maker = CoffeeMaker()
menu = Menu()
coffee_maker.report()
money_machine.report()
coffee_machine_is_on = True
while coffee_machine_is_on:
choices = menu.get_items()
user_order = input(f'Please choose a coffee: ({choices})>>> ')
if user_order == 'off':
coffee_machine_is_on = False
elif user_order == 'report':
coffee_maker.report()
money_machine.report()
else:
drink = menu.find_drink(user_order)
if coffee_maker.is_resource_sufficient(drink) and money_machine.make_payment(drink.cost):
coffee_maker.make_coffee(drink)
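# One loop iteration, sketched (assumes menu.find_drink returns a MenuItem
# with a .cost attribute; the values shown are hypothetical):
#
#   drink = menu.find_drink('latte')              # MenuItem('latte', cost=2.5, ...)
#   coffee_maker.is_resource_sufficient(drink)    # False -> reports what's missing
#   money_machine.make_payment(drink.cost)        # prompts for coins, returns bool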
| 24.8
| 97
| 0.717742
|
| 0
| 0
| 0
|
fc8a2c85d00bef3bd3bd075b7a046a93e1e9c68c
| 4,269
|
py
|
Python
|
intera_interface/src/intera_interface/digital_io.py
|
thinclab/intera_sdk
|
556de67a88049687404734404e16b147943cde3c
|
[
"Apache-2.0"
] | 38
|
2017-01-20T15:44:22.000Z
|
2022-01-28T15:15:40.000Z
|
intera_interface/src/intera_interface/digital_io.py
|
thinclab/intera_sdk
|
556de67a88049687404734404e16b147943cde3c
|
[
"Apache-2.0"
] | 47
|
2016-12-16T19:41:03.000Z
|
2022-03-21T14:04:04.000Z
|
intera_interface/src/intera_interface/digital_io.py
|
thinclab/intera_sdk
|
556de67a88049687404734404e16b147943cde3c
|
[
"Apache-2.0"
] | 52
|
2017-02-03T13:26:23.000Z
|
2021-03-16T14:25:51.000Z
|
# Copyright (c) 2013-2018, Rethink Robotics Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import rospy
import intera_dataflow
from intera_core_msgs.msg import (
DigitalIOState,
DigitalOutputCommand,
)
class DigitalIO(object):
"""
DEPRECATION WARNING: This interface will likely be removed in
the future. Transition to using the IO Framework and the wrapper
classes: gripper.py, cuff.py, camera.py
Interface class for a simple Digital Input and/or Output on the
Intera robots.
Input
- read input state
Output
- turn output On/Off
- read current output state
"""
def __init__(self, component_id):
"""
Constructor.
@param component_id: unique id of the digital component
"""
self._id = component_id
self._component_type = 'digital_io'
self._is_output = False
self._state = None
self.state_changed = intera_dataflow.Signal()
type_ns = '/robot/' + self._component_type
topic_base = type_ns + '/' + self._id
self._sub_state = rospy.Subscriber(
topic_base + '/state',
DigitalIOState,
self._on_io_state)
intera_dataflow.wait_for(
lambda: self._state is not None,
timeout=2.0,
timeout_msg="Failed to get current digital_io state from %s" \
% (topic_base,),
)
# check if output-capable before creating publisher
if self._is_output:
self._pub_output = rospy.Publisher(
type_ns + '/command',
DigitalOutputCommand,
queue_size=10)
def _on_io_state(self, msg):
"""
Updates the internally stored state of the Digital Input/Output.
"""
new_state = (msg.state == DigitalIOState.PRESSED)
if self._state is None:
self._is_output = not msg.isInputOnly
old_state = self._state
self._state = new_state
# trigger signal if changed
if old_state is not None and old_state != new_state:
self.state_changed(new_state)
@property
def is_output(self):
"""
Accessor to check if IO is capable of output.
"""
return self._is_output
@property
def state(self):
"""
Current state of the Digital Input/Output.
"""
return self._state
@state.setter
def state(self, value):
"""
Control the state of the Digital Output. (is_output must be True)
@type value: bool
@param value: new state to output {True, False}
"""
self.set_output(value)
def set_output(self, value, timeout=2.0):
"""
Control the state of the Digital Output.
Use this function for finer control over the wait_for timeout.
@type value: bool
@param value: new state {True, False} of the Output.
@type timeout: float
@param timeout: Seconds to wait for the io to reflect command.
If 0, just command once and return. [0]
"""
if not self._is_output:
raise IOError(errno.EACCES, "Component is not an output [%s: %s]" %
(self._component_type, self._id))
cmd = DigitalOutputCommand()
cmd.name = self._id
cmd.value = value
self._pub_output.publish(cmd)
if timeout != 0:
intera_dataflow.wait_for(
test=lambda: self.state == value,
timeout=timeout,
rate=100,
timeout_msg=("Failed to command digital io to: %r" % (value,)),
body=lambda: self._pub_output.publish(cmd)
)
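# Usage sketch (assumes a running ROS master, an initialized node, and a
# hypothetical component id; Signal exposes connect() in intera_dataflow):
#
#   rospy.init_node('digital_io_example')
#   io = DigitalIO('right_valve_1a')
#   print(io.state)                                   # current input/output state
#   io.state_changed.connect(lambda v: print('state ->', v))
#   if io.is_output:
#       io.set_output(True, timeout=2.0)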
| 29.853147
| 79
| 0.606699
|
| 0
| 0
| 0
|
07a2a8bad2c82e238b18e385c8b1b2d9e1a12999
| 2,535
|
py
|
Python
|
tests/models/mysql_dumps_test.py
|
ywlianghang/mysql_streamer
|
7fc85efaca3db6a387ea4b791632c2df2d04cb3e
|
[
"Apache-2.0"
] | 419
|
2016-11-17T18:41:47.000Z
|
2022-03-14T02:50:02.000Z
|
tests/models/mysql_dumps_test.py
|
ywlianghang/mysql_streamer
|
7fc85efaca3db6a387ea4b791632c2df2d04cb3e
|
[
"Apache-2.0"
] | 19
|
2016-11-30T18:09:00.000Z
|
2019-04-02T06:20:02.000Z
|
tests/models/mysql_dumps_test.py
|
ywlianghang/mysql_streamer
|
7fc85efaca3db6a387ea4b791632c2df2d04cb3e
|
[
"Apache-2.0"
] | 90
|
2016-11-23T06:26:20.000Z
|
2022-01-22T09:24:42.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
import pytest
from replication_handler.models.mysql_dumps import MySQLDumps
@pytest.mark.itest
@pytest.mark.itest_db
| 28.483146
| 77
| 0.672189
|
# -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
import pytest
from replication_handler.models.mysql_dumps import MySQLDumps
@pytest.mark.itest
@pytest.mark.itest_db
class TestMySQLDumps(object):
@pytest.fixture
def cluster_name(self):
return 'yelp_main'
@pytest.fixture
def test_dump(self):
return 'This is a test dump'
@pytest.yield_fixture
def initialize_dump(
self,
sandbox_session,
cluster_name,
test_dump
):
assert MySQLDumps.dump_exists(sandbox_session, cluster_name) is False
test_mysql_dump = MySQLDumps.update_mysql_dump(
session=sandbox_session,
database_dump=test_dump,
cluster_name=cluster_name
)
sandbox_session.flush()
assert MySQLDumps.dump_exists(sandbox_session, cluster_name) is True
yield test_mysql_dump
def test_get_latest_mysql_dump(
self,
initialize_dump,
cluster_name,
test_dump,
sandbox_session
):
new_dump = 'This is a new dump'
retrieved_dump = MySQLDumps.get_latest_mysql_dump(
session=sandbox_session,
cluster_name=cluster_name
)
assert retrieved_dump == test_dump
MySQLDumps.update_mysql_dump(
session=sandbox_session,
database_dump=new_dump,
cluster_name=cluster_name
)
returned_new_dump = MySQLDumps.get_latest_mysql_dump(
session=sandbox_session,
cluster_name=cluster_name
)
assert returned_new_dump == new_dump
MySQLDumps.delete_mysql_dump(
session=sandbox_session,
cluster_name=cluster_name
)
dump_exists = MySQLDumps.dump_exists(
session=sandbox_session,
cluster_name=cluster_name
)
assert not dump_exists
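# The API pattern the test exercises, in brief (a sketch; `session` is a
# SQLAlchemy session such as the sandbox_session fixture above):
#
#   MySQLDumps.update_mysql_dump(session=session, database_dump=dump,
#                                cluster_name='yelp_main')
#   latest = MySQLDumps.get_latest_mysql_dump(session=session,
#                                             cluster_name='yelp_main')
#   MySQLDumps.delete_mysql_dump(session=session, cluster_name='yelp_main')
#   assert not MySQLDumps.dump_exists(session=session, cluster_name='yelp_main')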
| 1,533
| 182
| 22
|
0ec5fc82f6363d39869fe20305aa7077435f30d4
| 1,232
|
py
|
Python
|
WORK/working/crime_vis/crime.py
|
jessicagtz/WorkingFolder
|
4791618e1ec12b9cc38a6ceb1ff03bab1799b0bc
|
[
"MIT"
] | null | null | null |
WORK/working/crime_vis/crime.py
|
jessicagtz/WorkingFolder
|
4791618e1ec12b9cc38a6ceb1ff03bab1799b0bc
|
[
"MIT"
] | null | null | null |
WORK/working/crime_vis/crime.py
|
jessicagtz/WorkingFolder
|
4791618e1ec12b9cc38a6ceb1ff03bab1799b0bc
|
[
"MIT"
] | 1
|
2018-12-06T21:33:44.000Z
|
2018-12-06T21:33:44.000Z
|
# import dependencies
from flask import Flask, jsonify, render_template, request, redirect
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, inspect
import pandas as pd
import numpy as np
import datetime as dt
# database setup using automap
engine = create_engine("sqlite:///chi_db.sqlite")
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Save references to the tables
AllCrime = Base.classes.all_crime
# Create our session (link) from Python to the DB
session = Session(engine)
# initialize Flask
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = "sqlite:///chi_db.sqlite"
@app.route("/crimehistory")
if __name__ == "__main__":
app.run(debug=True)
| 27.377778
| 117
| 0.730519
|
# import dependencies
from flask import Flask, jsonify, render_template, request, redirect
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, inspect
import pandas as pd
import numpy as np
import datetime as dt
# database setup using automap
engine = create_engine("sqlite:///chi_db.sqlite")
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Save references to the tables
AllCrime = Base.classes.all_crime
# Create our session (link) from Python to the DB
session = Session(engine)
# initialize Flask
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = "sqlite:///chi_db.sqlite"
@app.route("/crimehistory")
def crime_history():
results = session.query(AllCrime.id, AllCrime.crimeGroup, AllCrime.year, AllCrime.nunCrimes).all()
crime_list = []
for result in results:
crime_dict = {}
crime_dict["year"] = result.year
crime_dict["id"] = result.id
crime_dict["crimeGroup"] = result.crimeGroup
crime_dict["nunCrimes"] = result.nunCrimes
crime_list.append(crime_dict)
return jsonify(crime_list)
if __name__ == "__main__":
app.run(debug=True)
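# Quick smoke test of the endpoint once the dev server is running (a sketch;
# assumes the default Flask port 5000):
#
#   import requests
#   rows = requests.get('http://127.0.0.1:5000/crimehistory').json()
#   print(rows[0])   # {'year': ..., 'id': ..., 'crimeGroup': ..., 'nunCrimes': ...}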
| 421
| 0
| 22
|
60d2134f1b978a5ccd35690d147a761894f25efe
| 19,494
|
py
|
Python
|
easyocr/easyocr.py
|
ghandic/EasyOCR
|
f96bea526e7208e4630a18698c18d0223e2a1168
|
[
"Apache-2.0"
] | 1
|
2021-07-19T03:17:50.000Z
|
2021-07-19T03:17:50.000Z
|
easyocr/easyocr.py
|
ghandic/EasyOCR
|
f96bea526e7208e4630a18698c18d0223e2a1168
|
[
"Apache-2.0"
] | null | null | null |
easyocr/easyocr.py
|
ghandic/EasyOCR
|
f96bea526e7208e4630a18698c18d0223e2a1168
|
[
"Apache-2.0"
] | 1
|
2020-10-24T11:40:29.000Z
|
2020-10-24T11:40:29.000Z
|
# -*- coding: utf-8 -*-
import os
import sys
from logging import getLogger
from typing import Any, List, Tuple
import cv2
import numpy as np
import torch
from bidi.algorithm import get_display
from .detection import get_detector, get_textbox
from .imgproc import loadImage
from .recognition import get_recognizer, get_text
from .settings import *
from .utils import calculate_md5, download_and_unzip, get_image_list, get_paragraph, group_text_box
if sys.version_info[0] == 2:
from io import open
from six.moves.urllib.request import urlretrieve
from pathlib2 import Path
else:
from urllib.request import urlretrieve
from pathlib import Path
LOGGER = getLogger(__name__)
| 46.194313
| 123
| 0.601005
|
# -*- coding: utf-8 -*-
import os
import sys
from logging import getLogger
from typing import Any, List, Tuple
import cv2
import numpy as np
import torch
from bidi.algorithm import get_display
from .detection import get_detector, get_textbox
from .imgproc import loadImage
from .recognition import get_recognizer, get_text
from .settings import *
from .utils import calculate_md5, download_and_unzip, get_image_list, get_paragraph, group_text_box
if sys.version_info[0] == 2:
from io import open
from six.moves.urllib.request import urlretrieve
from pathlib2 import Path
else:
from urllib.request import urlretrieve
from pathlib import Path
LOGGER = getLogger(__name__)
class Reader(object):
def __init__(
self, lang_list: List[str], gpu: bool = True, model_storage_directory: str = None, download_enabled: bool = True
):
"""Create an EasyOCR Reader.
Args:
lang_list (List[str]): Language codes (ISO 639) for languages to be recognized during analysis.
gpu (bool, optional): Enable GPU support. Defaults to True.
model_storage_directory (str, optional): Path to directory for model data. If not specified,
models will be read from a directory as defined by the environment variable
EASYOCR_MODULE_PATH (preferred), MODULE_PATH (if defined), or ~/.EasyOCR/. Defaults to None.
download_enabled (bool, optional): Enabled downloading of model data via HTTP. Defaults to True.
"""
self._set_device(gpu)
self._set_model_lang(lang_list)
self._set_character_choices()
self._set_lang_char(lang_list) # self.lang_list doesn't seem to be used
self._set_model_paths(model_storage_directory)
self._download_models(download_enabled)
self.detector = get_detector(self._detector_path, self.device)
self.recognizer, self.converter = get_recognizer(
input_channel,
output_channel,
hidden_size,
self.character,
self.separator_list,
self.dict_list,
self._recognition_model_path,
device=self.device,
)
def readtext(
self,
image: Any,
decoder: str = "greedy",
beamWidth: int = 5,
batch_size: int = 1,
workers: int = 0,
allowlist: List[str] = None,
blocklist: List[str] = None,
detail: int = 1,
paragraph: bool = False,
contrast_ths: float = 0.1,
adjust_contrast: float = 0.5,
filter_ths: float = 0.003,
text_threshold: float = 0.7,
low_text: float = 0.4,
link_threshold: float = 0.4,
canvas_size: int = 2560,
mag_ratio: float = 1.0,
slope_ths: float = 0.1,
ycenter_ths: float = 0.5,
height_ths: float = 0.5,
width_ths: float = 0.5,
add_margin: float = 0.1,
) -> List: # TODO: ghandic - unsure on output shape
"""[summary] # TODO
Args:
image (Any): [description]
decoder (str, optional): [description]. Defaults to "greedy".
beamWidth (int, optional): [description]. Defaults to 5.
batch_size (int, optional): [description]. Defaults to 1.
workers (int, optional): [description]. Defaults to 0.
allowlist (List[str], optional): [description]. Defaults to None.
blocklist (List[str], optional): [description]. Defaults to None.
detail (int, optional): [description]. Defaults to 1.
paragraph (bool, optional): [description]. Defaults to False.
contrast_ths (float, optional): [description]. Defaults to 0.1.
adjust_contrast (float, optional): [description]. Defaults to 0.5.
filter_ths (float, optional): [description]. Defaults to 0.003.
text_threshold (float, optional): [description]. Defaults to 0.7.
low_text (float, optional): [description]. Defaults to 0.4.
link_threshold (float, optional): [description]. Defaults to 0.4.
canvas_size (int, optional): [description]. Defaults to 2560.
mag_ratio (float, optional): [description]. Defaults to 1.0.
slope_ths (float, optional): [description]. Defaults to 0.1.
ycenter_ths (float, optional): [description]. Defaults to 0.5.
height_ths (float, optional): [description]. Defaults to 0.5.
width_ths (float, optional): [description]. Defaults to 0.5.
add_margin (float, optional): [description]. Defaults to 0.1.
Returns:
List: [description]
"""
img, img_cv_grey = self._load_image(image)
text_box = get_textbox(
self.detector, img, canvas_size, mag_ratio, text_threshold, link_threshold, low_text, False, self.device
)
horizontal_list, free_list = group_text_box(text_box, slope_ths, ycenter_ths, height_ths, width_ths, add_margin)
# should add a filter to screen out small boxes
image_list, max_width = get_image_list(horizontal_list, free_list, img_cv_grey, model_height=imgH)
if allowlist:
ignore_char = "".join(set(self.character) - set(allowlist))
elif blocklist:
ignore_char = "".join(set(blocklist))
else:
ignore_char = "".join(set(self.character) - set(self.lang_char))
if self.model_lang in ["chinese_tra", "chinese_sim", "japanese", "korean"]:
decoder = "greedy"
result = get_text(
self.character,
imgH,
int(max_width),
self.recognizer,
self.converter,
image_list,
ignore_char,
decoder,
beamWidth,
batch_size,
contrast_ths,
adjust_contrast,
filter_ths,
workers,
self.device,
)
if self.model_lang == "arabic":
direction_mode = "rtl"
result = [list(item) for item in result]
for item in result:
item[1] = get_display(item[1])
else:
direction_mode = "ltr"
if paragraph:
result = get_paragraph(result, mode=direction_mode)
if detail == 0:
return [item[1] for item in result]
else:
return result
def _load_image(self, image: Any) -> Tuple[np.ndarray, np.ndarray]:
if type(image) == str:
if image.startswith("http://") or image.startswith("https://"):
tmp, _ = urlretrieve(image)
img_cv_grey = cv2.imread(tmp, cv2.IMREAD_GRAYSCALE)
os.remove(tmp)
else:
img_cv_grey = cv2.imread(image, cv2.IMREAD_GRAYSCALE)
image = os.path.expanduser(image)
img = loadImage(image) # can accept URL
elif type(image) == bytes:
nparr = np.frombuffer(image, np.uint8)
img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img_cv_grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
elif type(image) == np.ndarray:
if len(image.shape) == 2: # grayscale
img_cv_grey = image
img = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
elif len(image.shape) == 3: # BGRscale
img = image
img_cv_grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
else:
raise TypeError("Could not load image")
return img, img_cv_grey
def _download_models(self, download_enabled):
corrupt_msg = "MD5 hash mismatch, possible file corruption"
if not os.path.isfile(self._detector_path):
if not download_enabled:
raise FileNotFoundError("Missing %s and downloads disabled" % self._detector_path)
LOGGER.warning(
"Downloading detection model, please wait. "
"This may take several minutes depending upon your network connection."
)
download_and_unzip(model_url["detector"][0], DETECTOR_FILENAME, self.model_storage_directory)
assert calculate_md5(self._detector_path) == model_url["detector"][1], corrupt_msg
LOGGER.info("Download complete")
elif calculate_md5(self._detector_path) != model_url["detector"][1]:
if not download_enabled:
raise FileNotFoundError("MD5 mismatch for %s and downloads disabled" % self._detector_path)
LOGGER.warning(corrupt_msg)
os.remove(self._detector_path)
LOGGER.warning(
"Re-downloading the detection model, please wait. "
"This may take several minutes depending upon your network connection."
)
download_and_unzip(model_url["detector"][0], DETECTOR_FILENAME, self.model_storage_directory)
assert calculate_md5(self._detector_path) == model_url["detector"][1], corrupt_msg
# check model file
if not os.path.isfile(self._recognition_model_path):
if not download_enabled:
raise FileNotFoundError("Missing %s and downloads disabled" % self._recognition_model_path)
LOGGER.warning(
"Downloading recognition model, please wait. "
"This may take several minutes depending upon your network connection."
)
download_and_unzip(
model_url[self._recognition_model_file][0], self._recognition_model_file, self.model_storage_directory
)
assert (
calculate_md5(self._recognition_model_path) == model_url[self._recognition_model_file][1]
), corrupt_msg
LOGGER.info("Download complete.")
elif calculate_md5(self._recognition_model_path) != model_url[self._recognition_model_file][1]:
if not download_enabled:
raise FileNotFoundError("MD5 mismatch for %s and downloads disabled" % self._recognition_model_path)
LOGGER.warning(corrupt_msg)
os.remove(self._recognition_model_path)
LOGGER.warning(
"Re-downloading the recognition model, please wait. "
"This may take several minutes depending upon your network connection."
)
download_and_unzip(
model_url[self._recognition_model_file][0], self._recognition_model_file, self.model_storage_directory
)
assert (
calculate_md5(self._recognition_model_path) == model_url[self._recognition_model_file][1]
), corrupt_msg
LOGGER.info("Download complete")
def _set_lang_char(self, lang_list: List[str]):
self.dict_list = {}
for lang in lang_list:
self.dict_list[lang] = os.path.join(BASE_PATH, "dict", lang + ".txt")
self.lang_char = []
for lang in lang_list:
char_file = os.path.join(BASE_PATH, "character", lang + "_char.txt")
with open(char_file, "r", encoding="utf-8-sig") as input_file:
char_list = input_file.read().splitlines()
self.lang_char += char_list
self.lang_char = set(self.lang_char).union(set(number + symbol))
self.lang_char = "".join(self.lang_char)
def _set_model_lang(self, lang_list: List[str]):
# check available languages
unknown_lang = set(lang_list) - set(all_lang_list)
if unknown_lang != set():
raise ValueError(unknown_lang, "is not supported")
# choose model
if "th" in lang_list:
self.model_lang = "thai"
if set(lang_list) - set(["th", "en"]) != set():
raise ValueError('Thai is only compatible with English, try lang_list=["th","en"]')
elif "ch_tra" in lang_list:
self.model_lang = "chinese_tra"
if set(lang_list) - set(["ch_tra", "en"]) != set():
raise ValueError('Chinese is only compatible with English, try lang_list=["ch_tra","en"]')
elif "ch_sim" in lang_list:
self.model_lang = "chinese_sim"
if set(lang_list) - set(["ch_sim", "en"]) != set():
raise ValueError('Chinese is only compatible with English, try lang_list=["ch_sim","en"]')
elif "ja" in lang_list:
self.model_lang = "japanese"
if set(lang_list) - set(["ja", "en"]) != set():
raise ValueError('Japanese is only compatible with English, try lang_list=["ja","en"]')
elif "ko" in lang_list:
self.model_lang = "korean"
if set(lang_list) - set(["ko", "en"]) != set():
raise ValueError('Korean is only compatible with English, try lang_list=["ko","en"]')
elif "ta" in lang_list:
self.model_lang = "tamil"
if set(lang_list) - set(["ta", "en"]) != set():
raise ValueError('Tamil is only compatible with English, try lang_list=["ta","en"]')
elif set(lang_list) & set(arabic_lang_list):
self.model_lang = "arabic"
if set(lang_list) - set(arabic_lang_list + ["en"]) != set():
raise ValueError('Arabic is only compatible with English, try lang_list=["ar","fa","ur","ug","en"]')
elif set(lang_list) & set(devanagari_lang_list):
self.model_lang = "devanagari"
if set(lang_list) - set(devanagari_lang_list + ["en"]) != set():
raise ValueError('Devanagari is only compatible with English, try lang_list=["hi","mr","ne","en"]')
elif set(lang_list) & set(cyrillic_lang_list):
self.model_lang = "cyrillic"
if set(lang_list) - set(cyrillic_lang_list + ["en"]) != set():
raise ValueError(
'Cyrillic is only compatible with English, try lang_list=["ru","rs_cyrillic","be","bg","uk","mn","en"]'
)
else:
self.model_lang = "latin"
def _set_character_choices(self):
self.separator_list = {}
if self.model_lang == "latin":
all_char = (
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
+ "ÀÁÂÃÄÅÆÇÈÉÊËÍÎÑÒÓÔÕÖØÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿąęĮįıŁłŒœŠšųŽž"
)
self.character = number + symbol + all_char
self._recognition_model_file = "latin.pth"
elif self.model_lang == "arabic":
ar_number = "٠١٢٣٤٥٦٧٨٩"
ar_symbol = "«»؟،؛"
ar_char = "ءآأؤإئااًبةتثجحخدذرزسشصضطظعغفقكلمنهوىيًٌٍَُِّْٰٓٔٱٹپچڈڑژکڭگںھۀہۂۃۆۇۈۋیېےۓە"
self.character = number + symbol + en_char + ar_number + ar_symbol + ar_char
self._recognition_model_file = "arabic.pth"
elif self.model_lang == "cyrillic":
cyrillic_char = (
"ЁЂЄІЇЈЉЊЋЎЏАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяёђєіїјљњћўџҐґҮүө"
)
self.character = number + symbol + en_char + cyrillic_char
self._recognition_model_file = "cyrillic.pth"
elif self.model_lang == "devanagari":
devanagari_char = (
".ँंःअअंअःआइईउऊऋएऐऑओऔकखगघङचछजझञटठडढणतथदधनऩपफबभमयरऱलळवशषसह़ािीुूृॅेैॉोौ्ॐ॒क़ख़ग़ज़ड़ढ़फ़ॠ।०१२३४५६७८९॰"
)
self.character = number + symbol + en_char + devanagari_char
self._recognition_model_file = "devanagari.pth"
elif self.model_lang == "chinese_tra":
char_file = os.path.join(BASE_PATH, "character", "ch_tra_char.txt")
with open(char_file, "r", encoding="utf-8-sig") as input_file:
ch_tra_list = input_file.read().splitlines()
ch_tra_char = "".join(ch_tra_list)
self.character = number + symbol + en_char + ch_tra_char
self._recognition_model_file = "chinese.pth"
elif self.model_lang == "chinese_sim":
char_file = os.path.join(BASE_PATH, "character", "ch_sim_char.txt")
with open(char_file, "r", encoding="utf-8-sig") as input_file:
ch_sim_list = input_file.read().splitlines()
ch_sim_char = "".join(ch_sim_list)
self.character = number + symbol + en_char + ch_sim_char
self._recognition_model_file = "chinese_sim.pth"
elif self.model_lang == "japanese":
char_file = os.path.join(BASE_PATH, "character", "ja_char.txt")
with open(char_file, "r", encoding="utf-8-sig") as input_file:
ja_list = input_file.read().splitlines()
ja_char = "".join(ja_list)
self.character = number + symbol + en_char + ja_char
self._recognition_model_file = "japanese.pth"
elif self.model_lang == "korean":
char_file = os.path.join(BASE_PATH, "character", "ko_char.txt")
with open(char_file, "r", encoding="utf-8-sig") as input_file:
ko_list = input_file.read().splitlines()
ko_char = "".join(ko_list)
self.character = number + symbol + en_char + ko_char
self._recognition_model_file = "korean.pth"
elif self.model_lang == "tamil":
char_file = os.path.join(BASE_PATH, "character", "ta_char.txt")
with open(char_file, "r", encoding="utf-8-sig") as input_file:
ta_list = input_file.read().splitlines()
ta_char = "".join(ta_list)
self.character = number + symbol + en_char + ta_char
self._recognition_model_file = "tamil.pth"
elif self.model_lang == "thai":
self.separator_list = {"th": ["\xa2", "\xa3"], "en": ["\xa4", "\xa5"]}
separator_char = []
for lang, sep in self.separator_list.items():
separator_char += sep
special_c0 = "ุู"
special_c1 = "ิีืึ" + "ั"
special_c2 = "่้๊๋"
special_c3 = "็์"
special_c = special_c0 + special_c1 + special_c2 + special_c3 + "ำ"
th_char = "กขคฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรลวศษสหฬอฮฤ" + "เแโใไะา" + special_c + "ํฺ" + "ฯๆ"
th_number = "0123456789๑๒๓๔๕๖๗๘๙"
self.character = "".join(separator_char) + symbol + en_char + th_char + th_number
self._recognition_model_file = "thai.pth"
else:
LOGGER.error("invalid language")
raise NotImplementedError("invalid language")
def _set_model_paths(self, dir: str):
self.model_storage_directory = MODULE_PATH + "/model"
if dir:
self.model_storage_directory = dir
Path(self.model_storage_directory).mkdir(parents=True, exist_ok=True)
self._recognition_model_path = os.path.join(self.model_storage_directory, self._recognition_model_file)
self._detector_path = os.path.join(self.model_storage_directory, DETECTOR_FILENAME)
def _set_device(self, gpu: bool):
if gpu is False:
self.device = "cpu"
LOGGER.warning("Using CPU. Note: This module is much faster with a GPU.")
elif not torch.cuda.is_available():
self.device = "cpu"
LOGGER.warning("CUDA not available - defaulting to CPU. Note: This module is much faster with a GPU.")
elif gpu is True:
self.device = "cuda"
else:
self.device = gpu
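# Typical usage of the Reader above (a sketch; 'example.png' is a placeholder
# path, and the first call downloads model weights unless download_enabled=False):
#
#   reader = Reader(['en'], gpu=False)
#   results = reader.readtext('example.png')            # [(box, text, score), ...]
#   texts = reader.readtext('example.png', detail=0)    # just the strings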
| 13,459
| 5,903
| 23
|
b9ade0befeaaf199c9e1afc1d7f76c7fb111996b
| 740
|
py
|
Python
|
src/proxies/images.py
|
otanadzetsotne/nn-image-similarity
|
8a00c30359e56c4a229942b4b2df6265fa2856a7
|
[
"MIT"
] | null | null | null |
src/proxies/images.py
|
otanadzetsotne/nn-image-similarity
|
8a00c30359e56c4a229942b4b2df6265fa2856a7
|
[
"MIT"
] | null | null | null |
src/proxies/images.py
|
otanadzetsotne/nn-image-similarity
|
8a00c30359e56c4a229942b4b2df6265fa2856a7
|
[
"MIT"
] | null | null | null |
# local
from src.utils.images import ImagesHelper
from src.dtypes import ImagesInner
| 20.555556
| 50
| 0.601351
|
# local
from src.utils.images import ImagesHelper
from src.dtypes import ImagesInner
class ProxyImages:
@staticmethod
def filter_correct(
images: ImagesInner,
) -> ImagesInner:
"""
Filter images and return just the correct ones
"""
return ImagesHelper.filter_correct(images)
@staticmethod
def filter_error(
images: ImagesInner,
) -> ImagesInner:
"""
Filter images and return just those with errors
"""
return ImagesHelper.filter_error(images)
@staticmethod
def has_correct(
images: ImagesInner,
) -> bool:
"""
Check ImagesInner object
"""
return ImagesHelper.has_correct(images)
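# Sketch of splitting a batch with the proxy (ImagesInner is the image-record
# collection type from src.dtypes; `process` is a hypothetical downstream step):
#
#   good = ProxyImages.filter_correct(images)
#   bad = ProxyImages.filter_error(images)
#   if ProxyImages.has_correct(images):
#       process(good)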
| 0
| 631
| 23
|
22afb31fa0ba4539038dbf716afbd984f54b90ca
| 6,054
|
py
|
Python
|
code/src/main/python/misconceptions/rUtils/functions.py
|
DynamicCodeSearch/CodeSeer
|
ee985ece7691691585952eb88565f0e08bdc9113
|
[
"MIT"
] | 5
|
2020-04-05T18:04:13.000Z
|
2021-04-13T20:34:19.000Z
|
code/src/main/python/misconceptions/rUtils/functions.py
|
DynamicCodeSearch/CodeSeer
|
ee985ece7691691585952eb88565f0e08bdc9113
|
[
"MIT"
] | 1
|
2020-04-29T21:42:26.000Z
|
2020-05-01T23:45:45.000Z
|
code/src/main/python/misconceptions/rUtils/functions.py
|
DynamicCodeSearch/CodeSeer
|
ee985ece7691691585952eb88565f0e08bdc9113
|
[
"MIT"
] | 3
|
2020-01-27T16:02:14.000Z
|
2021-02-08T13:25:15.000Z
|
import sys
import os
sys.path.append(os.path.abspath("."))
sys.dont_write_bytecode = True
__author__ = "bigfatnoob"
import copy
import signal
import time
import re
import rpy2
import rpy2.robjects as robjects
from rpy2 import rinterface
from rpy2.robjects import pandas2ri
from rpy2.robjects.functions import SignatureTranslatedFunction
from collections import OrderedDict
from analysis.helpers import constants as a_consts
from analysis import execute
from misconceptions.common import datatypes
from misconceptions.rUtils import generator, dataframer
from utils import cache
pandas2ri.activate()
rinterface.set_writeconsole_warnerror(None)
rinterface.set_writeconsole_regular(None)
r_source = robjects.r['source']
R_GEN_PREFIX = "gen_func_r_"
FUNC_BODY_REGEX = r'function\s*\(.*?\)\s*((.|\s)+)'
FUNCTION_STORE = "/Users/panzer/Raise/ProgramRepair/CodeSeer/code/src/main/python/expt/r_functions.pkl"
| 28.422535
| 107
| 0.743971
|
import sys
import os
sys.path.append(os.path.abspath("."))
sys.dont_write_bytecode = True
__author__ = "bigfatnoob"
import copy
import signal
import time
import re
import rpy2
import rpy2.robjects as robjects
from rpy2 import rinterface
from rpy2.robjects import pandas2ri
from rpy2.robjects.functions import SignatureTranslatedFunction
from collections import OrderedDict
from analysis.helpers import constants as a_consts
from analysis import execute
from misconceptions.common import datatypes
from misconceptions.rUtils import generator, dataframer
from utils import cache
pandas2ri.activate()
rinterface.set_writeconsole_warnerror(None)
rinterface.set_writeconsole_regular(None)
r_source = robjects.r['source']
R_GEN_PREFIX = "gen_func_r_"
FUNC_BODY_REGEX = r'function\s*\(.*?\)\s*((.|\s)+)'
FUNCTION_STORE = "/Users/panzer/Raise/ProgramRepair/CodeSeer/code/src/main/python/expt/r_functions.pkl"
def get_R_error_message(exception):
return exception.message.strip()
def get_env_variables(r_file_path):
try:
robjects.r('''
source('%s')
''' % r_file_path)
return robjects.globalenv
except rinterface.RRuntimeError as e:
print("Error while fetching environment variables.\n%s" % get_R_error_message(e))
return None
def r_compile(r_file_path, del_compiled=True):
try:
robjects.r('''
library(compiler)
cmpfile('%s')
''' % r_file_path)
if del_compiled:
compiled_file = r_file_path.rsplit(".", 1)[0] + ".Rc"
cache.delete_file(compiled_file)
return True
except Exception as e:
# print("Error while compilation.\n%s" % get_R_error_message(e))
# error_message = get_R_error_message(e)
# return error_message and "import pandas" in error_message
pass
return False
def get_r_function(r_file_path, func_name):
env_variables = get_env_variables(r_file_path)
if not env_variables:
return None
for name in env_variables.keys():
if name == func_name and isinstance(env_variables[name], SignatureTranslatedFunction):
return env_variables[name]
return None
def get_r_functions(r_file_path):
r_functions = {}
env_variables = get_env_variables(r_file_path)
if not env_variables:
return None
for name in env_variables.keys():
if isinstance(env_variables[name], SignatureTranslatedFunction):
r_functions[name] = env_variables[name]
return r_functions
def get_function_arg_names(r_func):
return list(r_func.formals().names)
def get_function_body(r_func):
func_str = str(r_func).strip()
return re.match(FUNC_BODY_REGEX, func_str).group(1)
def get_r_types(r_func):
formal_args = r_func.formals()
arg_names = get_function_arg_names(r_func)
if formal_args is None or type(formal_args) == rpy2.rinterface.RNULLType:
return None
r_types = OrderedDict()
for arg_name, formal_arg in zip(arg_names, formal_args):
r_types[arg_name] = {"type": rpy2.robjects.vectors.DataFrame}
return r_types
def get_function_as_str(func_name, func):
return ("%s <- %s" % (func_name, str(func))).strip()
def convert_to_R_args(py_args):
r_args = []
for py_arg in py_args:
r_arg = datatypes.convert_py_object_to_r(py_arg)
r_args.append(r_arg)
return r_args
def execute_R_function(r_func, arg):
cloned = convert_to_R_args([copy.deepcopy(x) for x in arg])
prev_signal = signal.getsignal(signal.SIGALRM)
signal.signal(signal.SIGALRM, execute.timeout_handler)
signal.alarm(a_consts.METHOD_WAIT_TIMEOUT)
duration = a_consts.METHOD_WAIT_TIMEOUT * 1000
ret_obj = {"return": None, "errorMessage": None}
try:
start = time.time()
ret = r_func(*cloned)
duration = (time.time() - start) * 1000
ret_obj["return"] = datatypes.convert_r_object_to_py(ret)
except execute.TimeoutException:
ret_obj["errorMessage"] = "Method timed out after %d seconds" % a_consts.METHOD_WAIT_TIMEOUT
except rinterface.RRuntimeError as e:
# print("Error while executing rUtils function %s. Error: %s" % (func_name, e.message))
ret_obj["errorMessage"] = e.message
except Exception as e:
ret_obj["errorMessage"] = e.message
ret_obj["duration"] = duration
signal.alarm(0)
signal.signal(signal.SIGALRM, prev_signal)
return ret_obj
def process_R_function(file_path, func_name, r_func):
print("Processing %s ... " % func_name)
r_types = get_r_types(r_func)
if r_types is None:
return None
args = generator.load_args(r_types)
func_key = generator.make_key(r_types)
results = execute_R_function_on_args(r_func, args)
function_data = {
"name": func_name,
"filePath": file_path,
"inputKey": func_key,
"body": get_function_as_str(func_name, r_func)
}
if results:
function_data["outputs"] = results
return function_data
def execute_R_function_on_args(r_func, args_set):
results = []
is_valid = False
for args in args_set:
result = execute_R_function(r_func, args)
if not is_valid and result.get("return", None) is not None:
is_valid = True
results.append(result)
if not is_valid:
print("Function is invalid")
return None
return results
def save_function(func_data):
saved_funcs = cache.load_pickle(FUNCTION_STORE)
if not saved_funcs:
saved_funcs = {}
saved_funcs[func_data["name"]] = func_data
cache.save_pickle(FUNCTION_STORE, saved_funcs)
def extract_col_names(r_func):
arg_names = get_function_arg_names(r_func)
func_body = get_function_body(r_func)
arg_cols = {}
for arg_name in arg_names:
df = dataframer.extract_col_names(arg_name, func_body)
if df:
arg_cols[arg_name] = df
return arg_cols
def parse_function_for_col_names(func_name, source_file):
all_funcs = get_r_functions(source_file)
r_func = all_funcs[func_name]
return extract_col_names(r_func)
def test_function():
file_path = '/Users/panzer/Raise/ProgramRepair/CodeSeer/projects/src/main/R/Example/PandasR/r_snippets.R'
func_name = 'gen_func_r_drop'
r_functions = get_r_functions(file_path)
r_func = r_functions[func_name]
process_R_function(file_path, func_name, r_func)
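# End-to-end sketch (the .R path is a placeholder): load every function
# defined in an R source file and profile each one on generated data frames:
#
#   funcs = get_r_functions('/path/to/snippets.R')
#   for name, r_func in (funcs or {}).items():
#       data = process_R_function('/path/to/snippets.R', name, r_func)
#       if data:
#           save_function(data)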
| 4,737
| 0
| 391
|
a65d3f0e19e9c311490bb7bc77d8eea9559cd262
| 339
|
py
|
Python
|
bot/plugins/joke.py
|
Preocts/twitch-chat-bot
|
50341c30d8eada4b50634c8f25a9eb0eed681735
|
[
"MIT"
] | 62
|
2019-11-16T22:07:42.000Z
|
2022-03-08T20:50:01.000Z
|
bot/plugins/joke.py
|
Preocts/twitch-chat-bot
|
50341c30d8eada4b50634c8f25a9eb0eed681735
|
[
"MIT"
] | 30
|
2019-03-19T15:05:55.000Z
|
2022-03-24T05:00:53.000Z
|
bot/plugins/joke.py
|
Preocts/twitch-chat-bot
|
50341c30d8eada4b50634c8f25a9eb0eed681735
|
[
"MIT"
] | 56
|
2019-06-08T20:34:31.000Z
|
2022-02-21T20:10:38.000Z
|
from __future__ import annotations
from typing import Match
import pyjokes
from bot.config import Config
from bot.data import command
from bot.data import esc
from bot.data import format_msg
@command('!joke', '!yoke')
| 21.1875
| 61
| 0.764012
|
from __future__ import annotations
from typing import Match
import pyjokes
from bot.config import Config
from bot.data import command
from bot.data import esc
from bot.data import format_msg
@command('!joke', '!yoke')
async def cmd_joke(config: Config, match: Match[str]) -> str:
return format_msg(match, esc(pyjokes.get_joke()))
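# In chat, either trigger produces a reply built from a random pyjokes
# one-liner (a sketch; the actual text varies at runtime):
#
#   !joke  ->  format_msg(match, esc(pyjokes.get_joke()))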
| 94
| 0
| 22
|
b10aa05fe838d0b0b31227f058840a4db0cf7599
| 11,594
|
py
|
Python
|
ngskit/trim_reads.py
|
kim-lab/NGSKit
|
62f609111ba59b9d7d87dc9979a9a2c57959e297
|
[
"MIT"
] | 1
|
2021-12-10T22:23:50.000Z
|
2021-12-10T22:23:50.000Z
|
ngskit/trim_reads.py
|
kimlaborg/NGSKit
|
62f609111ba59b9d7d87dc9979a9a2c57959e297
|
[
"MIT"
] | null | null | null |
ngskit/trim_reads.py
|
kimlaborg/NGSKit
|
62f609111ba59b9d7d87dc9979a9a2c57959e297
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
import logging
import argparse
import time
import ngskit.barcodes as barcodes
from ngskit.utils import fasta_tools, fastq_tools
#import barcodes
#from utils import fasta_tools, fastq_tools
def trimming(demultiplexed_fastq, barcode, quality_threshold,
trgt_len, output_fmt, output_folder):
"""Extract seq from the FASTAQ demultiplexed files. Trim barcodes + Constant
Parameters
----------
demultiplexed_fastq : str
Path of the demultiplexed fastq file
barcode : barcode.object
Barcode object with info about the barcode and constant regions
quality_threshold : int
reading quality Threshold, any sequence will be trimmed under that level
trgt_len : int
length in bases of the target sequences.
output_fmt : str
Output format, by default fasta
output_folder : str
Output folder to save files with trimmed sequences
Returns
-------
None. Trimmed sequences are written to the output file in fasta or fastq format.
Notes
-----
Result str, in Fasta format
>FASTQ_ID + length + quality
ATGATGGTAGTAGTAGAAAGATAGATGATGATGAT
it will be stored at:
/data_path/Sequences/Sample_id.fasta
"""
# Init the output format; returns a writer function
logger = logging.getLogger(__name__)
create_folder(output_folder)
#
if output_fmt == 'fasta':
save_seq = fasta_tools.write_fasta_sequence
filehdl_output = open(output_folder+'/'+barcode.id+'.fasta','a')
logger.info('Output file: %s' % (output_folder+'/'+barcode.id+'.fasta'))
if output_fmt == 'fastq':
save_seq = fastq_tools.write_fastq_sequence
filehdl_output = open(output_folder+'/'+barcode.id+'.fastq','a')
logger.info('Output file: %s' % (output_folder+'/'+barcode.id+'.fastq'))
# check barcodes integrity, peplength, fastq
# barcodes_list = barcodes.read(barcode_file)
# Stats
nseqs = 0
ntrimed = 0
# Open Fastq file
with open(demultiplexed_fastq, 'r') as read1:
for read1_id in read1:
# Read 4 by 4
# ID lane info, seq info etc
# Read seq and Quality info
read1_seq, read1_strand, read1_qual = [next(read1) for _ in range(3)]
#Translate the Quality to a list of Integers
qual = [ord(c)-33 for c in read1_qual.strip()]
target_sequence = read1_seq[barcode.b1_len+barcode.c1_len:
barcode.b1_len+barcode.c1_len+trgt_len]
#remove the quality of the barcode and the constant region
target_qual = qual[barcode.b1_len+barcode.c1_len:
barcode.b1_len+barcode.c1_len+trgt_len]
nseqs += 1
# Control
try:
avg_quality = sum(target_qual)/float(len(target_qual))
except ZeroDivisionError:
logger.error('Sequence with no length or no score', exc_info=True)
logger.error(read1_seq,read1_qual,target_qual,target_qual,trgt_len)
sys.exit()
if len(target_sequence) == trgt_len and avg_quality >= quality_threshold:
ntrimed += 1
# save output format
# attach Qavgm and length origin to the id
seq_id = '{}_Q:{:.2f}_F:{}'.format(read1_id.strip(), avg_quality, trgt_len)
save_seq([seq_id, target_sequence, target_qual],
file_output=filehdl_output)
# save
else:
# Stats
pass
logger.info('Read %i Sequences' % (nseqs))
logger.info('Trimmed %i Sequences' % (ntrimed))
filehdl_output.close()
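# The loop above decodes Phred+33 quality strings; a minimal self-contained
# sketch of that step (assumes Sanger/Illumina 1.8+ encoding):
#
#   qual_line = 'IIIIHHH##'
#   scores = [ord(c) - 33 for c in qual_line]    # 'I' -> 40, '#' -> 2
#   avg_q = sum(scores) / float(len(scores))     # ~31.2, passes a Q30 threshold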
def get_options():
"""Get arguments from command line.
Parameters
----------
Returns
-------
"""
parser = argparse.ArgumentParser(description="""
Trimming Fastq sequences tool
Usage Trimming:
%prog -d [demultiplexed folder] -b [barcode_file.inp] -q [quality threshold]\
-m [method] --output_fmt fasta
""")
parser.add_argument('-d', '--input_folder', action="store",
dest="input_folder", default=False, help='Folder \
contains demultiplexed folders and files', required=True)
parser.add_argument('-b', '--barcode_file', action="store",
dest="barcode_file", default=False, help='File that \
contains barcodes and constant regions', required=True)
parser.add_argument('-o', '--out_folder', action="store", dest="out_folder",
default='Sequences', help='Output folder, called \
Sequences by default')
# optional Arguments
parser.add_argument('-m', '--trimming_method', action="store",
dest="trimming_method", default='standard', type=str,
choices=['standard',
'dynamic'],
help="""standard Trimm sequences according barcode file configuration, ignores float window output files\n
dynamic Trimm sequences using file lenght label, or output of float window demultiplex """)
# Default 1
parser.add_argument('-q', '--quality', action="store",
dest="quality", default=30, type=int,
help='Quality reading threshold \
(default 30)')
parser.add_argument('--output_fmt', help='Output format, default fasta',
dest='output_fmt', default='fasta', action='store')
parser.add_argument('--force-lenght', help='force a length and ignore the file label; overrides the dynamic option',
dest='force_lenght', default=False, action='store')
options = parser.parse_args()
return options
def main():
"""Pipeline Control.
Parameters
----------
opts
"""
opts = get_options()
# init logging
time_stamp = time.ctime()
seconds_time = int(time.time())
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt='%m-%d %H:%M',
filename= opts.input_folder+ '/Logs/Trimming_'+opts.input_folder.rpartition('/')[-1]+'_'+opts.barcode_file+'_{}.log'.format(seconds_time),
filemode='w')
logger = logging.getLogger(__name__)
logger.info('JOB START {4} {1} {2} {0} {3}'.format(*time_stamp.split()))
# DEMULTIPLEX
# Check inputs
# Load Barcodes info
# check barcodes integrity, peplength, fastq
barcodes_list = barcodes.read(opts.barcode_file)
# make output folder
# Init Logging
logger.info('#### TRIMMING ####')
# incompatible
logger.info('Method: {}'.format(opts.trimming_method))
logger.info('Quality threshold: {}'.format(opts.quality))
logger.info('Output format: {}'.format(opts.output_fmt))
#
logger.info('Barcode file: {}'.format(opts.barcode_file))
logger.info('Input folder: {}'.format(opts.input_folder))
output_folder = opts.input_folder+'/'+opts.out_folder
logger.info('Output folder: {}'.format(output_folder))
logger.info('Force target length: %s', opts.force_lenght)
# foreach sample in barcodes
for barcode in barcodes_list:
logger.info('Trimming Sample: {}'.format(barcode.id))
# folder must == sample id in the barcode
# TODO: need to improve this line, it can be problematic
working_folder = './'+opts.input_folder+'/'+barcode.id+'/'
# get all fastq under the folder
for demultiplexed_fastq in os.listdir(working_folder):
# ToDO: only get fastq files
# ToDo: only those I want (target length)
# if method is dynamic, get all the files in the folder
if opts.trimming_method == 'dynamic':
# To do
# read length from the filename
seq_length = get_length_label(demultiplexed_fastq)
# modifiy target size
# Skip empty vectors
if seq_length:
# modify output folder
dir_emultiplexed_fastq = working_folder+demultiplexed_fastq
# trim!
trimming(dir_emultiplexed_fastq,
barcode,
quality_threshold= opts.quality,
trgt_len= seq_length,
output_fmt= opts.output_fmt,
output_folder=output_folder+'_'+str(seq_length))
# raw_name = demultiplexed_file.replace('_F.fastq','')
# read the length from the file
elif opts.trimming_method == 'standard':
# Trim time
dir_emultiplexed_fastq = working_folder+demultiplexed_fastq
# ignore files from dynamic target
seq_length = get_length_label(demultiplexed_fastq)
if seq_length != barcode.trgt_len:
logger.info("file label and barcode lenght are different: %s SKIPPING FILE", demultiplexed_fastq)
continue
else:
logger.info('Trimming file: {}'.format(demultiplexed_fastq))
trimming(dir_emultiplexed_fastq,
barcode,
quality_threshold= opts.quality,
trgt_len= barcode.trgt_len,
output_fmt= opts.output_fmt,
output_folder=output_folder)
# add here: multi-length trimming
elif opts.trimming_method == 'force':
# Todo: this option can be useful in the future
continue
else:
# unknow method
pass
# DONE
time_stamp = time.ctime()
logger.info('JOB ENDS {4} {1} {2} {0} {3}'.format(*time_stamp.split()))
return
# def main():
# # Read arguments
# opts = get_options()
# # init logging
# time_stamp = time.ctime()
# logging.basicConfig(level=logging.INFO,
# format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
# datefmt='%m-%d %H:%M',
# filename= 'Trimming_'+opts.input_folder+'_'+opts.barcode_file+'_{4}_{1}_{2}_{0}_{3}.log'.format(*time_stamp.split()),
# filemode='w')
# logger = logging.getLogger(__name__)
# logger.info('JOB START {4} {1} {2} {0} {3}'.format(*time_stamp.split()))
# # DEMULTIPLEX
# workflow(opts)
# # DONE
# time_stamp = time.ctime()
# logger.info('JOB ENDS {4} {1} {2} {0} {3}'.format(*time_stamp.split()))
if __name__ == '__main__':
main()
| 35.894737
| 163
| 0.582456
|
#!/usr/bin/env python
import os
import sys
import logging
import argparse
import time
import ngskit.barcodes as barcodes
from ngskit.utils import fasta_tools, fastq_tools
#import barcodes
#from utils import fasta_tools, fastq_tools
def create_folder(output_folder):
    """Create the output folder; warn instead of failing if it already exists."""
logger = logging.getLogger(__name__)
logger.info('Open folder %s', output_folder)
try:
# by default Sequences
os.makedirs(output_folder)
except OSError:
_ = sys.exc_info()
        logger.warning('Folder %s already exists', output_folder)
return
def trimming(demultiplexed_fastq, barcode, quality_threshold,
trgt_len, output_fmt, output_folder):
"""Extract seq from the FASTAQ demultiplexed files. Trim barcodes + Constant
Parameters
----------
demultiplexed_fastq : str
Path of the demultiplexed fastq file
barcode : barcode.object
Barcode object wiht info about barcode and constant regions
quality_threshold : int
reading quality Threshold, any sequence will be trimmed under that level
trgt_len : int
length in bases of the target sequences.
output_fmt : str
Output format, by default fasta
working_folder : str
Output folder to save files with trimmed sequences
Returns
-------
output format save fasta or fastq
Notes
-----
Result str, in Fasta format
>FASTAQ_ID+ length + Quality
ATGATGGTAGTAGTAGAAAGATAGATGATGATGAT
it will be storage:
/data_path/Sequences/Sample_id.fasta
"""
    # Pick the output writer function and open the per-sample output file
logger = logging.getLogger(__name__)
create_folder(output_folder)
#
if output_fmt == 'fasta':
save_seq = fasta_tools.write_fasta_sequence
filehdl_output = open(output_folder+'/'+barcode.id+'.fasta','a')
logger.info('Output file: %s' % (output_folder+'/'+barcode.id+'.fasta'))
    elif output_fmt == 'fastq':
        save_seq = fastq_tools.write_fastq_sequence
        filehdl_output = open(output_folder+'/'+barcode.id+'.fastq','a')
        logger.info('Output file: %s' % (output_folder+'/'+barcode.id+'.fastq'))
    else:
        raise ValueError('Unsupported output format: {}'.format(output_fmt))
# check barcodes integrity, peplength, fastq
# barcodes_list = barcodes.read(barcode_file)
# Stats
nseqs = 0
ntrimed = 0
# Open Fastq file
with open(demultiplexed_fastq, 'r') as read1:
for read1_id in read1:
# Read 4 by 4
# ID lane info, seq info etc
# Read seq and Quality info
read1_seq, read1_strand, read1_qual = [next(read1) for _ in range(3)]
#Translate the Quality to a list of Integers
qual = [ord(c)-33 for c in read1_qual.strip()]
target_sequence = read1_seq[barcode.b1_len+barcode.c1_len:
barcode.b1_len+barcode.c1_len+trgt_len]
#remove the quality of the barcode and the constant region
target_qual = qual[barcode.b1_len+barcode.c1_len:
barcode.b1_len+barcode.c1_len+trgt_len]
nseqs += 1
# Control
try:
avg_quality = sum(target_qual)/float(len(target_qual))
except ZeroDivisionError:
                logger.error('Sequence with no length or no score', exc_info=True)
                logger.error('seq=%s qual=%s target_qual=%s trgt_len=%s',
                             read1_seq.strip(), read1_qual.strip(), target_qual, trgt_len)
sys.exit()
if len(target_sequence) == trgt_len and avg_quality >= quality_threshold:
ntrimed += 1
# save output format
                # attach average quality and target length to the id
seq_id = '{}_Q:{:.2f}_F:{}'.format(read1_id.strip(), avg_quality, trgt_len)
save_seq([seq_id, target_sequence, target_qual],
file_output=filehdl_output)
# save
else:
# Stats
pass
logger.info('Read %i Sequences' % (nseqs))
logger.info('Trimmed %i Sequences' % (ntrimed))
filehdl_output.close()
def get_length_label(demultiplexed_fastq_file):
    """Read the target-length label from the demultiplexed file name, or None."""
    logger = logging.getLogger(__name__)
    filename, _ = os.path.splitext(demultiplexed_fastq_file)
    label = filename.split('_')[-2:-1]
    if not label or not label[0].isdigit():
        logger.info("No length label in: %s", demultiplexed_fastq_file)
        return None
    logger.info("Label length: %s", label[0])
    return int(label[0])
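# Example (hypothetical demultiplexer output names, for illustration):
#   get_length_label('Sample1_75_F.fastq')  ->  75
#   get_length_label('Sample1_F.fastq')     ->  None  (no numeric label)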
def get_options():
"""Get arguments from command line.
Parameters
----------
Returns
-------
"""
parser = argparse.ArgumentParser(description="""
Trimming Fastq sequences tool
Usage Trimming:
    %prog -d [demultiplexed Folder] -b [BarCode_file.inp] -q [Quality threshold]\
-m [method] --output_fmt fasta
""")
parser.add_argument('-d', '--input_folder', action="store",
dest="input_folder", default=False, help='Folder \
contains demultiplexed folders and files', required=True)
parser.add_argument('-b', '--barcode_file', action="store",
dest="barcode_file", default=False, help='File that \
                        contains barcodes and constant regions', required=True)
parser.add_argument('-o', '--out_folder', action="store", dest="out_folder",
default='Sequences', help='Output folder, called \
Sequences by default')
# optional Arguments
parser.add_argument('-m', '--trimming_method', action="store",
dest="trimming_method", default='standard', type=str,
choices=['standard',
'dynamic'],
help="""standard Trimm sequences according barcode file configuration, ignores float window output files\n
dynamic Trimm sequences using file lenght label, or output of float window demultiplex """)
# Default 1
parser.add_argument('-q', '--quality', action="store",
dest="quality", default=30, type=int,
help='Quality reading threshold \
(default 30)')
parser.add_argument('--output_fmt', help='Output format, default fasta',
dest='output_fmt', default='fasta', action='store')
    parser.add_argument('--force-length', help='force a length and ignore the file label, overrides the dynamic option',
                        dest='force_length', default=False, action='store')
options = parser.parse_args()
return options
def main():
    """Pipeline control: parse arguments, set up logging and run the trimming workflow."""
opts = get_options()
# init logging
time_stamp = time.ctime()
seconds_time = int(time.time())
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt='%m-%d %H:%M',
filename= opts.input_folder+ '/Logs/Trimming_'+opts.input_folder.rpartition('/')[-1]+'_'+opts.barcode_file+'_{}.log'.format(seconds_time),
filemode='w')
logger = logging.getLogger(__name__)
logger.info('JOB START {4} {1} {2} {0} {3}'.format(*time_stamp.split()))
# DEMULTIPLEX
# Check inputs
# Load Barcodes info
# check barcodes integrity, peplength, fastq
barcodes_list = barcodes.read(opts.barcode_file)
# make output folder
# Init Logging
logger.info('#### TRIMMING ####')
# incompatible
logger.info('Method: {}'.format(opts.trimming_method))
logger.info('Quality threshold: {}'.format(opts.quality))
logger.info('Output format: {}'.format(opts.output_fmt))
#
logger.info('Barcode file: {}'.format(opts.barcode_file))
logger.info('Input folder: {}'.format(opts.input_folder))
output_folder = opts.input_folder+'/'+opts.out_folder
logger.info('Output folder: {}'.format(output_folder))
    logger.info('Force target length: %s', opts.force_length)
# foreach sample in barcodes
for barcode in barcodes_list:
        logger.info('Trimming Sample: {}'.format(barcode.id))
# folder must == sample id in the barcode
# TODO: need to improve this line, it can be problematic
working_folder = './'+opts.input_folder+'/'+barcode.id+'/'
# get all fastq under the folder
for demultiplexed_fastq in os.listdir(working_folder):
            # TODO: only pick up fastq files
            # TODO: only keep files with the target length
            # if the method is dynamic, process all the files in the folder
            if opts.trimming_method == 'dynamic':
                # read the target length from the filename
seq_length = get_length_label(demultiplexed_fastq)
                # modify the target size
                # skip files without a parseable length label
                if seq_length:
                    # per-length output folder
                    dir_demultiplexed_fastq = working_folder+demultiplexed_fastq
# trim!
                    trimming(dir_demultiplexed_fastq,
barcode,
quality_threshold= opts.quality,
trgt_len= seq_length,
output_fmt= opts.output_fmt,
output_folder=output_folder+'_'+str(seq_length))
# raw_name = demultiplexed_file.replace('_F.fastq','')
# read the length from the file
elif opts.trimming_method == 'standard':
# Trim time
                dir_demultiplexed_fastq = working_folder+demultiplexed_fastq
                # ignore files produced for a different (dynamic) target length
                seq_length = get_length_label(demultiplexed_fastq)
                if seq_length != barcode.trgt_len:
                    logger.info("file label and barcode length differ: %s SKIPPING FILE", demultiplexed_fastq)
continue
else:
                    logger.info('Trimming file: {}'.format(demultiplexed_fastq))
                    trimming(dir_demultiplexed_fastq,
barcode,
quality_threshold= opts.quality,
trgt_len= barcode.trgt_len,
output_fmt= opts.output_fmt,
output_folder=output_folder)
            # TODO: add multi-length trimming here
            elif opts.trimming_method == 'force':
                # TODO: this option can be useful in the future
                continue
            else:
                # unknown method
                pass
# DONE
time_stamp = time.ctime()
logger.info('JOB ENDS {4} {1} {2} {0} {3}'.format(*time_stamp.split()))
return
# def main():
# # Read arguments
# opts = get_options()
# # init logging
# time_stamp = time.ctime()
# logging.basicConfig(level=logging.INFO,
# format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
# datefmt='%m-%d %H:%M',
# filename= 'Trimming_'+opts.input_folder+'_'+opts.barcode_file+'_{4}_{1}_{2}_{0}_{3}.log'.format(*time_stamp.split()),
# filemode='w')
# logger = logging.getLogger(__name__)
# logger.info('JOB START {4} {1} {2} {0} {3}'.format(*time_stamp.split()))
# # DEMULTIPLEX
# workflow(opts)
# # DONE
# time_stamp = time.ctime()
# logger.info('JOB ENDS {4} {1} {2} {0} {3}'.format(*time_stamp.split()))
if __name__ == '__main__':
main()
| 592
| 0
| 46
|
feade5496f17453a160a194c258f2778a56f8b61
| 75
|
py
|
Python
|
checkov/yaml_doc/registry.py
|
pmalkki/checkov
|
b6cdf386dd976fe27c16fed6d550756a678a5d7b
|
[
"Apache-2.0"
] | null | null | null |
checkov/yaml_doc/registry.py
|
pmalkki/checkov
|
b6cdf386dd976fe27c16fed6d550756a678a5d7b
|
[
"Apache-2.0"
] | null | null | null |
checkov/yaml_doc/registry.py
|
pmalkki/checkov
|
b6cdf386dd976fe27c16fed6d550756a678a5d7b
|
[
"Apache-2.0"
] | null | null | null |
from checkov.yaml_doc.base_registry import Registry
registry = Registry()
| 18.75
| 51
| 0.826667
|
from checkov.yaml_doc.base_registry import Registry
registry = Registry()
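# Module-level singleton: yaml_doc-based checks are meant to register
# themselves against this shared instance when they are imported (the
# registration itself happens in checkov's base classes, not in this file).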
| 0
| 0
| 0
|
223e2dd85d17fc5cef76030696a77e0b1f297257
| 497
|
py
|
Python
|
api/v1/internal.py
|
anthill-gaming/game_controller
|
849ea700263d7724d7a66907e0961956940e6c64
|
[
"MIT"
] | null | null | null |
api/v1/internal.py
|
anthill-gaming/game_controller
|
849ea700263d7724d7a66907e0961956940e6c64
|
[
"MIT"
] | null | null | null |
api/v1/internal.py
|
anthill-gaming/game_controller
|
849ea700263d7724d7a66907e0961956940e6c64
|
[
"MIT"
] | null | null | null |
"""
Internal api methods for current service.
Example:
from anthill.platform.api.internal import as_internal, InternalAPI
@as_internal()
async def your_internal_api_method(api: InternalAPI, *params, **options):
# current_service = api.service
...
"""
from anthill.platform.api.internal import as_internal, InternalAPI
| 20.708333
| 77
| 0.714286
|
"""
Internal api methods for current service.
Example:
from anthill.platform.api.internal import as_internal, InternalAPI
@as_internal()
async def your_internal_api_method(api: InternalAPI, *params, **options):
# current_service = api.service
...
"""
from anthill.platform.api.internal import as_internal, InternalAPI
@as_internal()
async def spawn(api: InternalAPI, **options):
pass
@as_internal()
async def terminate(api: InternalAPI, **options):
pass
| 70
| 0
| 44
|
4842a357559df39c5885b5a0a2d27b724cb94ce7
| 12,748
|
py
|
Python
|
tf-model-manip.py
|
PeiqinSun/tf-tutorials
|
4d3a9560bce018989e62e9146d63e8fe16eaed91
|
[
"Apache-2.0"
] | 184
|
2019-02-25T09:03:30.000Z
|
2020-05-20T12:30:00.000Z
|
tf-model-manip.py
|
megvii-research/tf-tutorials
|
4d3a9560bce018989e62e9146d63e8fe16eaed91
|
[
"Apache-2.0"
] | 73
|
2019-02-28T02:51:14.000Z
|
2020-04-08T10:48:07.000Z
|
tf-model-manip.py
|
PeiqinSun/tf-tutorials
|
4d3a9560bce018989e62e9146d63e8fe16eaed91
|
[
"Apache-2.0"
] | 103
|
2019-02-28T09:05:21.000Z
|
2020-05-18T13:22:10.000Z
|
#!/usr/bin/env mdl
# -*- coding: utf-8 -*-
# =======================================
# File Name :
# Purpose :
# Creation Date :
# Last Modified :
# Created By : sunpeiqin
# =======================================
import os
import sys
import argparse
import magic
import keyword
import importlib
import collections
import re
import tabulate
import numpy as np
import tensorflow as tf
def import_python_source_as_module(fpath, mod_name=None):
""" import a python source as a module; its directory is added to
``sys.path`` during importing, and ``sys.path`` would be restored
afterwards.
Modules newly loaded in the same directory as *fpath* would have an
attribute `__dynamic_loaded_by_spq__` set to 1, and fpath itself would
have that value set to 2.
:type fpath: str
:param fpath: python source file path
:type mod_name: str or None
:param mod_name: target module name; if it exists in `sys.modules`, the
corresponding module would be directly returned; otherwise it is added
to ``sys.modules`` afterward. If it is None, module name would be
derived from *fpath* by replacing '/' to '.' and special chars to '_'
"""
fpath = os.path.realpath(fpath)
if mod_name is None:
# automatically generate mod_name
mod_name = []
for i in fpath.split(os.path.sep):
v = ''
for j in i:
if not j.isidentifier() and not j.isdigit():
j = '_'
v += j
if not v.isidentifier() or keyword.iskeyword(v):
v = '_' + v
assert v.isidentifier() and not keyword.iskeyword(v), (
'failed to convert to python identifier: in={} out={}'.format(
i, v))
mod_name.append(v)
mod_name = '_'.join(mod_name)
if mod_name in sys.modules:
return sys.modules[mod_name]
old_path = sys.path[:]
mod_dir = os.path.dirname(fpath)
sys.path.append(mod_dir)
old_mod_names = set(sys.modules.keys())
try:
final_mod = importlib.machinery.SourceFileLoader(
mod_name, fpath).load_module()
finally:
sys.path.remove(mod_dir)
sys.modules[mod_name] = final_mod
for name, mod in list(sys.modules.items()):
if name in old_mod_names:
continue
try:
fpath = getattr(mod, '__file__', None)
except Exception as exc:
print('caught exception {} while trying to get '
'read __file__ attr from {}'.format(repr(exc), name))
continue
if fpath is not None and (
os.path.dirname(os.path.realpath(fpath)).startswith(mod_dir)):
try:
mod.__dynamic_loaded_by_spq__ = 1
except Exception:
pass
try:
final_mod.__dynamic_loaded_by_spq__ = 2
except Exception:
pass
return final_mod
def load_network(network, get_kwargs={}):
'''load a model defined by model.py'''
network = os.path.realpath(network)
mf = magic.from_file(network, mime=True)
mf = mf.decode('utf-8') if isinstance(mf, bytes) else mf
if mf.startswith('text'):
return import_python_source_as_module(network).Model().build()
else:
print('Only supports a model.py which defines a network')
exit(0)
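# Example invocation (assuming a model.py whose Model class exposes a build()
# method, which is what load_network expects):
#   python tf-model-manip.py path/to/model.py info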
if __name__ == "__main__":
actions = [InfoAction,]
parser = argparse.ArgumentParser()
parser.add_argument('network')
subparsers = parser.add_subparsers(help='action')
for i in actions:
i.add_subparser(subparsers)
args = parser.parse_args()
# load network
load_network(args.network)
if hasattr(args, 'func'):
args.func(args)
else:
print('no action given')
| 34.361186
| 122
| 0.547145
|
#!/usr/bin/env mdl
# -*- coding: utf-8 -*-
# =======================================
# File Name :
# Purpose :
# Creation Date :
# Last Modified :
# Created By : sunpeiqin
# =======================================
import os
import sys
import argparse
import magic
import keyword
import importlib
import collections
import re
import tabulate
import numpy as np
import tensorflow as tf
def sizeof_fmt(num, suffix='B'):
for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']:
if abs(num) < 1024.0:
return "{:3.3f} {}{}".format(num, unit, suffix)
num /= 1024.0
sign_str = '-' if num < 0 else ''
return "{}{:.1f} {}{}".format(sign_str, num, 'Yi', suffix)
def import_python_source_as_module(fpath, mod_name=None):
""" import a python source as a module; its directory is added to
``sys.path`` during importing, and ``sys.path`` would be restored
afterwards.
Modules newly loaded in the same directory as *fpath* would have an
attribute `__dynamic_loaded_by_spq__` set to 1, and fpath itself would
have that value set to 2.
:type fpath: str
:param fpath: python source file path
:type mod_name: str or None
:param mod_name: target module name; if it exists in `sys.modules`, the
corresponding module would be directly returned; otherwise it is added
to ``sys.modules`` afterward. If it is None, module name would be
derived from *fpath* by replacing '/' to '.' and special chars to '_'
"""
fpath = os.path.realpath(fpath)
if mod_name is None:
# automatically generate mod_name
mod_name = []
for i in fpath.split(os.path.sep):
v = ''
for j in i:
if not j.isidentifier() and not j.isdigit():
j = '_'
v += j
if not v.isidentifier() or keyword.iskeyword(v):
v = '_' + v
assert v.isidentifier() and not keyword.iskeyword(v), (
'failed to convert to python identifier: in={} out={}'.format(
i, v))
mod_name.append(v)
mod_name = '_'.join(mod_name)
if mod_name in sys.modules:
return sys.modules[mod_name]
old_path = sys.path[:]
mod_dir = os.path.dirname(fpath)
sys.path.append(mod_dir)
old_mod_names = set(sys.modules.keys())
try:
final_mod = importlib.machinery.SourceFileLoader(
mod_name, fpath).load_module()
finally:
sys.path.remove(mod_dir)
sys.modules[mod_name] = final_mod
for name, mod in list(sys.modules.items()):
if name in old_mod_names:
continue
try:
fpath = getattr(mod, '__file__', None)
except Exception as exc:
print('caught exception {} while trying to get '
'read __file__ attr from {}'.format(repr(exc), name))
continue
if fpath is not None and (
os.path.dirname(os.path.realpath(fpath)).startswith(mod_dir)):
try:
mod.__dynamic_loaded_by_spq__ = 1
except Exception:
pass
try:
final_mod.__dynamic_loaded_by_spq__ = 2
except Exception:
pass
return final_mod
def load_network(network, get_kwargs={}):
'''load a model defined by model.py'''
network = os.path.realpath(network)
mf = magic.from_file(network, mime=True)
mf = mf.decode('utf-8') if isinstance(mf, bytes) else mf
if mf.startswith('text'):
return import_python_source_as_module(network).Model().build()
else:
print('Only supports a model.py which defines a network')
exit(0)
def compute_receptiveField_and_stride(nodes):
stride_list = []
receptive_field_list = []
new_nodes = collections.OrderedDict()
for k, v_dict in nodes.items():
data_format = v_dict.get('data_format', None)
ksize = v_dict.get('ksize', [])
shape = v_dict.get('shape', [])
strides = v_dict.get('strides', [])
if data_format == 'NHWC':
h_stride, w_stride = strides[1], strides[2]
if ksize:
h_size, w_size = ksize[1], ksize[2]
else:
h_size, w_size = shape[0], shape[1]
elif data_format == 'NCHW':
h_stride, w_stride = strides[2], strides[3]
if ksize:
h_size, w_size = ksize[2], ksize[3]
else:
h_size, w_size = shape[0], shape[1]
else:
continue
if not stride_list:
receptive_field_list.append((h_size, w_size))
stride_list.append((h_stride, w_stride))
else:
pre_s = stride_list[-1]
pre_rf = receptive_field_list[-1]
stride_list.append((h_stride * pre_s[0], w_stride * pre_s[1]))
receptive_field_list.append((h_size * pre_s[0] + pre_rf[0] - pre_s[0],
w_size * pre_s[1] + pre_rf[1] - pre_s[1]))
nodes[k].update({
'receptive_field': receptive_field_list[-1],
'g_stride': stride_list[-1],
})
new_nodes.update({k:nodes[k]})
return new_nodes
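# Worked example of the recurrence above: two stacked 3x3 convs with stride 1.
# Layer 1: rf = (3, 3), global stride = (1, 1).
# Layer 2: rf = 3*1 + 3 - 1 = 5 per axis -> (5, 5), global stride still (1, 1).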
class InfoAction:
@classmethod
def add_subparser(cls, subparsers):
parser = subparsers.add_parser(
'info', help='view some summary infomation in text')
parser.set_defaults(func=cls.run)
@classmethod
def run(cls, args):
sess = tf.Session()
sess.run(tf.global_variables_initializer()) # must init graph
cls._cache = collections.OrderedDict()
cls.param_stats(sess)
cls.flops_stats(sess)
cls.summary(sess)
@classmethod
def summary(cls, sess):
data = [['item', 'value']]
data.extend(list(cls._cache.items()))
print('\n'*2)
print('summary\n' + tabulate.tabulate(data))
@classmethod
def param_stats(cls, sess, bar_length_max=20):
tot_param_dim, param_size_bit = 0, 0
data = []
for param in tf.trainable_variables():
value = sess.run(param)
param_dim = np.prod(value.shape)
tot_param_dim += int(param_dim)
nbits = int(re.findall(r"\d+", str(param.dtype))[0])
param_size_bit += param_dim * nbits
# fill data
data.append(dict(
name=param.name,
shape=param.get_shape(),
param_dim=param_dim,
param_type=param.dtype,
size=sizeof_fmt(param_dim * nbits / 8),
size_cum=sizeof_fmt(tot_param_dim * nbits / 8),
mean='{:.2g}'.format(value.mean()),
std='{:.2g}'.format(value.std()),
))
for d in data:
ratio = d['param_dim'] / tot_param_dim
d['ratio'] = ratio
d['percentage'] = '{:.2f}%'.format(ratio * 100)
# construct bar
max_ratio = max([d['ratio'] for d in data])
for d in data:
bar_length = int(d['ratio'] / max_ratio * bar_length_max)
d['size_bar'] = '#' * bar_length
param_size = sizeof_fmt(param_size_bit / 8)
data.append(dict(
name='total',
param_dim=tot_param_dim,
size=param_size,
))
cls._cache['#params'] = len(data)
cls._cache['tot_param_dim'] = tot_param_dim
cls._cache['param_size'] = param_size
cls._param_size = param_size_bit / 8
header = [
'name', 'shape', 'mean', 'std', 'param_dim', 'size', 'size_cum',
'percentage', 'size_bar'
]
# make a table
print('\n'*2)
print('param stats: \n' + tabulate.tabulate(
cls._dict2table(data, header=header)))
@classmethod
def _dict2table(self, list_of_dict, header):
table_data = [header]
for d in list_of_dict:
row = []
for h in header:
v = ''
if h in d:
v = d[h]
row.append(v)
table_data.append(row)
return table_data
@classmethod
def flops_stats(cls, sess, bar_length_max=20):
nodes = [n for n in tf.get_default_graph().as_graph_def(add_shapes=True).node]
cls._cache['#nodes'] = len(nodes)
    # get nodes which can affect receptive field and stride
rf_nodes = collections.OrderedDict()
for n in nodes:
if n.op in ['Conv2D', 'VariableV2']:
name_scope = '/'.join(n.name.split('/')[:-1])
if name_scope not in rf_nodes.keys():
rf_nodes[name_scope] = {}
if 'shape' in n.attr.keys() and not rf_nodes[name_scope].get('shape', []):
rf_nodes[name_scope].update(shape=[i.size for i in n.attr['shape'].shape.dim])
if 'strides' in n.attr.keys():
rf_nodes[name_scope].update(strides=list(n.attr['strides'].list.i))
rf_nodes[name_scope].update(data_format=n.attr['data_format'].s.decode('utf-8'))
rf_nodes[name_scope].update(operator=n)
if n.op in ['MaxPool', 'AvgPool']:
rf_nodes[n.name] = {
'ksize': list(n.attr['ksize'].list.i),
'strides': list(n.attr['ksize'].list.i),
'data_format': n.attr['data_format'].s.decode('utf-8'),
'operator': n,
}
rf_nodes = compute_receptiveField_and_stride(rf_nodes)
# find the input node (only data)
for n in nodes:
if n.op == 'Placeholder':
input_shape = [i.size for i in n.attr['shape'].shape.dim][1:]
break
for k, v_dict in rf_nodes.items():
if v_dict['data_format'] == 'NHWC':
v_dict['input_shape'] = input_shape
v_dict['output_shape'] = [i.size for i in v_dict['operator'].attr['_output_shapes'].list.shape[0].dim][1:]
elif v_dict['data_format'] == 'NCHW':
pass
if v_dict['operator'].op in ['Conv2D']:
ic = v_dict['input_shape'][-1]
v_dict['flops'] = np.prod(v_dict['output_shape']) * ic * np.prod(v_dict['shape'][:2])
elif v_dict['operator'].op in ['MaxPool', 'AvgPool']:
v_dict['flops'] = 0
input_shape = v_dict['output_shape']
opr_info = []
total_flops = 0
for k, v_dict in rf_nodes.items():
total_flops += v_dict['flops']
opr_info.append({
'opr_name': v_dict['operator'].name,
'opr_class': v_dict['operator'].op,
'input_shapes': v_dict['input_shape'],
'output_shapes': v_dict['output_shape'],
'flops_num': v_dict['flops'],
'flops_cum': total_flops,
'receptive_field': v_dict['receptive_field'],
'stride': v_dict['g_stride']
})
flops = [i['flops_num'] for i in opr_info]
max_flops = max(flops + [0])
for i in opr_info:
f = i['flops_num']
i['flops'] = sizeof_fmt(f, suffix='OPs')
fc = i['flops_cum']
i['flops_cum'] = sizeof_fmt(fc, suffix='OPs')
r = i['ratio'] = f / total_flops
i['percentage'] = '{:.2f}%'.format(r * 100)
bar_length = int(f / max_flops * bar_length_max)
i['bar'] = '#' * bar_length
header = ['opr_name', 'opr_class', 'input_shapes', 'output_shapes', 'receptive_field',
'stride', 'flops', 'flops_cum', 'percentage', 'bar']
total_flops_str = sizeof_fmt(total_flops, suffix='OPs')
#total_var_size = sum(sum(s[1] for s in i['output_shapes']) for i in opr_info)
opr_info.append(dict(
opr_name='total',
flops=total_flops_str,
#output_shapes=total_var_size
))
cls._cache['total_flops'] = total_flops_str
cls._cache['flops/param_size'] = '{:.3g}'.format(
total_flops / cls._param_size)
print('\n'*2)
print('flops stats: \n' + tabulate.tabulate(
cls._dict2table(opr_info, header=header)))
if __name__ == "__main__":
actions = [InfoAction,]
parser = argparse.ArgumentParser()
parser.add_argument('network')
subparsers = parser.add_subparsers(help='action')
for i in actions:
i.add_subparser(subparsers)
args = parser.parse_args()
# load network
load_network(args.network)
if hasattr(args, 'func'):
args.func(args)
else:
print('no action given')
| 8,612
| 260
| 69
|
d607417a565fc6e36134e72eef7edfbbfe35876d
| 3,985
|
py
|
Python
|
preprocessing.py
|
pedrada88/rwe
|
a3462556a70bd4a51d2978cadc6101e22723356a
|
[
"BSD-Source-Code"
] | 15
|
2019-06-05T21:24:42.000Z
|
2021-01-04T00:30:29.000Z
|
preprocessing.py
|
pedrada88/rwe
|
a3462556a70bd4a51d2978cadc6101e22723356a
|
[
"BSD-Source-Code"
] | null | null | null |
preprocessing.py
|
pedrada88/rwe
|
a3462556a70bd4a51d2978cadc6101e22723356a
|
[
"BSD-Source-Code"
] | 1
|
2022-01-29T16:23:03.000Z
|
2022-01-29T16:23:03.000Z
|
# -*- coding: utf-8 -*-
import numpy as np
import random
#Load embedding vocabulary
#Load embedding vocabulary
#Load embeddings filtered by pre-given vocabulary
#Load embedding matrices input/output
#Split training and development data
| 38.68932
| 108
| 0.673275
|
# -*- coding: utf-8 -*-
import numpy as np
import random
import sys
#Load embedding vocabulary
def load_vocab_embeddings(input_path):
first_line=True
vocab=set()
    input_file_relations=open(input_path, 'r', encoding='utf-8')
for line in input_file_relations:
if first_line==True:
first_line=False
else:
vocab.add(line.strip().split(" ")[0])
return vocab
#Load embedding vocabulary
def load_word_vocab_from_relation_vectors(input_path):
pre_word_vocab=set()
first_line=True
input_file_relations=open(input_path, 'r', encoding='utf-8')
for line in input_file_relations:
linesplit=line.strip().split(" ")
if first_line==True:
first_line=False
else:
relation=linesplit[0]
if "__" not in relation: sys.exit("ERROR: Pair '"+relation+"' does not contain underscore")
relation_split=relation.rsplit("__",1)
word1=relation_split[0]
word2=relation_split[1]
pre_word_vocab.add(word1)
pre_word_vocab.add(word2)
return pre_word_vocab
#Load embeddings filtered by pre-given vocabulary
def load_embeddings_filtered_byvocab(input_path,vocab):
word2index={}
index2word={}
matrix_word_embeddings=[]
first_line=True
input_file_relations=open(input_path, 'r', encoding='utf-8')
cont=0
for line in input_file_relations:
linesplit=line.strip().split(" ")
if first_line==True:
dimensions=int(linesplit[1])
first_line=False
else:
word=linesplit[0]
if word in vocab and word not in word2index:
word2index[word]=cont
index2word[cont]=word
cont+=1
matrix_word_embeddings.append(np.asarray([float(dim) for dim in linesplit[1:dimensions+1]]))
return matrix_word_embeddings,word2index,index2word,dimensions
#Load embedding matrices input/output
def load_training_data(input_path,matrix_word_embeddings,word2index):
matrix_input=[]
matrix_output=[]
first_line=True
input_file_relations=open(input_path, 'r', encoding='utf-8')
for line in input_file_relations:
linesplit=line.strip().split(" ")
if first_line==True:
dimensions=int(str(line.split(" ")[1]))
first_line=False
else:
relation=linesplit[0]
if "__" not in relation: sys.exit("ERROR: Pair '"+relation+"' does not contain underscore")
relation_split=relation.rsplit("__",1)
word1=relation_split[0]
word2=relation_split[1]
if word1 in word2index and word2 in word2index:
matrix_input.append(np.asarray([word2index[word1],word2index[word2]]))
matrix_output.append(np.asarray([float(dim) for dim in linesplit[1:dimensions+1]]))
return matrix_input,matrix_output,dimensions
#Split training and development data
def split_training_data(matrix_input,matrix_output,devsize,batchsize):
matrix_input_train=[]
matrix_output_train=[]
matrix_input_dev=[]
matrix_output_dev=[]
num_instances=int((len(matrix_input)//batchsize)*batchsize)
final_size_dev=int(((num_instances*devsize)//batchsize)*batchsize)
final_size_train=int(((num_instances-final_size_dev)//batchsize)*batchsize)
print ("Size train set: "+str(final_size_train))
print ("Size dev set: "+str(final_size_dev))
all_instances=range(num_instances)
list_index_dev=random.sample(all_instances,final_size_dev)
for i in range(num_instances):
if i in list_index_dev:
matrix_input_dev.append(matrix_input[i])
matrix_output_dev.append(matrix_output[i])
else:
matrix_input_train.append(matrix_input[i])
matrix_output_train.append(matrix_output[i])
return matrix_input_train,matrix_output_train,matrix_input_dev,matrix_output_dev
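# Minimal usage sketch (hypothetical file paths; the expected inputs are
# word2vec-style text files, with relation vectors keyed as "word1__word2"):
#
#   vocab = load_word_vocab_from_relation_vectors('relation_vectors.txt')
#   embeddings, w2i, i2w, dims = load_embeddings_filtered_byvocab('word_vectors.txt', vocab)
#   x, y, out_dims = load_training_data('relation_vectors.txt', embeddings, w2i)
#   x_tr, y_tr, x_dev, y_dev = split_training_data(x, y, devsize=0.1, batchsize=32)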
| 3,633
| 0
| 110
|
eb783e2a63d00549231dcb398c0fd227b6e984a0
| 1,671
|
py
|
Python
|
cogs/mod_watch.py
|
MarTCM/ryuko-ng
|
f28ae5924065096efd72ec5c0d9af895b4310293
|
[
"MIT"
] | 11
|
2018-12-23T14:51:20.000Z
|
2019-03-03T21:13:26.000Z
|
cogs/mod_watch.py
|
MarTCM/ryuko-ng
|
f28ae5924065096efd72ec5c0d9af895b4310293
|
[
"MIT"
] | 47
|
2019-03-11T18:32:05.000Z
|
2021-12-08T17:50:38.000Z
|
cogs/mod_watch.py
|
MarTCM/ryuko-ng
|
f28ae5924065096efd72ec5c0d9af895b4310293
|
[
"MIT"
] | 26
|
2019-03-11T17:04:05.000Z
|
2022-03-08T09:35:38.000Z
|
import discord
from discord.ext import commands
from discord.ext.commands import Cog
from helpers.checks import check_if_staff
from helpers.userlogs import setwatch
| 35.553191
| 76
| 0.655895
|
import discord
from discord.ext import commands
from discord.ext.commands import Cog
from helpers.checks import check_if_staff
from helpers.userlogs import setwatch
class ModWatch(Cog):
def __init__(self, bot):
self.bot = bot
@commands.guild_only()
@commands.check(check_if_staff)
@commands.command()
async def watch(self, ctx, target: discord.Member, *, note: str = ""):
"""Puts a user under watch, staff only."""
setwatch(target.id, ctx.author, True, target.name)
await ctx.send(f"{ctx.author.mention}: user is now on watch.")
@commands.guild_only()
@commands.check(check_if_staff)
@commands.command()
async def watchid(self, ctx, target: int, *, note: str = ""):
"""Puts a user under watch by userid, staff only."""
        # target is a bare user ID here, so there is no Member object to read a name from
        setwatch(target, ctx.author, True, str(target))
        await ctx.send(f"{ctx.author.mention}: user {target} is now on watch.")
@commands.guild_only()
@commands.check(check_if_staff)
@commands.command()
async def unwatch(self, ctx, target: discord.Member, *, note: str = ""):
"""Removes a user from watch, staff only."""
setwatch(target.id, ctx.author, False, target.name)
await ctx.send(f"{ctx.author.mention}: user is now not on watch.")
@commands.guild_only()
@commands.check(check_if_staff)
@commands.command()
async def unwatchid(self, ctx, target: int, *, note: str = ""):
"""Removes a user from watch by userid, staff only."""
        setwatch(target, ctx.author, False, str(target))
        await ctx.send(f"{ctx.author.mention}: user {target} is now not on watch.")
def setup(bot):
bot.add_cog(ModWatch(bot))
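# Loaded like any other discord.py extension from the bot entry point, e.g.:
#   bot.load_extension("cogs.mod_watch")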
| 51
| 1,407
| 46
|
89b0c8a21bc6d8dd82ceda3dad99a30e2b867960
| 4,219
|
py
|
Python
|
research/carls/candidate_sampling/candidate_sampler_config_builder_test.py
|
srihari-humbarwadi/neural-structured-learning
|
345b8d644dd7745179263bf6dc9aeb8a921528f4
|
[
"Apache-2.0"
] | 939
|
2019-08-28T06:50:30.000Z
|
2022-03-30T02:37:07.000Z
|
research/carls/candidate_sampling/candidate_sampler_config_builder_test.py
|
srihari-humbarwadi/neural-structured-learning
|
345b8d644dd7745179263bf6dc9aeb8a921528f4
|
[
"Apache-2.0"
] | 80
|
2019-09-01T19:47:30.000Z
|
2022-02-02T20:38:38.000Z
|
research/carls/candidate_sampling/candidate_sampler_config_builder_test.py
|
srihari-humbarwadi/neural-structured-learning
|
345b8d644dd7745179263bf6dc9aeb8a921528f4
|
[
"Apache-2.0"
] | 196
|
2019-09-01T19:38:53.000Z
|
2022-02-08T01:25:57.000Z
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for candidate_sampler_config_builder."""
from research.carls.candidate_sampling import candidate_sampler_config_builder as cs_config_builder
from research.carls.candidate_sampling import candidate_sampler_config_pb2 as cs_config_pb2
import tensorflow as tf
if __name__ == '__main__':
tf.test.main()
| 34.024194
| 99
| 0.71178
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for candidate_sampler_config_builder."""
from research.carls.candidate_sampling import candidate_sampler_config_builder as cs_config_builder
from research.carls.candidate_sampling import candidate_sampler_config_pb2 as cs_config_pb2
import tensorflow as tf
class CandidateSamplerConfigBuilderTest(tf.test.TestCase):
def test_negative_sampler(self):
self.assertProtoEquals(
"""
unique: true
sampler: UNIFORM
""",
cs_config_builder.negative_sampler(
True, cs_config_pb2.NegativeSamplerConfig.UNIFORM))
self.assertProtoEquals(
"""
unique: false
sampler: UNIFORM
""",
cs_config_builder.negative_sampler(
False, cs_config_pb2.NegativeSamplerConfig.UNIFORM))
self.assertProtoEquals(
"""
unique: true
sampler: LOG_UNIFORM
""",
cs_config_builder.negative_sampler(
True, cs_config_pb2.NegativeSamplerConfig.LOG_UNIFORM))
self.assertProtoEquals(
"""
unique: false
sampler: LOG_UNIFORM
""",
cs_config_builder.negative_sampler(
False, cs_config_pb2.NegativeSamplerConfig.LOG_UNIFORM))
self.assertProtoEquals(
"""
unique: false
sampler: UNIFORM
""", cs_config_builder.negative_sampler(False, 'UNIFORM'))
self.assertProtoEquals(
"""
unique: true
sampler: LOG_UNIFORM
""", cs_config_builder.negative_sampler(True, 'LOG_UNIFORM'))
def test_brute_force_topk_sampler_success(self):
self.assertProtoEquals("""
similarity_type: COSINE
""", cs_config_builder.brute_force_topk_sampler('COSINE'))
self.assertProtoEquals(
"""
similarity_type: COSINE
""", cs_config_builder.brute_force_topk_sampler(cs_config_pb2.COSINE))
self.assertProtoEquals(
"""
similarity_type: DOT_PRODUCT
""", cs_config_builder.brute_force_topk_sampler('DOT_PRODUCT'))
self.assertProtoEquals(
"""
similarity_type: DOT_PRODUCT
""", cs_config_builder.brute_force_topk_sampler(cs_config_pb2.DOT_PRODUCT))
def test_brute_force_topk_sampler_failed(self):
with self.assertRaises(ValueError):
cs_config_builder.brute_force_topk_sampler(cs_config_pb2.UNKNOWN)
with self.assertRaises(ValueError):
cs_config_builder.brute_force_topk_sampler('Unknown type string')
with self.assertRaises(ValueError):
cs_config_builder.brute_force_topk_sampler(cs_config_pb2.SampleContext())
with self.assertRaises(ValueError):
cs_config_builder.brute_force_topk_sampler(999)
def test_build_candidate_sampler_config_success(self):
self.assertProtoEquals(
"""
extension {
[type.googleapis.com/carls.candidate_sampling.BruteForceTopkSamplerConfig] {
similarity_type: COSINE
}
}
""",
cs_config_builder.build_candidate_sampler_config(
cs_config_builder.brute_force_topk_sampler('COSINE')))
self.assertProtoEquals(
"""
extension {
[type.googleapis.com/carls.candidate_sampling.NegativeSamplerConfig] {
unique: true
sampler: UNIFORM
}
}
""",
cs_config_builder.build_candidate_sampler_config(
cs_config_builder.negative_sampler(True, 'UNIFORM')))
def test_build_candidate_sampler_config_failed(self):
with self.assertRaises(ValueError):
cs_config_builder.build_candidate_sampler_config(100)
with self.assertRaises(ValueError):
cs_config_builder.build_candidate_sampler_config('invalid')
if __name__ == '__main__':
tf.test.main()
| 3,144
| 37
| 148
|
3089c603282eb0dd2e940a59b5b3d380394bef44
| 4,460
|
py
|
Python
|
experiments/tuning/tune_came.py
|
antoineBarbez/Project
|
8fa42b5198d03b5b142f413e218b7d7a2d994fc9
|
[
"MIT"
] | 4
|
2019-09-30T19:47:42.000Z
|
2020-02-13T18:46:32.000Z
|
experiments/tuning/tune_came.py
|
antoineBarbez/CAME
|
8fa42b5198d03b5b142f413e218b7d7a2d994fc9
|
[
"MIT"
] | null | null | null |
experiments/tuning/tune_came.py
|
antoineBarbez/CAME
|
8fa42b5198d03b5b142f413e218b7d7a2d994fc9
|
[
"MIT"
] | null | null | null |
from context import ROOT_DIR, nnUtils, train_came, came
import tensorflow as tf
import numpy as np
import argparse
import os
import progressbar
import random
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
if __name__ == "__main__":
args = parse_args()
data_x, data_y = train_came.build_dataset(train_came.training_systems, args.antipattern, args.history_length)
data_x, data_y = nnUtils.shuffle(data_x, data_y)
bar = progressbar.ProgressBar(maxval=args.n_test, \
widgets=['Performing cross validation: ' ,progressbar.Percentage()])
bar.start()
output_file_path = os.path.join(ROOT_DIR, 'experiments', 'tuning', 'results', 'came_' + args.antipattern + '_' + str(args.history_length) + '.csv')
params = []
perfs = []
for i in range(args.n_test):
learning_rate, beta, gamma, nb_filters, kernel_sizes, pool_sizes, dense_sizes = generateRandomHyperParameters(args.history_length)
params.append([learning_rate, beta, gamma, nb_filters, kernel_sizes, pool_sizes, dense_sizes])
predictions = np.empty(shape=[0, 1])
for j in range(args.n_fold):
x_train, y_train, x_test, y_test = get_cross_validation_dataset(data_x, data_y, j, args.n_fold)
# New graph
tf.reset_default_graph()
# Create model
model = came.CAME(
nb_metrics=x_train.shape[-1],
history_length=args.history_length,
filters=nb_filters,
kernel_sizes=kernel_sizes,
pool_sizes=pool_sizes,
dense_sizes=dense_sizes)
with tf.Session() as session:
# Initialize the variables of the TensorFlow graph.
session.run(tf.global_variables_initializer())
train(
session=session,
model=model,
x_train=x_train,
y_train=y_train,
num_step=args.n_step,
lr=learning_rate,
beta=beta,
gamma=gamma)
predictions = np.concatenate((predictions, session.run(model.inference, feed_dict={model.input_x: x_test})), axis=0)
perfs.append(nnUtils.f_measure(predictions, data_y))
indexes = np.argsort(np.array(perfs))
with open(output_file_path, 'w') as file:
file.write("Learning rate;Beta;Gamma;Filters;Kernel;Pool;Dense;F-measure\n")
for j in reversed(indexes):
for k in range(len(params[j])):
file.write(str(params[j][k]) + ';')
file.write(str(perfs[j]) + '\n')
bar.update(i+1)
bar.finish()
| 33.533835
| 148
| 0.717937
|
from context import ROOT_DIR, nnUtils, train_came, came
import tensorflow as tf
import numpy as np
import argparse
import os
import progressbar
import random
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("antipattern", help="either 'god_class' or 'feature_envy'")
parser.add_argument("history_length", type=int)
parser.add_argument("-n_fold", type=int, default=5)
parser.add_argument("-n_step", type=int, default=100)
parser.add_argument("-n_test", type=int, default=100)
return parser.parse_args()
def generateRandomHyperParameters(history_length):
learning_rate = 10**-random.uniform(0.0, 2.5)
beta = 10**-random.uniform(0.0, 2.5)
gamma = random.randint(1, 10)
nb_filters = []
kernel_sizes = []
pool_sizes = []
nb_conv_layer = 0 if history_length <= 1 else random.randint(0,1) if history_length <= 10 else random.randint(1,2) if history_length <= 100 else 2
for _ in range(nb_conv_layer):
nb_filter = random.randint(10,60)
kernel_size = random.randint(2,4)
pool_size = random.choice([2, 5, 10]) if history_length <=100 else random.choice([5, 10, 15, 20])
nb_filters.append(nb_filter)
kernel_sizes.append(kernel_size)
pool_sizes.append(pool_size)
minBound = 4
maxBound = 100
dense_sizes = []
nb_dense_layer = random.randint(1, 3)
for _ in range(nb_dense_layer):
dense_size = random.randint(minBound, maxBound)
dense_sizes.append(dense_size)
maxBound = dense_size
return learning_rate, beta, gamma, nb_filters, kernel_sizes, pool_sizes, dense_sizes
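# Note on the sampling above: 10**-random.uniform(0.0, 2.5) draws learning_rate
# and beta log-uniformly from roughly [0.003, 1.0], a standard choice for
# random hyper-parameter search.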
def get_cross_validation_dataset(X, Y, fold_index, n_fold):
folds_x, folds_y = nnUtils.split(X, Y, n_fold)
x_train = np.empty(shape=[0, X.shape[1], X.shape[2]])
y_train = np.empty(shape=[0, 1])
for i in range(n_fold):
if i != fold_index:
x_train = np.concatenate((x_train, folds_x[i]), axis=0)
y_train = np.concatenate((y_train, folds_y[i]), axis=0)
return x_train, y_train, folds_x[fold_index], folds_y[fold_index]
def train(session, model, x_train, y_train, num_step, lr, beta, gamma):
learning_rate = lr
for step in range(num_step):
feed_dict_train = {
model.input_x: x_train,
model.input_y: y_train,
model.learning_rate:learning_rate,
model.beta:beta,
model.gamma:gamma}
session.run(model.learning_step, feed_dict=feed_dict_train)
if __name__ == "__main__":
args = parse_args()
data_x, data_y = train_came.build_dataset(train_came.training_systems, args.antipattern, args.history_length)
data_x, data_y = nnUtils.shuffle(data_x, data_y)
bar = progressbar.ProgressBar(maxval=args.n_test, \
widgets=['Performing cross validation: ' ,progressbar.Percentage()])
bar.start()
output_file_path = os.path.join(ROOT_DIR, 'experiments', 'tuning', 'results', 'came_' + args.antipattern + '_' + str(args.history_length) + '.csv')
params = []
perfs = []
for i in range(args.n_test):
learning_rate, beta, gamma, nb_filters, kernel_sizes, pool_sizes, dense_sizes = generateRandomHyperParameters(args.history_length)
params.append([learning_rate, beta, gamma, nb_filters, kernel_sizes, pool_sizes, dense_sizes])
predictions = np.empty(shape=[0, 1])
for j in range(args.n_fold):
x_train, y_train, x_test, y_test = get_cross_validation_dataset(data_x, data_y, j, args.n_fold)
# New graph
tf.reset_default_graph()
# Create model
model = came.CAME(
nb_metrics=x_train.shape[-1],
history_length=args.history_length,
filters=nb_filters,
kernel_sizes=kernel_sizes,
pool_sizes=pool_sizes,
dense_sizes=dense_sizes)
with tf.Session() as session:
# Initialize the variables of the TensorFlow graph.
session.run(tf.global_variables_initializer())
train(
session=session,
model=model,
x_train=x_train,
y_train=y_train,
num_step=args.n_step,
lr=learning_rate,
beta=beta,
gamma=gamma)
predictions = np.concatenate((predictions, session.run(model.inference, feed_dict={model.input_x: x_test})), axis=0)
perfs.append(nnUtils.f_measure(predictions, data_y))
indexes = np.argsort(np.array(perfs))
with open(output_file_path, 'w') as file:
file.write("Learning rate;Beta;Gamma;Filters;Kernel;Pool;Dense;F-measure\n")
for j in reversed(indexes):
for k in range(len(params[j])):
file.write(str(params[j][k]) + ';')
file.write(str(perfs[j]) + '\n')
bar.update(i+1)
bar.finish()
| 2,067
| 0
| 92
|
183dfb78ccd3bcd30e74ce6de360e7e3b9981c39
| 2,304
|
py
|
Python
|
tests/test_paginators.py
|
thesadru/genshin.py
|
806b8d0dd059a06605e66dead917fdf550a552bc
|
[
"MIT"
] | 63
|
2021-10-04T19:53:54.000Z
|
2022-03-30T07:21:03.000Z
|
tests/test_paginators.py
|
thesadru/genshin.py
|
806b8d0dd059a06605e66dead917fdf550a552bc
|
[
"MIT"
] | 17
|
2021-11-16T20:42:52.000Z
|
2022-03-31T10:11:52.000Z
|
tests/test_paginators.py
|
thesadru/genshin.py
|
806b8d0dd059a06605e66dead917fdf550a552bc
|
[
"MIT"
] | 10
|
2021-10-16T22:41:41.000Z
|
2022-02-19T17:55:23.000Z
|
import typing
import pytest
from genshin import paginators
| 28.097561
| 92
| 0.677951
|
import typing
import pytest
from genshin import paginators
class CountingPaginator(paginators.Paginator[int]):
_index = 0
async def __anext__(self) -> int:
if self._index >= 5:
self._complete()
self._index += 1
return self._index
@pytest.fixture(name="counting_paginator")
def counting_paginator_fixture():
return CountingPaginator()
async def test_paginator_iter(counting_paginator: paginators.Paginator[int]):
async for value in counting_paginator:
assert 1 <= value <= 5
async def test_paginator_flatten():
paginator = CountingPaginator()
assert await paginator.flatten() == [1, 2, 3, 4, 5]
paginator = CountingPaginator()
assert await paginator == [1, 2, 3, 4, 5]
async def test_paginator_next(counting_paginator: paginators.Paginator[int]):
assert await counting_paginator.next() == 1
async def test_paginator_next_empty():
paginator = paginators.base.BasicPaginator(())
with pytest.raises(StopAsyncIteration):
await paginator.__anext__()
with pytest.raises(LookupError):
await paginator.next()
async def test_buffered_paginator():
class MockBufferedPaginator(paginators.BufferedPaginator[int]):
async def next_page(self) -> typing.Sequence[int]:
index = self._counter - 1
return list(range(index, index + 5))
paginator = MockBufferedPaginator(limit=12)
assert not paginator.exhausted
values = await paginator.flatten()
assert values == list(range(0, 12))
assert paginator.exhausted
async def test_merged_paginator():
# from heapq.merge doc
sequences = [[1, 3, 5, 7], [0, 2, 4, 8], [5, 10, 15, 20], [], [25]]
iterators = [paginators.base.aiterate(x) for x in sequences]
paginator = paginators.MergedPaginator(iterators)
assert await paginator.flatten() == [0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25]
async def test_merged_paginator_with_key():
# from heapq.merge doc
sequences = [["dog", "horse"], [], ["cat", "fish", "kangaroo"], ["rhinoceros"]]
iterators = [paginators.base.aiterate(x) for x in sequences]
paginator = paginators.MergedPaginator(iterators, key=len, limit=5)
assert await paginator.flatten(lazy=True) == ["dog", "cat", "fish", "horse", "kangaroo"]
| 1,912
| 72
| 206
|
9c0b38982301d37af313b38f7a230a9f3dcabbd3
| 5,190
|
py
|
Python
|
src/main/python/twitter/thermos/core/muxer.py
|
isomer/incubator-aurora
|
5f54d4de25413bb18acec16120eb18f3e08c6bf0
|
[
"Apache-2.0"
] | null | null | null |
src/main/python/twitter/thermos/core/muxer.py
|
isomer/incubator-aurora
|
5f54d4de25413bb18acec16120eb18f3e08c6bf0
|
[
"Apache-2.0"
] | null | null | null |
src/main/python/twitter/thermos/core/muxer.py
|
isomer/incubator-aurora
|
5f54d4de25413bb18acec16120eb18f3e08c6bf0
|
[
"Apache-2.0"
] | null | null | null |
import os
import errno
from twitter.common import log
from twitter.common.recordio import ThriftRecordReader
from gen.twitter.thermos.ttypes import RunnerCkpt
| 36.293706
| 95
| 0.668015
|
import os
import errno
from twitter.common import log
from twitter.common.recordio import ThriftRecordReader
from gen.twitter.thermos.ttypes import RunnerCkpt
class ProcessMuxer(object):
class ProcessExists(Exception): pass
class ProcessNotFound(Exception): pass
class CorruptCheckpoint(Exception): pass
def __init__(self, pathspec):
self._processes = {} # process_name => fp
self._watermarks = {} # process_name => sequence high watermark
self._pathspec = pathspec
def __del__(self):
for fp in filter(None, self._processes.values()):
fp.close()
def register(self, process_name, watermark=0):
log.debug('registering %s' % process_name)
if process_name in self._processes:
raise ProcessMuxer.ProcessExists("Process %s is already registered" % process_name)
self._processes[process_name] = None
self._watermarks[process_name] = watermark
def _bind_processes(self):
for process_name, fp in self._processes.items():
if fp is None:
process_ckpt = self._pathspec.given(process=process_name).getpath('process_checkpoint')
log.debug('ProcessMuxer binding %s => %s' % (process_name, process_ckpt))
try:
self._processes[process_name] = open(process_ckpt, 'r')
except IOError as e:
if e.errno == errno.ENOENT:
log.debug(' => bind failed, checkpoint not available yet.')
continue
else:
log.error("Unexpected inability to open %s! %s" % (process_ckpt, e))
except Exception as e:
log.error("Unexpected inability to open %s! %s" % (process_ckpt, e))
self._fast_forward_stream(process_name)
def _fast_forward_stream(self, process_name):
log.debug('Fast forwarding %s stream to seq=%s' % (process_name,
self._watermarks[process_name]))
assert self._processes.get(process_name) is not None
fp = self._processes[process_name]
rr = ThriftRecordReader(fp, RunnerCkpt)
current_watermark = -1
records = 0
while current_watermark < self._watermarks[process_name]:
last_pos = fp.tell()
record = rr.try_read()
if record is None:
break
new_watermark = record.process_status.seq
if new_watermark > self._watermarks[process_name]:
log.debug('Over-seeked %s [watermark = %s, high watermark = %s], rewinding.' % (
process_name, new_watermark, self._watermarks[process_name]))
fp.seek(last_pos)
break
current_watermark = new_watermark
records += 1
if current_watermark < self._watermarks[process_name]:
log.warning('Only able to fast forward to %s@sequence=%s, high watermark is %s' % (
process_name, current_watermark, self._watermarks[process_name]))
if records:
log.debug('Fast forwarded %s %s record(s) to seq=%s.' % (process_name, records,
current_watermark))
def unregister(self, process_name):
log.debug('unregistering %s' % process_name)
if process_name not in self._processes:
raise ProcessMuxer.ProcessNotFound("No trace of process: %s" % process_name)
else:
self._watermarks.pop(process_name)
fp = self._processes.pop(process_name)
if fp is not None:
fp.close()
def has_data(self, process):
"""
Return true if we think that there are updates available from the supplied process.
"""
self._bind_processes()
# TODO(wickman) Should this raise ProcessNotFound?
if process not in self._processes:
return False
fp = self._processes[process]
rr = ThriftRecordReader(fp, RunnerCkpt)
old_pos = fp.tell()
try:
      os.fstat(fp.fileno())  # just probing that the descriptor is still valid
    except OSError:
log.debug('ProcessMuxer could not fstat for process %s' % process)
return False
update = rr.try_read()
if update:
fp.seek(old_pos)
return True
return False
def select(self):
"""
Read and multiplex checkpoint records from all the forked off process coordinators.
Checkpoint records can come from one of two places:
in-process: checkpoint records synthesized for FORKED and LOST events
out-of-process: checkpoint records from from file descriptors of forked coordinators
Returns a list of RunnerCkpt objects that were successfully read, or an empty
list if none were read.
"""
self._bind_processes()
updates = []
for handle in filter(None, self._processes.values()):
try:
fstat = os.fstat(handle.fileno())
except OSError as e:
log.error('Unable to fstat %s!' % handle.name)
continue
if handle.tell() > fstat.st_size:
log.error('Truncated checkpoint record detected on %s!' % handle.name)
elif handle.tell() < fstat.st_size:
rr = ThriftRecordReader(handle, RunnerCkpt)
while True:
process_update = rr.try_read()
if process_update:
updates.append(process_update)
else:
break
if len(updates) > 0:
log.debug('select() returning %s updates:' % len(updates))
for update in updates:
log.debug(' = %s' % update)
return updates
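# Typical driver loop (sketch only; in practice the Thermos TaskRunner owns this):
#   muxer = ProcessMuxer(pathspec)
#   muxer.register('my_process', watermark=0)
#   for ckpt in muxer.select():
#       ...  # apply each RunnerCkpt update to the runner state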
| 2,785
| 2,221
| 23
|
35765a3f52057a1d8c00d42bc632985e8ea22e07
| 4,364
|
py
|
Python
|
confluent_server/confluent/discovery/handlers/imm.py
|
brianfinley/confluent
|
6458eac93b1e3c6d45e26a7ddb434d692b5cdff2
|
[
"Apache-2.0"
] | 27
|
2015-02-11T13:56:46.000Z
|
2021-12-28T14:17:20.000Z
|
confluent_server/confluent/discovery/handlers/imm.py
|
brianfinley/confluent
|
6458eac93b1e3c6d45e26a7ddb434d692b5cdff2
|
[
"Apache-2.0"
] | 32
|
2015-09-23T13:19:04.000Z
|
2022-03-15T13:50:45.000Z
|
confluent_server/confluent/discovery/handlers/imm.py
|
brianfinley/confluent
|
6458eac93b1e3c6d45e26a7ddb434d692b5cdff2
|
[
"Apache-2.0"
] | 24
|
2015-07-14T20:41:55.000Z
|
2021-07-15T04:18:51.000Z
|
# Copyright 2017 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import codecs
import confluent.discovery.handlers.bmc as bmchandler
import pyghmi.exceptions as pygexc
import pyghmi.ipmi.private.util as pygutil
import confluent.util as util
import struct
| 38.964286
| 77
| 0.552704
|
# Copyright 2017 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import codecs
import confluent.discovery.handlers.bmc as bmchandler
import pyghmi.exceptions as pygexc
import pyghmi.ipmi.private.util as pygutil
import confluent.util as util
import struct
class NodeHandler(bmchandler.NodeHandler):
devname = 'IMM'
@classmethod
def adequate(cls, info):
# We can sometimes receive a partially initialized SLP packet
# This is not adequate for being satisfied
return bool(info.get('attributes', {}))
def scan(self):
slpattrs = self.info.get('attributes', {})
self.isdense = False
try:
ff = slpattrs.get('enclosure-form-factor', [''])[0]
except IndexError:
return
wronguuid = slpattrs.get('node-uuid', [''])[0]
if wronguuid:
# we need to fix the first three portions of the uuid
uuidprefix = wronguuid.split('-')[:3]
uuidprefix = codecs.encode(struct.pack(
'<IHH', *[int(x, 16) for x in uuidprefix]), 'hex')
uuidprefix = util.stringify(uuidprefix)
uuidprefix = uuidprefix[:8] + '-' + uuidprefix[8:12] + '-' + \
uuidprefix[12:16]
self.info['uuid'] = uuidprefix + '-' + '-'.join(
wronguuid.split('-')[3:])
self.info['uuid'] = self.info['uuid'].lower()
room = slpattrs.get('room-id', [None])[0]
if room:
self.info['room'] = room
rack = slpattrs.get('rack-id', [None])[0]
if rack:
self.info['rack'] = rack
name = slpattrs.get('name', [None])[0]
if name:
self.info['hostname'] = name
unumber = slpattrs.get('lowest-u', [None])[0]
if unumber:
self.info['u'] = unumber
location = slpattrs.get('location', [None])[0]
if location:
self.info['location'] = location
if ff not in ('dense-computing', 'BC2'):
# do not probe unless it's a dense platform
return
self.isdense = True
encuuid = slpattrs.get('chassis-uuid', [None])[0]
if encuuid:
self.info['enclosure.uuid'] = encuuid
slot = int(slpattrs.get('slot', ['0'])[0])
if slot != 0:
self.info['enclosure.bay'] = slot
def probe(self):
if self.info.get('enclosure.bay', 0) == 0:
self.scan()
if self.info.get('enclosure.bay', 0) != 0:
# scan has already populated info
return
ff = self.info.get('attributes', {}).get('enclosure-form-factor', '')
if ff != 'dense-computing':
return
try:
# we are a dense platform, but the SLP data did not give us slot
# attempt to probe using IPMI
ipmicmd = self._get_ipmicmd()
guiddata = ipmicmd.xraw_command(netfn=6, command=8)
self.info['uuid'] = pygutil.decode_wireformat_uuid(
guiddata['data']).lower()
ipmicmd.oem_init()
bayid = ipmicmd._oem.immhandler.get_property(
'/v2/cmm/sp/7')
if not bayid:
return
self.info['enclosure.bay'] = int(bayid)
smmid = ipmicmd._oem.immhandler.get_property(
'/v2/ibmc/smm/chassis/uuid')
if not smmid:
return
smmid = smmid.lower().replace(' ', '')
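            # e.g. '0123456789abcdef0123456789abcdef'
            #   -> '01234567-89ab-cdef-0123-456789abcdef'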
smmid = '{0}-{1}-{2}-{3}-{4}'.format(smmid[:8], smmid[8:12],
smmid[12:16], smmid[16:20],
smmid[20:])
self.info['enclosure.uuid'] = smmid
self.info['enclosure.type'] = 'smm'
except pygexc.IpmiException as ie:
print(repr(ie))
raise
| 3,440
| 139
| 23
|
c2c0dc95899f6f8dad0a7096d7c04088b895f8b1
| 363
|
py
|
Python
|
alembic/versions/0367b739bb81_add_country_code_to_table.py
|
danieliheonu/bigfastapi
|
483554776195c9f38bb46ba719b613360eda1028
|
[
"MIT"
] | 1
|
2022-03-20T21:46:05.000Z
|
2022-03-20T21:46:05.000Z
|
alembic/versions/0367b739bb81_add_country_code_to_table.py
|
danieliheonu/bigfastapi
|
483554776195c9f38bb46ba719b613360eda1028
|
[
"MIT"
] | null | null | null |
alembic/versions/0367b739bb81_add_country_code_to_table.py
|
danieliheonu/bigfastapi
|
483554776195c9f38bb46ba719b613360eda1028
|
[
"MIT"
] | null | null | null |
"""add country code to table
Revision ID: 0367b739bb81
Revises: 1e09924c1938
Create Date: 2022-01-27 16:10:57.297020
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0367b739bb81'
down_revision = '1e09924c1938'
branch_labels = None
depends_on = None
| 14.52
| 40
| 0.741047
|
"""add country code to table
Revision ID: 0367b739bb81
Revises: 1e09924c1938
Create Date: 2022-01-27 16:10:57.297020
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0367b739bb81'
down_revision = '1e09924c1938'
branch_labels = None
depends_on = None
def upgrade():
pass
def downgrade():
pass
| 6
| 0
| 46
|
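The upgrade() and downgrade() bodies in the migration above were left as pass, so despite its title the revision is a no-op. For illustration, a hedged sketch of what the described change could look like; the table name users and the column name country_code are assumptions, not taken from the source:

# Hypothetical migration body -- table and column names are assumptions.
from alembic import op
import sqlalchemy as sa

def upgrade():
    # Add the column as nullable so existing rows remain valid.
    op.add_column('users', sa.Column('country_code', sa.String(length=8), nullable=True))

def downgrade():
    # Exact reverse of upgrade().
    op.drop_column('users', 'country_code')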
9c35a421d2475f3566cf629c7b74b3188447fc25
| 152
|
py
|
Python
|
scripts/visualize_dataset.py
|
birlrobotics/smach_based_introspection_framework
|
f16742339cddfc86effba4dbf6e5062304704b89
|
[
"BSD-3-Clause"
] | 7
|
2018-02-23T13:02:13.000Z
|
2020-07-28T18:27:47.000Z
|
scripts/visualize_dataset.py
|
birlrobotics/smach_based_introspection_framework
|
f16742339cddfc86effba4dbf6e5062304704b89
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/visualize_dataset.py
|
birlrobotics/smach_based_introspection_framework
|
f16742339cddfc86effba4dbf6e5062304704b89
|
[
"BSD-3-Clause"
] | 1
|
2019-06-24T09:20:06.000Z
|
2019-06-24T09:20:06.000Z
|
#!/usr/bin/env python
import os
import smach_based_introspection_framework.offline_part.visualize_dataset as m
if __name__ == "__main__":
m.run()
| 19
| 78
| 0.776316
|
#!/usr/bin/env python
import os
import smach_based_introspection_framework.offline_part.visualize_dataset as m
if __name__ == "__main__":
m.run()
| 0
| 0
| 0
|
489726ea2da03626c5a2318798d31acaac09e9b1
| 12,261
|
py
|
Python
|
packages/weevely/modules/net/proxy.py
|
ZooAtmosphereGroup/HelloPackages
|
0ccffd33bf927b13d28c8f715ed35004c33465d9
|
[
"Apache-2.0"
] | null | null | null |
packages/weevely/modules/net/proxy.py
|
ZooAtmosphereGroup/HelloPackages
|
0ccffd33bf927b13d28c8f715ed35004c33465d9
|
[
"Apache-2.0"
] | null | null | null |
packages/weevely/modules/net/proxy.py
|
ZooAtmosphereGroup/HelloPackages
|
0ccffd33bf927b13d28c8f715ed35004c33465d9
|
[
"Apache-2.0"
] | null | null | null |
from core.loggers import log, dlog
from core import messages
from core.vectors import ModuleExec
from core.module import Module
from core.config import base_path
from http.server import HTTPServer, BaseHTTPRequestHandler
from tempfile import gettempdir
from socketserver import ThreadingMixIn
from urllib.parse import urlparse, urlunparse, ParseResult
from io import StringIO
from http.client import HTTPResponse
import threading
import re
import os
import sys
import socket
import ssl
import select
import http.client
import urllib.parse
import time
import json
from io import BytesIO
from subprocess import Popen, PIPE
from html.parser import HTMLParser
from tempfile import mkdtemp
re_valid_ip = re.compile(
    r"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$")
re_valid_hostname = re.compile(r"^(([a-zA-Z0-9\-]+)\.)*([A-Za-z]|[A-Za-z][A-Za-z0-9\-]*[A-Za-z0-9])$")
temp_certdir = mkdtemp()
lock = threading.Lock()
# Create path for the CA certificates and keys
cert_folder = os.path.join(base_path, 'certs')
os.makedirs(cert_folder, exist_ok=True)
#
# Most of the Proxy part has been taken from https://github.com/inaz2/proxy2
#
class Proxy(Module):
"""Run local proxy to pivot HTTP/HTTPS browsing through the target."""
| 32.350923
| 119
| 0.575402
|
from core.loggers import log, dlog
from core import messages
from core.vectors import ModuleExec
from core.module import Module
from core.config import base_path
from http.server import HTTPServer, BaseHTTPRequestHandler
from tempfile import gettempdir
from socketserver import ThreadingMixIn
from urllib.parse import urlparse, urlunparse, ParseResult
from io import StringIO
from http.client import HTTPResponse
import threading
import re
import os
import sys
import socket
import ssl
import select
import http.client
import urllib.parse
import time
import json
from io import BytesIO
from subprocess import Popen, PIPE
from html.parser import HTMLParser
from tempfile import mkdtemp
re_valid_ip = re.compile(
    r"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$")
re_valid_hostname = re.compile(r"^(([a-zA-Z0-9\-]+)\.)*([A-Za-z]|[A-Za-z][A-Za-z0-9\-]*[A-Za-z0-9])$")
temp_certdir = mkdtemp()
lock = threading.Lock()
class FakeSocket():
def __init__(self, response_str):
self._file = BytesIO(response_str)
def makefile(self, *args, **kwargs):
return self._file
# Create path for the CA certificates and keys
cert_folder = os.path.join(base_path, 'certs')
os.makedirs(cert_folder, exist_ok=True)
def get_cert_path(path):
return os.path.join(cert_folder, path)
def initialize_certificates():
cakey_path = get_cert_path("ca.key")
cacrt_path = get_cert_path("ca.crt")
certkey_path = get_cert_path("cert.key")
if not os.path.isfile(cakey_path) or not os.path.isfile(cacrt_path) or not os.path.isfile(certkey_path):
# openssl genrsa -out ca.key 2048
p1 = Popen(["openssl", "genrsa", "-out", cakey_path, "2048"])
p1.communicate()
p1.wait()
# openssl req -new -x509 -days 3650 -key ca.key -out ca.crt -subj "/CN=proxy2 CA"
p2 = Popen(["openssl", "req", "-new", "-x509", "-days", "3650", "-key",
cakey_path, "-out", cacrt_path, "-subj", "/CN=proxy2 CA"])
p2.communicate()
p2.wait()
# openssl genrsa -out cert.key 2048
p3 = Popen(["openssl", "genrsa", "-out", certkey_path, "2048"])
p3.communicate()
p3.wait()
#
# Most of the Proxy part has been taken from https://github.com/inaz2/proxy2
#
class ThreadingHTTPServer(ThreadingMixIn, HTTPServer):
address_family = socket.AF_INET
daemon_threads = True
def handle_error(self, request, client_address):
        # suppress socket/ssl related errors
cls, e = sys.exc_info()[:2]
if cls is socket.error or cls is ssl.SSLError:
pass
else:
return HTTPServer.handle_error(self, request, client_address)
class ProxyRequestHandler(BaseHTTPRequestHandler):
cakey = get_cert_path('ca.key')
cacert = get_cert_path('ca.crt')
certkey = get_cert_path('cert.key')
certdir = temp_certdir
timeout = 5
lock = threading.Lock()
def __init__(self, *args, **kwargs):
self.tls = threading.local()
self.tls.conns = {}
BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
def log_error(self, format, *args):
        # suppress "Request timed out: timeout('timed out',)"
if isinstance(args[0], socket.timeout):
return
def do_CONNECT(self):
self.connect_intercept()
def connect_intercept(self):
hostname = self.path.split(':')[0]
certname = "%s.crt" % (hostname)
certpath = os.path.join(self.certdir, certname)
if not (re_valid_ip.match(hostname) or re_valid_hostname.match(hostname)):
log.warning("CN name '%s' is not valid, using 'www.weevely.com'" % (hostname))
hostname = 'www.weevely.com'
with self.lock:
if not os.path.isfile(certpath):
epoch = "%d" % (time.time() * 1000)
p1 = Popen(["openssl", "req", "-new", "-key", self.certkey, "-subj", "/CN=%s" % hostname], stdout=PIPE)
p2 = Popen(["openssl", "x509", "-req", "-days", "3650", "-CA", self.cacert, "-CAkey", self.cakey,
"-set_serial", epoch, "-out", certpath], stdin=p1.stdout, stderr=PIPE)
p2.communicate()
self.send_response_only(200, 'Connection Established')
self.end_headers()
try:
self.connection = ssl.wrap_socket(self.connection, keyfile=self.certkey, certfile=certpath,
server_side=True)
self.rfile = self.connection.makefile("rb", self.rbufsize)
self.wfile = self.connection.makefile("wb", self.wbufsize)
except Exception as e:
log.debug(e)
raise
conntype = self.headers.get('Proxy-Connection', '')
if self.protocol_version == "HTTP/1.1" and conntype.lower() != 'close':
self.close_connection = 0
else:
self.close_connection = 1
def connect_relay(self):
        host, _, port = self.path.partition(':')
        address = (host, int(port) if port else 443)
try:
s = socket.create_connection(address, timeout=self.timeout)
except Exception as e:
self.send_error(502)
return
self.send_response(200, 'Connection Established')
self.end_headers()
conns = [self.connection, s]
self.close_connection = 0
while not self.close_connection:
rlist, wlist, xlist = select.select(conns, [], conns, self.timeout)
if xlist or not rlist:
break
for r in rlist:
other = conns[1] if r is conns[0] else conns[0]
data = r.recv(8192)
if not data:
self.close_connection = 1
break
other.sendall(data)
def do_GET(self):
if self.path == 'http://weevely/':
self.send_cacert()
return
req = self
content_length = int(req.headers.get('Content-Length', 0))
req_body = self.rfile.read(content_length) if content_length else ''
if req.path[0] == '/':
if isinstance(self.connection, ssl.SSLSocket):
req.path = "https://%s%s" % (req.headers['Host'], req.path)
else:
req.path = "http://%s%s" % (req.headers['Host'], req.path)
req.headers['Content-length'] = str(len(req_body))
u = urllib.parse.urlsplit(req.path)
scheme, netloc, path = u.scheme, u.netloc, (u.path + '?' + u.query if u.query else u.path)
assert scheme in ('http', 'https')
if netloc:
req.headers['Host'] = netloc
setattr(req, 'headers', self.filter_headers(req.headers))
net_curl_args = [
'-X',
self.command,
'-i'
]
net_curl_args.append(self.path)
for h in req.headers:
if h.title().lower() == 'host':
host = self.headers[h]
else:
net_curl_args += ['-H', '%s: %s' % (h.title(), self.headers[h])]
if self.command == 'POST':
net_curl_args += ['-d', req_body]
lock.acquire()
try:
result, headers, saved = ModuleExec(
'net_curl',
net_curl_args
).run()
finally:
lock.release()
if not headers:
log.debug('Error no headers')
self.send_error(502)
return
log.debug(
'> ' + '\r\n> '.join(
['%s: %s' % (
h.title(),
self.headers[h]
) for h in self.headers
]
)
)
log.debug('< ' + '\r\n< '.join([h.decode('utf-8', 'replace') for h in headers]))
http_response_str = b'\r\n'.join(headers) + b'\r\n\r\n' + result
source = FakeSocket(http_response_str)
res = HTTPResponse(source)
res.begin()
version_table = {10: 'HTTP/1.0', 11: 'HTTP/1.1'}
setattr(res, 'headers', res.msg)
setattr(res, 'response_version', version_table[res.version])
# support streaming
        if 'Content-Length' not in res.headers and 'no-store' in res.headers.get('Cache-Control', ''):
setattr(res, 'headers', self.filter_headers(res.headers))
self.relay_streaming(res)
return
try:
res_body = res.read()
except Exception as e:
log.debug(e)
self.send_error(500)
return
setattr(res, 'headers', self.filter_headers(res.headers))
respstring = "%s %d %s\r\n" % (self.protocol_version, res.status, res.reason)
self.wfile.write(respstring.encode('utf-8'))
self.wfile.write(res.headers.as_bytes())
self.wfile.write(res_body)
self.wfile.flush()
def relay_streaming(self, res):
respstring = "%s %d %s\r\n" % (self.protocol_version, res.status, res.reason)
self.wfile.write(respstring.encode('utf-8'))
self.wfile.write(res.headers.as_bytes() + b"\r\n")
try:
while True:
chunk = res.read(8192)
if not chunk:
break
self.wfile.write(chunk)
self.wfile.flush()
except socket.error:
# connection closed by client
pass
do_HEAD = do_GET
do_POST = do_GET
do_PUT = do_GET
do_DELETE = do_GET
do_OPTIONS = do_GET
def filter_headers(self, headers):
# http://tools.ietf.org/html/rfc2616#section-13.5.1
hop_by_hop = (
'connection', 'keep-alive', 'proxy-authenticate', 'proxy-authorization', 'te', 'trailers', 'transfer-encoding',
'upgrade')
for k in hop_by_hop:
del headers[k]
return headers
def send_cacert(self):
with open(self.cacert, 'rb') as f:
data = f.read()
        self.wfile.write(("%s %d %s\r\n" % (self.protocol_version, 200, 'OK')).encode('utf-8'))
self.send_header('Content-Type', 'application/x-x509-ca-cert')
self.send_header('Content-Length', len(data))
self.send_header('Connection', 'close')
self.end_headers()
self.wfile.write(data)
def run_proxy2(HandlerClass=ProxyRequestHandler, ServerClass=ThreadingHTTPServer, protocol="HTTP/1.1",
               hostname='127.0.0.1', port=8080):
server_address = (hostname, port)
HandlerClass.protocol_version = protocol
httpd = ServerClass(server_address, HandlerClass)
sa = httpd.socket.getsockname()
httpd.serve_forever()
class Proxy(Module):
"""Run local proxy to pivot HTTP/HTTPS browsing through the target."""
def init(self):
self.register_info(
{
'author': [
'Emilio Pinna'
],
'license': 'GPLv3'
}
)
self.register_arguments([
{'name': '-lhost', 'default': '127.0.0.1'},
{'name': '-lport', 'default': 8080, 'type': int},
{'name': '-no-background', 'action': 'store_true', 'default': False, 'help': 'Run foreground'}
])
def run(self):
log.warning(messages.module_net_proxy.proxy_starting_s_i % (self.args['lhost'], self.args['lport']))
log.warning(messages.module_net_proxy.proxy_set_proxy)
initialize_certificates()
if self.args['no_background']:
log.warning(messages.module_net_proxy.proxy_started_foreground)
run_proxy2(
hostname=self.args['lhost'],
port=self.args['lport']
)
else:
log.warning(messages.module_net_proxy.proxy_started_background)
server_thread = threading.Thread(target=run_proxy2, kwargs={
'hostname': self.args['lhost'],
'port': self.args['lport']
})
server_thread.daemon = True
server_thread.start()
| 9,893
| 686
| 245
|
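A detail of the proxy worth isolating: do_GET() rebuilds an http.client.HTTPResponse from the raw bytes returned by the net_curl vector, using the FakeSocket shim so HTTPResponse can read from an in-memory buffer instead of a real socket. A minimal sketch of that parsing trick:

# Parsing a raw HTTP response from memory, as done in ProxyRequestHandler.do_GET.
from http.client import HTTPResponse
from io import BytesIO

class FakeSocket:
    # Duck-types the one socket method HTTPResponse needs: makefile().
    def __init__(self, response_bytes):
        self._file = BytesIO(response_bytes)
    def makefile(self, *args, **kwargs):
        return self._file

raw = b"HTTP/1.1 200 OK\r\nContent-Length: 2\r\n\r\nhi"
res = HTTPResponse(FakeSocket(raw))
res.begin()                    # parses status line and headers
print(res.status, res.read())  # 200 b'hi'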
c1a9fb6d2776eae6bdea4a9ec9110150e80c3b44
| 65
|
py
|
Python
|
or_testbed/solvers/grasp/__init__.py
|
Fynardo/or-testbed
|
9cc58edd71f400da7a933f166cd325e43562cfb6
|
[
"MIT"
] | 1
|
2020-07-23T14:59:03.000Z
|
2020-07-23T14:59:03.000Z
|
or_testbed/solvers/grasp/__init__.py
|
Fynardo/or-testbed
|
9cc58edd71f400da7a933f166cd325e43562cfb6
|
[
"MIT"
] | null | null | null |
or_testbed/solvers/grasp/__init__.py
|
Fynardo/or-testbed
|
9cc58edd71f400da7a933f166cd325e43562cfb6
|
[
"MIT"
] | null | null | null |
from .construct import GraspConstruct, MultiStartGraspConstruct
| 32.5
| 64
| 0.876923
|
from .construct import GraspConstruct, MultiStartGraspConstruct
| 0
| 0
| 0
|
30c3599ff9f4efeffbeca8fb7c06634e903655c8
| 1,151
|
py
|
Python
|
tsengine/test/test_pool.py
|
ccccxjin/TsEngine
|
5f8deed436eb9756be40f78a7bf52be9e910b501
|
[
"MIT"
] | 1
|
2020-07-10T09:11:38.000Z
|
2020-07-10T09:11:38.000Z
|
tsengine/test/test_pool.py
|
ccccxjin/tsengine
|
5f8deed436eb9756be40f78a7bf52be9e910b501
|
[
"MIT"
] | null | null | null |
tsengine/test/test_pool.py
|
ccccxjin/tsengine
|
5f8deed436eb9756be40f78a7bf52be9e910b501
|
[
"MIT"
] | null | null | null |
import pytest
| 27.404762
| 54
| 0.578627
|
import pytest
class TestPool:
def test_pool_get(self, pool):
assert pool.checkedin == 1
assert pool.checkedout == 0
connection1 = pool.get()
assert pool.checkedin == 0
assert pool.checkedout == 1
connection2 = pool.get()
assert pool.checkedin == 0
assert pool.checkedout == 2
connection3 = pool.get()
assert pool.checkedin == 0
assert pool.checkedout == 3
assert pool.busy
pool.wait = False
with pytest.raises(TimeoutError) as e:
pool.get()
exec_msg = e.value.args[0]
assert exec_msg == 'get session timeout error'
pool.wait = 3
with pytest.raises(TimeoutError) as e:
pool.get()
exec_msg = e.value.args[0]
assert exec_msg == 'get session timeout error'
pool.put(connection1)
assert pool.checkedin == 1
assert pool.checkedout == 2
pool.put(connection2)
assert pool.checkedin == 2
assert pool.checkedout == 1
pool.put(connection3)
assert pool.checkedin == 3
assert pool.checkedout == 0
| 1,093
| -6
| 49
|
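The tests above depend on a pool fixture that is not defined in this file. A hypothetical conftest.py sketch, with the constructor behaviour inferred from the assertions (one checked-in session at start, blocking controlled by wait); the import path and parameter names are assumptions, not the project's actual API:

# Hypothetical conftest.py; the real pool lives elsewhere in tsengine.
import pytest
from tsengine.pool import Pool  # assumed import path

@pytest.fixture
def pool():
    # Starts with one checked-in session, as the first assertion expects.
    return Pool(size=3, wait=True)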
e5d6df24af9bac17d018ee4f885d2b0a7d316e52
| 4,965
|
py
|
Python
|
course_grader/views/api/submitted_graderoster.py
|
uw-it-aca/gradepage
|
7059d715cc112ad0ecb0e5012f716e525ee7b3bc
|
[
"Apache-2.0"
] | 1
|
2017-01-29T09:52:06.000Z
|
2017-01-29T09:52:06.000Z
|
course_grader/views/api/submitted_graderoster.py
|
uw-it-aca/gradepage
|
7059d715cc112ad0ecb0e5012f716e525ee7b3bc
|
[
"Apache-2.0"
] | 287
|
2017-03-09T00:17:20.000Z
|
2022-01-08T00:36:34.000Z
|
course_grader/views/api/submitted_graderoster.py
|
uw-it-aca/gradepage
|
7059d715cc112ad0ecb0e5012f716e525ee7b3bc
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.conf import settings
from django.http import HttpResponse
from django.views.decorators.cache import never_cache
from django.utils.decorators import method_decorator
from uw_saml.decorators import group_required
from course_grader.views.rest_dispatch import RESTDispatch
from course_grader.models import (
SubmittedGradeRoster as SubmittedGradeRosterModel)
from course_grader.dao.person import person_from_regid, person_display_name
from course_grader.dao.section import section_from_label
from course_grader.dao.term import term_from_param
from uw_sws_graderoster.models import GradeRoster
from lxml import etree
from logging import getLogger
import csv
logger = getLogger(__name__)
@method_decorator(group_required(settings.GRADEPAGE_SUPPORT_GROUP),
name='dispatch')
@method_decorator(never_cache, name='dispatch')
@method_decorator(group_required(settings.GRADEPAGE_SUPPORT_GROUP),
name='dispatch')
@method_decorator(never_cache, name='dispatch')
| 35.978261
| 76
| 0.627593
|
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.conf import settings
from django.http import HttpResponse
from django.views.decorators.cache import never_cache
from django.utils.decorators import method_decorator
from uw_saml.decorators import group_required
from course_grader.views.rest_dispatch import RESTDispatch
from course_grader.models import (
SubmittedGradeRoster as SubmittedGradeRosterModel)
from course_grader.dao.person import person_from_regid, person_display_name
from course_grader.dao.section import section_from_label
from course_grader.dao.term import term_from_param
from uw_sws_graderoster.models import GradeRoster
from lxml import etree
from logging import getLogger
import csv
logger = getLogger(__name__)
@method_decorator(group_required(settings.GRADEPAGE_SUPPORT_GROUP),
name='dispatch')
@method_decorator(never_cache, name='dispatch')
class SubmissionsByTerm(RESTDispatch):
def get(self, request, *args, **kwargs):
term_id = kwargs.get("term_id")
try:
selected_term = term_from_param(term_id)
except Exception as ex:
return self.error_response(400, "Invalid Term ID")
graderosters = SubmittedGradeRosterModel.objects.get_status_by_term(
selected_term)
response = self.csv_response(filename=term_id)
csv.register_dialect("unix_newline", lineterminator="\n")
writer = csv.writer(response, dialect="unix_newline")
writer.writerow([
"Section",
"Secondary section",
"Submitter",
"Submission datetime"
])
for graderoster in graderosters:
writer.writerow([
graderoster["section_id"],
graderoster["secondary_section_id"],
graderoster["submitted_by"],
graderoster["submitted_date"],
])
return response
@method_decorator(group_required(settings.GRADEPAGE_SUPPORT_GROUP),
name='dispatch')
@method_decorator(never_cache, name='dispatch')
class SubmittedGradeRoster(RESTDispatch):
def get(self, request, *args, **kwargs):
graderoster_id = kwargs.get("graderoster_id")
try:
model = SubmittedGradeRosterModel.objects.get(pk=graderoster_id)
section = section_from_label(model.section_id)
instructor = person_from_regid(model.instructor_id)
submitter = person_from_regid(model.submitted_by)
graderoster = GradeRoster.from_xhtml(
etree.fromstring(model.document.strip()),
section=section, instructor=instructor)
except SubmittedGradeRosterModel.DoesNotExist:
return self.error_response(404, "Not Found")
except Exception as ex:
logger.error(
"Download failed for graderoster model {}: {}".format(
graderoster_id, ex))
return self.error_response(500, "{}".format(ex))
if model.secondary_section_id is not None:
filename = model.secondary_section_id
else:
filename = model.section_id
response = self.csv_response(filename=filename)
csv.register_dialect("unix_newline", lineterminator="\n")
writer = csv.writer(response, dialect="unix_newline")
writer.writerow([
"Student number",
"Student name",
"Course",
"Section",
"Credits",
"Incomplete",
"Grade",
"Writing credit",
"Instructor name",
"Instructor netid",
"Submitter name",
"Submitter netid"
])
secondary_section = getattr(graderoster, "secondary_section", None)
for item in graderoster.items:
if (secondary_section is not None and
secondary_section.section_id != item.section_id):
continue
writer.writerow([
item.student_number,
"{first_name} {last_name}".format(
first_name=item.student_first_name,
last_name=item.student_surname),
"{curr_abbr} {course_num}".format(
curr_abbr=section.curriculum_abbr,
course_num=section.course_number),
item.section_id,
item.student_credits,
"I" if item.has_incomplete else "",
"X" if item.no_grade_now else str(item.grade),
"W" if item.has_writing_credit else "",
person_display_name(instructor),
instructor.uwnetid,
person_display_name(submitter),
submitter.uwnetid
])
logger.info("Graderoster downloaded: {}-{}".format(
model.section_id, model.instructor_id))
return response
| 3,738
| 37
| 96
|
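Both views call a csv_response() helper inherited from RESTDispatch that is not shown in this file. A plausible sketch of such a helper, assuming it returns a Django HttpResponse with a CSV content type and an attachment filename (an assumption about the project's base class, not its actual code):

# Hypothetical csv_response() helper as the views above appear to use it.
from django.http import HttpResponse

def csv_response(filename):
    response = HttpResponse(content_type="text/csv")
    response["Content-Disposition"] = 'attachment; filename="{}.csv"'.format(filename)
    return response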
72cac8fb30a2e307bde5d70d65c30b41c1787dec
| 1,235
|
py
|
Python
|
pipeline/boto_helpers.py
|
DMS-medical-informatics/beiwe-backend
|
55afe3a16e1c9b34501f3655288b5c19c663a083
|
[
"BSD-3-Clause"
] | null | null | null |
pipeline/boto_helpers.py
|
DMS-medical-informatics/beiwe-backend
|
55afe3a16e1c9b34501f3655288b5c19c663a083
|
[
"BSD-3-Clause"
] | null | null | null |
pipeline/boto_helpers.py
|
DMS-medical-informatics/beiwe-backend
|
55afe3a16e1c9b34501f3655288b5c19c663a083
|
[
"BSD-3-Clause"
] | null | null | null |
import json
import os.path
import subprocess
import boto3
# This is all cribbed from the django branch's cluster_management/deployment_helpers folder
# TODO once the branches are merged, use that code and NOT this code
| 29.404762
| 99
| 0.728745
|
import json
import os.path
import subprocess
import boto3
# This is all cribbed from the django branch's cluster_management/deployment_helpers folder
# TODO once the branches are merged, use that code and NOT this code
def get_aws_object_names():
configs_folder = get_configs_folder()
with open(os.path.join(configs_folder, 'aws-object-names.json')) as fn:
return json.load(fn)
def get_boto_client(client_type):
from config.settings import BEIWE_SERVER_AWS_ACCESS_KEY_ID, BEIWE_SERVER_AWS_SECRET_ACCESS_KEY
aws_object_names = get_aws_object_names()
return boto3.client(
client_type,
aws_access_key_id=BEIWE_SERVER_AWS_ACCESS_KEY_ID,
aws_secret_access_key=BEIWE_SERVER_AWS_SECRET_ACCESS_KEY,
region_name=aws_object_names['region_name'],
)
def get_pipeline_folder():
return os.path.abspath(__file__).rsplit('/', 1)[0]
def get_configs_folder():
return os.path.join(get_pipeline_folder(), 'configs')
def set_default_region():
aws_object_names = get_aws_object_names()
region_name = aws_object_names['region_name']
subprocess.check_call(['aws', 'configure', 'set', 'default.region', region_name])
| 871
| 0
| 125
|
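A short usage sketch of the helpers above; the 'batch' client type is an illustrative choice, and the import path assumes the module is importable as pipeline.boto_helpers:

# Illustrative usage of the boto helpers above.
from pipeline.boto_helpers import get_boto_client, set_default_region

set_default_region()              # align the aws CLI default with the configured region
client = get_boto_client('batch')
print(client.meta.region_name)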
e5e52448863aa3d2032ea0acf739006c4aeffca6
| 1,783
|
py
|
Python
|
WaltzControl/use_cases/tel_controller_boundarys.py
|
DaneSpaeth/WaltzControl_refactored
|
80aa3e28f1e0709bc7dd9472bc1d841e9b4da9e7
|
[
"MIT"
] | null | null | null |
WaltzControl/use_cases/tel_controller_boundarys.py
|
DaneSpaeth/WaltzControl_refactored
|
80aa3e28f1e0709bc7dd9472bc1d841e9b4da9e7
|
[
"MIT"
] | null | null | null |
WaltzControl/use_cases/tel_controller_boundarys.py
|
DaneSpaeth/WaltzControl_refactored
|
80aa3e28f1e0709bc7dd9472bc1d841e9b4da9e7
|
[
"MIT"
] | null | null | null |
"""Boundaries for Responses from TelescopeController (TC) and Requests to TC.
Data entry and exit point into use_cases layer.
"""
class TelescopeControllerResponseBoundary:
"""Contains Responses from TelescopeController Device.
"""
def __init__(
self,
ra_response = None,
dec_response = None,
validate_response = None):
"""Store Responses of Telescope Controller as floats.
"""
self.ra_response = ra_response
self.dec_response = dec_response
self.validate_response = validate_response
def set_ra_response(self, ra):
"""Set ra response.
Input: ra as float in hours
"""
self.ra_response = ra
def set_dec_response(self, dec):
"""Set dec response.
Input: dec as float in degrees
"""
self.dec_response = dec
def set_validate_response(self, valid):
"""Set validate response.
        Input: valid as boolean (accounts for returns of the Telescope Controller
        to set_target etc.)
"""
self.validate_response = valid
def reset_responses(self):
"""Reset all responses to None.
"""
self.ra_response = None
self.dec_response = None
self.validate_response = None
def retrieve_position(self):
"""Returns ra and dec_responses.
"""
return (self.ra_response, self.dec_response)
class TelescopeControllerRequestBoundary:
"""Interface for commands to TelescopeController Device.
"""
| 28.758065
| 80
| 0.574313
|
"""Boundaries for Responses from TelescopeController (TC) and Requests to TC.
Data entry and exit point into use_cases layer.
"""
class TelescopeControllerResponseBoundary:
"""Contains Responses from TelescopeController Device.
"""
def __init__(
self,
ra_response = None,
dec_response = None,
validate_response = None):
"""Store Responses of Telescope Controller as floats.
"""
self.ra_response = ra_response
self.dec_response = dec_response
self.validate_response = validate_response
def set_ra_response(self, ra):
"""Set ra response.
Input: ra as float in hours
"""
self.ra_response = ra
def set_dec_response(self, dec):
"""Set dec response.
Input: dec as float in degrees
"""
self.dec_response = dec
def set_validate_response(self, valid):
"""Set validate response.
        Input: valid as boolean (accounts for returns of the Telescope Controller
        to set_target etc.)
"""
self.validate_response = valid
def reset_responses(self):
"""Reset all responses to None.
"""
self.ra_response = None
self.dec_response = None
self.validate_response = None
def retrieve_position(self):
"""Returns ra and dec_responses.
"""
return (self.ra_response, self.dec_response)
class TelescopeControllerRequestBoundary:
"""Interface for commands to TelescopeController Device.
"""
def __init__(self):
pass
def request_position(self):
pass
| 30
| 0
| 61
|
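A brief round trip through the response boundary above, with illustrative coordinate values:

# Illustrative round trip through TelescopeControllerResponseBoundary.
response = TelescopeControllerResponseBoundary()
response.set_ra_response(5.5)       # hours
response.set_dec_response(-12.25)   # degrees
response.set_validate_response(True)
assert response.retrieve_position() == (5.5, -12.25)

response.reset_responses()
assert response.retrieve_position() == (None, None)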
2992c83e0ce52d8039899799790c8ae2a72523fc
| 3,505
|
py
|
Python
|
example_group_epochs.py
|
DraganaMana/mne_microstates
|
de3dc76e63e49fb4b61810bf737d4d5d11f5b2f0
|
[
"MIT"
] | 1
|
2021-06-02T09:14:30.000Z
|
2021-06-02T09:14:30.000Z
|
example_group_epochs.py
|
DraganaMana/mne_microstates
|
de3dc76e63e49fb4b61810bf737d4d5d11f5b2f0
|
[
"MIT"
] | null | null | null |
example_group_epochs.py
|
DraganaMana/mne_microstates
|
de3dc76e63e49fb4b61810bf737d4d5d11f5b2f0
|
[
"MIT"
] | 1
|
2020-06-15T13:59:07.000Z
|
2020-06-15T13:59:07.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 25 18:04:32 2020
@author: Dragana
"""
import mne
import microstates as mst
import numpy as np
HC_RS_path = 'C:/Users/.../Documents/RS_EEG/'
subj_folder = ['subj01', 'subj02', 'subj03', 'subj04', 'subj05']
# Parameter setup
chan_to_drop = ['E67', 'E73', 'E247', 'E251', 'E256', 'E243', 'E246', 'E250',
'E255', 'E82', 'E91', 'E254', 'E249', 'E245', 'E242', 'E253',
'E252', 'E248', 'E244', 'E241', 'E92', 'E102', 'E103', 'E111',
'E112', 'E120', 'E121', 'E133', 'E134', 'E145', 'E146', 'E156',
'E165', 'E166', 'E174', 'E175', 'E187', 'E188', 'E199', 'E200',
'E208', 'E209', 'E216', 'E217', 'E228', 'E229', 'E232', 'E233',
'E236', 'E237', 'E240', 'E218', 'E227', 'E231', 'E235', 'E239',
'E219', 'E225', 'E226', 'E230', 'E234', 'E238']
pax = len(subj_folder) # number of participants
n_states = 4
n_inits = 10
EGI256 = True
if EGI256 == True:
n_channels = 256 - len(chan_to_drop)
grouped_maps = np.array([], dtype=np.int64).reshape(0, n_channels)
for i, f in enumerate(subj_folder):
fname = HC_RS_path + f + '/' + f +'_clean-epo.fif'
epochs = mne.read_epochs(fname, preload=True)
if EGI256 == True:
epochs.drop_channels(chan_to_drop)
data = epochs.get_data()
# Segment the data in microstates
maps, segmentation, gev, gfp_peaks = mst.segment(data, n_states, n_inits)
grouped_maps = np.concatenate((grouped_maps, maps), axis=0)
# Transpose the maps from maps(n_maps, n_channels) to maps(n_channels, n_maps)
# and treat the n_maps as a sample in time.
grouped_maps_T = grouped_maps.transpose()
# Find the group maps using k-means clustering
group_maps, group_gev = mst.segment(grouped_maps_T, n_states, n_inits, use_peaks=False)
# Plot the maps
mst.viz.plot_maps(group_maps, epochs.info)
# Fitting the maps back to the original epoched data by subject
grouped_segment, all_p = [], []
for i, f in enumerate(subj_folder):
fname = HC_RS_path + f + '/' + f +'_clean-epo.fif'
epochs = mne.read_epochs(fname, preload=True)
if EGI256 == True:
epochs.drop_channels(chan_to_drop)
data = epochs.get_data()
n_epochs, n_chans, n_samples = data.shape
# Make the data 2D
data = np.hstack(data)
# Compute final microstate segmentations on the original data
activation = group_maps.dot(data)
segmentation = np.argmax(np.abs(activation), axis=0)
# Add all the per subject segmentations in one array
# (n_times, subjects)
grouped_segment.append(segmentation)
# Plot the segmentation per subject
sfreq = epochs.info['sfreq']
times = np.arange(0, len(data[1])/sfreq, 1/sfreq)
mst.viz.plot_segmentation(segmentation[:500], data[:, :500], times[:500])
# p_empirical
epoched_data = True
p_hat = mst.analysis.p_empirical(segmentation, n_epochs, n_samples, n_states,
epoched_data)
all_p.append(p_hat)
# p_empirical printing
print("\n\t Empirical symbol distribution (RTT) per subject:\n")
for i in range(pax):
print("\n Subject", i)
for j in range(n_states):
print("\n\t\t p", j, " = {0:.5f}".format(all_p[i][j]))
all_p = np.vstack(all_p)
all_p /= pax
all_p_sum = np.sum(all_p, axis=0)
print("\n\t Empirical symbol distribution (RTT) for all subjects:\n")
for i in range(n_states):
print("\n\t\t p", i, " = {0:.5f}".format(all_p_sum[i]))
| 36.510417
| 87
| 0.628531
|
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 25 18:04:32 2020
@author: Dragana
"""
import mne
import microstates as mst
import numpy as np
HC_RS_path = 'C:/Users/.../Documents/RS_EEG/'
subj_folder = ['subj01', 'subj02', 'subj03', 'subj04', 'subj05']
# Parameter setup
chan_to_drop = ['E67', 'E73', 'E247', 'E251', 'E256', 'E243', 'E246', 'E250',
'E255', 'E82', 'E91', 'E254', 'E249', 'E245', 'E242', 'E253',
'E252', 'E248', 'E244', 'E241', 'E92', 'E102', 'E103', 'E111',
'E112', 'E120', 'E121', 'E133', 'E134', 'E145', 'E146', 'E156',
'E165', 'E166', 'E174', 'E175', 'E187', 'E188', 'E199', 'E200',
'E208', 'E209', 'E216', 'E217', 'E228', 'E229', 'E232', 'E233',
'E236', 'E237', 'E240', 'E218', 'E227', 'E231', 'E235', 'E239',
'E219', 'E225', 'E226', 'E230', 'E234', 'E238']
pax = len(subj_folder) # number of participants
n_states = 4
n_inits = 10
EGI256 = True
if EGI256 == True:
n_channels = 256 - len(chan_to_drop)
grouped_maps = np.array([], dtype=np.int64).reshape(0, n_channels)
for i, f in enumerate(subj_folder):
fname = HC_RS_path + f + '/' + f +'_clean-epo.fif'
epochs = mne.read_epochs(fname, preload=True)
if EGI256 == True:
epochs.drop_channels(chan_to_drop)
data = epochs.get_data()
# Segment the data in microstates
maps, segmentation, gev, gfp_peaks = mst.segment(data, n_states, n_inits)
grouped_maps = np.concatenate((grouped_maps, maps), axis=0)
# Transpose the maps from maps(n_maps, n_channels) to maps(n_channels, n_maps)
# and treat the n_maps as a sample in time.
grouped_maps_T = grouped_maps.transpose()
# Find the group maps using k-means clustering
group_maps, group_gev = mst.segment(grouped_maps_T, n_states, n_inits, use_peaks=False)
# Plot the maps
mst.viz.plot_maps(group_maps, epochs.info)
# Fitting the maps back to the original epoched data by subject
grouped_segment, all_p = [], []
for i, f in enumerate(subj_folder):
fname = HC_RS_path + f + '/' + f +'_clean-epo.fif'
epochs = mne.read_epochs(fname, preload=True)
if EGI256 == True:
epochs.drop_channels(chan_to_drop)
data = epochs.get_data()
n_epochs, n_chans, n_samples = data.shape
# Make the data 2D
data = np.hstack(data)
# Compute final microstate segmentations on the original data
activation = group_maps.dot(data)
segmentation = np.argmax(np.abs(activation), axis=0)
# Add all the per subject segmentations in one array
# (n_times, subjects)
grouped_segment.append(segmentation)
# Plot the segmentation per subject
sfreq = epochs.info['sfreq']
times = np.arange(0, len(data[1])/sfreq, 1/sfreq)
mst.viz.plot_segmentation(segmentation[:500], data[:, :500], times[:500])
# p_empirical
epoched_data = True
p_hat = mst.analysis.p_empirical(segmentation, n_epochs, n_samples, n_states,
epoched_data)
all_p.append(p_hat)
# p_empirical printing
print("\n\t Empirical symbol distribution (RTT) per subject:\n")
for i in range(pax):
print("\n Subject", i)
for j in range(n_states):
print("\n\t\t p", j, " = {0:.5f}".format(all_p[i][j]))
all_p = np.vstack(all_p)
all_p /= pax
all_p_sum = np.sum(all_p, axis=0)
print("\n\t Empirical symbol distribution (RTT) for all subjects:\n")
for i in range(n_states):
print("\n\t\t p", i, " = {0:.5f}".format(all_p_sum[i]))
| 0
| 0
| 0
|
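The second loop in the script above backfits the group-level maps by assigning each time sample to the map with the largest absolute activation. The same step as a small reusable function (a hypothetical helper equivalent to the inline code, not part of the microstates package):

import numpy as np

def backfit(maps, data):
    # maps: (n_maps, n_channels) group microstate topographies
    # data: (n_channels, n_samples) continuous EEG data
    # returns: (n_samples,) integer segmentation
    activation = maps.dot(data)
    return np.argmax(np.abs(activation), axis=0)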
d0c633f50b464b8e08988638cf34cd0815c70e55
| 1,142
|
py
|
Python
|
chunkymonkey/lib/base.py
|
shopzilla/chunky-monkey
|
2556055e87849e2a873a950a5e52429e516c8304
|
[
"Apache-2.0"
] | 1
|
2016-10-24T15:16:26.000Z
|
2016-10-24T15:16:26.000Z
|
chunkymonkey/lib/base.py
|
shopzilla/chunky-monkey
|
2556055e87849e2a873a950a5e52429e516c8304
|
[
"Apache-2.0"
] | null | null | null |
chunkymonkey/lib/base.py
|
shopzilla/chunky-monkey
|
2556055e87849e2a873a950a5e52429e516c8304
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2011 Shopzilla.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""The base Controller API
Provides the BaseController class for subclassing.
"""
from pylons.controllers import WSGIController
from pylons.templating import render_mako as render
| 36.83871
| 74
| 0.75394
|
#
# Copyright 2011 Shopzilla.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""The base Controller API
Provides the BaseController class for subclassing.
"""
from pylons.controllers import WSGIController
from pylons.templating import render_mako as render
class BaseController(WSGIController):
def __call__(self, environ, start_response):
"""Invoke the Controller"""
# WSGIController.__call__ dispatches to the Controller method
# the request is routed to. This routing information is
# available in environ['pylons.routes_dict']
return WSGIController.__call__(self, environ, start_response)
| 0
| 359
| 23
|
7d5cf31371d57d1d5e01bffec3ad52101c96988a
| 134,221
|
py
|
Python
|
carculator/inventory.py
|
SimonVoelker/carculator
|
e40d664c9b5612250cf9ad2c6fa2a199b0bf88c5
|
[
"BSD-3-Clause"
] | null | null | null |
carculator/inventory.py
|
SimonVoelker/carculator
|
e40d664c9b5612250cf9ad2c6fa2a199b0bf88c5
|
[
"BSD-3-Clause"
] | null | null | null |
carculator/inventory.py
|
SimonVoelker/carculator
|
e40d664c9b5612250cf9ad2c6fa2a199b0bf88c5
|
[
"BSD-3-Clause"
] | null | null | null |
from . import DATA_DIR
import sys
import glob
from .background_systems import BackgroundSystemModel
from .export import ExportInventory
from inspect import currentframe, getframeinfo
from pathlib import Path
from scipy import sparse
import csv
import itertools
import numexpr as ne
import numpy as np
import xarray as xr
REMIND_FILES_DIR = DATA_DIR / "IAM"
class InventoryCalculation:
"""
Build and solve the inventory for results characterization and inventory export
Vehicles to be analyzed can be filtered by passing a `scope` dictionary.
Some assumptions in the background system can also be adjusted by passing a `background_configuration` dictionary.
.. code-block:: python
scope = {
'powertrain':['BEV', 'FCEV', 'ICEV-p'],
}
bc = {'country':'CH', # considers electricity network losses for Switzerland
'custom electricity mix' : [[1,0,0,0,0,0,0,0,0,0], # in this case, 100% hydropower for the first year
[0,1,0,0,0,0,0,0,0,0],
[0,0,1,0,0,0,0,0,0,0],
[0,0,0,1,0,0,0,0,0,0],
], # in this case, 100% nuclear for the second year
'fuel blend':{
'cng':{ #specify fuel bland for compressed gas
'primary fuel':{
'type':'biogas',
'share':[0.9, 0.8, 0.7, 0.6] # shares per year. Must total 1 for each year.
},
'secondary fuel':{
'type':'syngas',
'share': [0.1, 0.2, 0.3, 0.4]
}
},
'diesel':{
'primary fuel':{
'type':'synthetic diesel',
'share':[0.9, 0.8, 0.7, 0.6]
},
'secondary fuel':{
'type':'biodiesel - cooking oil',
'share': [0.1, 0.2, 0.3, 0.4]
}
},
'petrol':{
'primary fuel':{
'type':'petrol',
'share':[0.9, 0.8, 0.7, 0.6]
},
'secondary fuel':{
'type':'bioethanol - wheat straw',
'share': [0.1, 0.2, 0.3, 0.4]
}
},
'hydrogen':{
'primary fuel':{'type':'electrolysis', 'share':[1, 0, 0, 0]},
'secondary fuel':{'type':'smr - natural gas', 'share':[0, 1, 1, 1]}
}
},
'energy storage': {
'electric': {
'type':'NMC',
'origin': 'NO'
},
'hydrogen': {
'type':'carbon fiber'
}
}
}
InventoryCalculation(CarModel.array,
background_configuration=background_configuration,
scope=scope,
scenario="RCP26")
The `custom electricity mix` key in the background_configuration dictionary defines an electricity mix to apply,
    in the form of one or several arrays, depending on the number of years to analyze,
    each of which should total 1; the indices correspond to:
- [0]: hydro-power
- [1]: nuclear
- [2]: natural gas
- [3]: solar power
- [4]: wind power
- [5]: biomass
- [6]: coal
- [7]: oil
- [8]: geothermal
- [9]: waste incineration
If none is given, the electricity mix corresponding to the country specified in `country` will be selected.
If no country is specified, Europe applies.
    The `primary fuel` and `secondary fuel` keys each contain an array with fuel shares for each year, to create a custom blend.
If none is provided, a blend provided by the Integrated Assessment model REMIND is used, which will depend on the REMIND energy scenario selected.
Here is a list of available fuel pathways:
Hydrogen technologies
--------------------
electrolysis
smr - natural gas
smr - natural gas with CCS
smr - biogas
smr - biogas with CCS
coal gasification
wood gasification
wood gasification with CCS
Natural gas technologies
------------------------
cng
biogas
syngas
Diesel technologies
-------------------
diesel
biodiesel - algae
biodiesel - cooking oil
synthetic diesel
Petrol technologies
-------------------
petrol
bioethanol - wheat straw
bioethanol - maize starch
bioethanol - sugarbeet
bioethanol - forest residues
synthetic gasoline
:ivar array: array from the CarModel class
:vartype array: CarModel.array
:ivar scope: dictionary that contains filters for narrowing the analysis
:ivar background_configuration: dictionary that contains choices for background system
:ivar scenario: REMIND energy scenario to use ("SSP2-Baseline": business-as-usual,
"SSP2-PkBudg1100": limits cumulative GHG emissions to 1,100 gigatons by 2100,
"static": no forward-looking modification of the background inventories).
"SSP2-Baseline" selected by default.
.. code-block:: python
"""
def __getitem__(self, key):
"""
Make class['foo'] automatically filter for the parameter 'foo'
Makes the model code much cleaner
:param key: Parameter name
:type key: str
:return: `array` filtered after the parameter selected
"""
return self.temp_array.sel(parameter=key)
def get_results_table(self, split, sensitivity=False):
"""
Format an xarray.DataArray array to receive the results.
:param split: "components" or "impact categories". Split by impact categories only applicable when "endpoint" level is applied.
:return: xarrray.DataArray
"""
if split == "components":
cat = [
"direct - exhaust",
"direct - non-exhaust",
"energy chain",
"maintenance",
"glider",
"EoL",
"powertrain",
"energy storage",
"road",
]
dict_impact_cat = list(self.impact_categories.keys())
if sensitivity == False:
response = xr.DataArray(
np.zeros(
(
self.B.shape[1],
len(self.scope["size"]),
len(self.scope["powertrain"]),
len(self.scope["year"]),
len(cat),
self.iterations,
)
),
coords=[
dict_impact_cat,
self.scope["size"],
self.scope["powertrain"],
self.scope["year"],
cat,
np.arange(0, self.iterations),
],
dims=[
"impact_category",
"size",
"powertrain",
"year",
"impact",
"value",
],
)
else:
params = [a for a in self.array.value.values]
response = xr.DataArray(
np.zeros(
(
self.B.shape[1],
len(self.scope["size"]),
len(self.scope["powertrain"]),
len(self.scope["year"]),
self.iterations,
)
),
coords=[
dict_impact_cat,
self.scope["size"],
self.scope["powertrain"],
self.scope["year"],
params,
],
dims=["impact_category", "size", "powertrain", "year", "parameter"],
)
return response
def get_split_indices(self):
"""
Return list of indices to split the results into categories.
:return: list of indices
:rtype: list
"""
filename = "dict_split.csv"
filepath = DATA_DIR / filename
if not filepath.is_file():
raise FileNotFoundError("The dictionary of splits could not be found.")
with open(filepath) as f:
csv_list = [[val.strip() for val in r.split(";")] for r in f.readlines()]
(_, _, *header), *data = csv_list
csv_dict = {}
for row in data:
key, sub_key, *values = row
if key in csv_dict:
if sub_key in csv_dict[key]:
csv_dict[key][sub_key].append(
{"search by": values[0], "search for": values[1]}
)
else:
csv_dict[key][sub_key] = [
{"search by": values[0], "search for": values[1]}
]
else:
csv_dict[key] = {
sub_key: [{"search by": values[0], "search for": values[1]}]
}
flatten = itertools.chain.from_iterable
d = {}
l = []
d['direct - exhaust'] = []
d['direct - exhaust'].append(
self.inputs[("Carbon dioxide, fossil", ("air",), "kilogram")]
)
d['direct - exhaust'].append(
self.inputs[("Carbon dioxide, from soil or biomass stock", ("air",), "kilogram")]
)
d['direct - exhaust'].append(
self.inputs[("Cadmium", ("air", "urban air close to ground"), "kilogram")]
)
d['direct - exhaust'].append(
self.inputs[("Copper", ("air", "urban air close to ground"), "kilogram")]
)
d['direct - exhaust'].append(
self.inputs[("Chromium", ("air", "urban air close to ground"), "kilogram")]
)
d['direct - exhaust'].append(
self.inputs[("Nickel", ("air", "urban air close to ground"), "kilogram")]
)
d['direct - exhaust'].append(
self.inputs[("Selenium", ("air", "urban air close to ground"), "kilogram")]
)
d['direct - exhaust'].append(
self.inputs[("Zinc", ("air", "urban air close to ground"), "kilogram")]
)
d['direct - exhaust'].append(
self.inputs[("Chromium VI", ("air", "urban air close to ground"), "kilogram")]
)
d['direct - exhaust'].extend(self.index_emissions)
d['direct - exhaust'].extend(self.index_noise)
l.append(d['direct - exhaust'])
for cat in csv_dict["components"]:
d[cat] = list(
flatten(
[
self.get_index_of_flows([l["search for"]], l["search by"])
for l in csv_dict["components"][cat]
]
)
)
l.append(d[cat])
list_ind = [d[x] for x in d]
maxLen = max(map(len, list_ind))
for row in list_ind:
while len(row) < maxLen:
row.extend([len(self.inputs) - 1])
return list(d.keys()), list_ind
def get_A_matrix(self):
"""
Load the A matrix. The A matrix contains exchanges of products (rows) between activities (columns).
:return: A matrix with three dimensions of shape (number of values, number of products, number of activities).
:rtype: numpy.ndarray
"""
filename = "A_matrix.csv"
filepath = (
Path(getframeinfo(currentframe()).filename)
.resolve()
.parent.joinpath("data/" + filename)
)
if not filepath.is_file():
raise FileNotFoundError("The technology matrix could not be found.")
initial_A = np.genfromtxt(filepath, delimiter=";")
new_A = np.identity(len(self.inputs))
new_A[0 : np.shape(initial_A)[0], 0 : np.shape(initial_A)[0]] = initial_A
# Resize the matrix to fit the number of iterations in `array`
new_A = np.resize(new_A, (self.array.shape[1], new_A.shape[0], new_A.shape[1]))
return new_A
def get_B_matrix(self):
"""
        Load the B matrix. The B matrix contains impact assessment figures for a given impact assessment method,
per unit of activity. Its length column-wise equals the length of the A matrix row-wise.
Its length row-wise equals the number of impact assessment methods.
:param method: only "recipe" and "ilcd" available at the moment.
:param level: only "midpoint" available at the moment.
:return: an array with impact values per unit of activity for each method.
:rtype: numpy.ndarray
"""
if self.method == "recipe":
if self.method_type == "midpoint":
list_file_names = glob.glob(
str(REMIND_FILES_DIR) + "/*recipe_midpoint*{}*.csv".format(self.scenario)
)
B = np.zeros((len(list_file_names), 21, len(self.inputs)))
else:
list_file_names = glob.glob(
str(REMIND_FILES_DIR) + "/*recipe_endpoint*{}*.csv".format(self.scenario)
)
B = np.zeros((len(list_file_names), 3, len(self.inputs)))
else:
list_file_names = glob.glob(
str(REMIND_FILES_DIR) + "/*ilcd*{}*.csv".format(self.scenario)
)
B = np.zeros((len(list_file_names), 19, len(self.inputs)))
for f in list_file_names:
initial_B = np.genfromtxt(f, delimiter=";")
new_B = np.zeros((np.shape(initial_B)[0], len(self.inputs),))
new_B[0 : np.shape(initial_B)[0], 0 : np.shape(initial_B)[1]] = initial_B
B[list_file_names.index(f), :, :] = new_B
list_impact_categories = list(self.impact_categories.keys())
if self.scenario != "static":
response = xr.DataArray(
B,
coords=[
[2005, 2010, 2020, 2030, 2040, 2050],
list_impact_categories,
list(self.inputs.keys()),
],
dims=["year", "category", "activity"],
)
else:
response = xr.DataArray(
B,
coords=[
[2020],
list_impact_categories,
list(self.inputs.keys()),
],
dims=["year", "category", "activity"],
)
return response
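    # Note (illustrative, not part of this class): in standard matrix-based LCA,
    # once the technology matrix A and characterization matrix B are assembled,
    # the impact scores g for a final-demand vector f follow from the scaling
    # vector s:
    #     s = solve(A, f)    # e.g. np.linalg.solve(A, f)
    #     g = B @ s          # impact per category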
def get_dict_input(self):
"""
Load a dictionary with tuple ("name of activity", "location", "unit", "reference product") as key, row/column
indices as values.
:return: dictionary with `label:index` pairs.
:rtype: dict
"""
filename = "dict_inputs_A_matrix.csv"
filepath = DATA_DIR / filename
if not filepath.is_file():
raise FileNotFoundError(
"The dictionary of activity labels could not be found."
)
csv_dict = {}
count = 0
with open(filepath) as f:
input_dict = csv.reader(f, delimiter=";")
for row in input_dict:
if "(" in row[1]:
new_str = row[1].replace("(", "")
new_str = new_str.replace(")", "")
new_str = [s.strip() for s in new_str.split(",") if s]
t = ()
for s in new_str:
if "low population" in s:
s = "low population density, long-term"
t += (s,)
break
else:
t += (s.replace("'", ""),)
csv_dict[(row[0], t, row[2])] = count
else:
csv_dict[(row[0], row[1], row[2], row[3])] = count
count += 1
return csv_dict
def get_dict_impact_categories(self):
"""
Load a dictionary with available impact assessment methods as keys, and assessment level and categories as values.
..code-block:: python
{'recipe': {'midpoint': ['freshwater ecotoxicity',
'human toxicity',
'marine ecotoxicity',
'terrestrial ecotoxicity',
'metal depletion',
'agricultural land occupation',
'climate change',
'fossil depletion',
'freshwater eutrophication',
'ionising radiation',
'marine eutrophication',
'natural land transformation',
'ozone depletion',
'particulate matter formation',
'photochemical oxidant formation',
'terrestrial acidification',
'urban land occupation',
'water depletion',
'human noise',
'primary energy, non-renewable',
'primary energy, renewable']
}
}
:return: dictionary
:rtype: dict
"""
filename = "dict_impact_categories.csv"
filepath = DATA_DIR / filename
if not filepath.is_file():
raise FileNotFoundError(
"The dictionary of impact categories could not be found."
)
csv_dict = {}
with open(filepath) as f:
input_dict = csv.reader(f, delimiter=";")
for row in input_dict:
if row[0] == self.method and row[3] == self.method_type:
csv_dict[row[2]] = {'method':row[1],
'category':row[2],
'type':row[3],
'abbreviation':row[4],
'unit':row[5],
'source':row[6]}
return csv_dict
def get_rev_dict_input(self):
"""
Reverse the self.inputs dictionary.
:return: reversed dictionary
:rtype: dict
"""
return {v: k for k, v in self.inputs.items()}
def get_index_vehicle_from_array(
self, items_to_look_for, items_to_look_for_also=None, method="or"
):
"""
Return list of row/column indices of self.array of labels that contain the string defined in `items_to_look_for`.
:param items_to_look_for: string to search for
:return: list
"""
if not isinstance(items_to_look_for, list):
items_to_look_for = [items_to_look_for]
        if items_to_look_for_also is not None:
if not isinstance(items_to_look_for_also, list):
items_to_look_for_also = [items_to_look_for_also]
list_vehicles = self.array.desired.values.tolist()
if method == "or":
return [
list_vehicles.index(c)
for c in list_vehicles
if set(items_to_look_for).intersection(c)
]
if method == "and":
return [
list_vehicles.index(c)
for c in list_vehicles
if set(items_to_look_for).intersection(c)
and set(items_to_look_for_also).intersection(c)
]
def get_index_of_flows(self, items_to_look_for, search_by="name"):
"""
Return list of row/column indices of self.A of labels that contain the string defined in `items_to_look_for`.
:param items_to_look_for: string
:param search_by: "name" or "compartment" (for elementary flows)
:return: list of row/column indices
:rtype: list
"""
if search_by == "name":
return [
int(self.inputs[c])
for c in self.inputs
if all(ele in c[0].lower() for ele in items_to_look_for)
]
if search_by == "compartment":
return [
int(self.inputs[c])
for c in self.inputs
if all(ele in c[1] for ele in items_to_look_for)
]
def export_lci(
self,
presamples=True,
ecoinvent_compatibility=True,
ecoinvent_version="3.6",
db_name="carculator db",
):
"""
Export the inventory as a dictionary. Also return a list of arrays that contain pre-sampled random values if
:meth:`stochastic` of :class:`CarModel` class has been called.
:param presamples: boolean.
:param ecoinvent_compatibility: bool. If True, compatible with ecoinvent. If False, compatible with REMIND-ecoinvent.
:param ecoinvent_version: str. "3.5", "3.6" or "uvek"
:return: inventory, and optionally, list of arrays containing pre-sampled values.
:rtype: list
"""
# Create electricity and fuel market datasets
self.create_electricity_market_for_fuel_prep()
# Create electricity market dataset for battery production
self.create_electricity_market_for_battery_production()
self.set_inputs_in_A_matrix(self.array.values)
if presamples == True:
lci, array = ExportInventory(
self.A, self.rev_inputs, db_name=db_name
).write_lci(presamples, ecoinvent_compatibility, ecoinvent_version)
return (lci, array)
else:
lci = ExportInventory(self.A, self.rev_inputs, db_name=db_name).write_lci(
presamples, ecoinvent_compatibility, ecoinvent_version
)
return lci
def export_lci_to_bw(
self,
presamples=True,
ecoinvent_compatibility=True,
ecoinvent_version="3.6",
db_name="carculator db",
):
"""
Export the inventory as a `brightway2` bw2io.importers.base_lci.LCIImporter object
with the inventory in the `data` attribute.
.. code-block:: python
# get the inventory
i, _ = ic.export_lci_to_bw()
# import it in a Brightway2 project
i.match_database('ecoinvent 3.6 cutoff', fields=('name', 'unit', 'location', 'reference product'))
i.match_database("biosphere3", fields=('name', 'unit', 'categories'))
i.match_database(fields=('name', 'unit', 'location', 'reference product'))
i.match_database(fields=('name', 'unit', 'categories'))
# Create an additional biosphere database for the few flows that do not
# exist in "biosphere3"
i.create_new_biosphere("additional_biosphere", relink=True)
# Check if all exchanges link
i.statistics()
# Register the database
i.write_database()
:return: LCIImport object that can be directly registered in a `brightway2` project.
:rtype: bw2io.importers.base_lci.LCIImporter
"""
# Create electricity and fuel market datasets
self.create_electricity_market_for_fuel_prep()
# Create electricity market dataset for battery production
self.create_electricity_market_for_battery_production()
self.set_inputs_in_A_matrix(self.array.values)
if presamples == True:
lci, array = ExportInventory(
self.A, self.rev_inputs, db_name=db_name
).write_lci_to_bw(presamples, ecoinvent_compatibility, ecoinvent_version)
return (lci, array)
else:
lci = ExportInventory(
self.A, self.rev_inputs, db_name=db_name
).write_lci_to_bw(presamples, ecoinvent_compatibility, ecoinvent_version)
return lci
def export_lci_to_excel(
self,
directory=None,
ecoinvent_compatibility=True,
ecoinvent_version="3.6",
software_compatibility="brightway2",
filename=None,
):
"""
        Export the inventory as an Excel file (if the destination software is Brightway2) or a CSV file (if the destination software is Simapro).
Also return the file path where the file is stored.
:param directory: directory where to save the file.
:type directory: str
:param ecoinvent_compatibility: If True, compatible with ecoinvent. If False, compatible with REMIND-ecoinvent.
:param ecoinvent_version: "3.6", "3.5" or "uvek"
:param software_compatibility: "brightway2" or "simapro"
:return: file path where the file is stored.
:rtype: str
"""
if software_compatibility not in ("brightway2", "simapro"):
raise NameError(
"The destination software argument is not valid. Choose between 'brightway2' or 'simapro'."
)
# Simapro inventory only for ecoinvent 3.5 or UVEK
if software_compatibility == "simapro":
if ecoinvent_version == "3.6":
print(
"Simapro-compatible inventory export is only available for ecoinvent 3.5 or UVEK."
)
return
ecoinvent_compatibility = True
ecoinvent_version = "3.5"
# Create electricity and fuel market datasets
self.create_electricity_market_for_fuel_prep()
# Create electricity market dataset for battery production
self.create_electricity_market_for_battery_production()
self.set_inputs_in_A_matrix(self.array.values)
fp = ExportInventory(
self.A, self.rev_inputs, db_name=filename or "carculator db"
).write_lci_to_excel(
directory,
ecoinvent_compatibility,
ecoinvent_version,
software_compatibility,
filename,
)
return fp
def define_electricity_mix_for_fuel_prep(self):
"""
This function defines a fuel mix based either on user-defined mix, or on default mixes for a given country.
The mix is calculated as the average mix, weighted by the distribution of annually driven kilometers.
:return:
"""
try:
losses_to_low = float(self.bs.losses[self.country]["LV"])
except KeyError:
# If losses for the country are not found, assume EU average
losses_to_low = float(self.bs.losses["RER"]["LV"])
if "custom electricity mix" in self.background_configuration:
# If a special electricity mix is specified, we use it
mix = self.background_configuration["custom electricity mix"]
else:
use_year = [
int(i)
for i in (
self.array.values[
self.array_inputs["lifetime kilometers"],
:,
self.get_index_vehicle_from_array(
[
"BEV",
"FCEV",
"PHEV-p",
"PHEV-d",
"ICEV-p",
"ICEV-d",
"HEV-p",
"HEV-d",
"ICEV-g",
]
),
]
/ self.array.values[
self.array_inputs["kilometers per year"],
:,
self.get_index_vehicle_from_array(
[
"BEV",
"FCEV",
"PHEV-p",
"PHEV-d",
"ICEV-p",
"ICEV-d",
"HEV-p",
"HEV-d",
"ICEV-g",
]
),
]
)
.mean(axis=1)
.reshape(-1, len(self.scope["year"]))
.mean(axis=0)
]
mix = [
self.bs.electricity_mix.sel(
country=self.country,
variable=[
"Hydro",
"Nuclear",
"Gas",
"Solar",
"Wind",
"Biomass",
"Coal",
"Oil",
"Geothermal",
"Waste",
],
)
.interp(
year=np.arange(y, y + use_year[self.scope["year"].index(y)]),
kwargs={"fill_value": "extrapolate"},
)
.mean(axis=0)
.values
if y + use_year[self.scope["year"].index(y)] <= 2050
else self.bs.electricity_mix.sel(
country=self.country,
variable=[
"Hydro",
"Nuclear",
"Gas",
"Solar",
"Wind",
"Biomass",
"Coal",
"Oil",
"Geothermal",
"Waste",
],
)
.interp(year=np.arange(y, 2051), kwargs={"fill_value": "extrapolate"})
.mean(axis=0)
.values
for y in self.scope["year"]
]
return mix
def create_electricity_market_for_fuel_prep(self):
""" This function fills the electricity market that supplies battery charging operations
and hydrogen production through electrolysis.
"""
try:
losses_to_low = float(self.bs.losses[self.country]["LV"])
except KeyError:
# If losses for the country are not found, assume EU average
losses_to_low = float(self.bs.losses["RER"]["LV"])
# Fill the electricity markets for battery charging and hydrogen production
for y in self.scope["year"]:
m = np.array(self.mix[self.scope["year"].index(y)]).reshape(-1, 10, 1)
# Add electricity technology shares
self.A[
np.ix_(
np.arange(self.iterations),
[self.inputs[self.elec_map[t]] for t in self.elec_map],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for fuel preparation" in i[0]
],
)
] = (m * -1 * losses_to_low)
# Add transmission network for high and medium voltage
self.A[
:,
self.inputs[
(
"transmission network construction, electricity, high voltage",
"CH",
"kilometer",
"transmission network, electricity, high voltage",
)
],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for fuel preparation" in i[0]
],
] = (6.58e-9 * -1 * losses_to_low)
self.A[
:,
self.inputs[
(
"transmission network construction, electricity, medium voltage",
"CH",
"kilometer",
"transmission network, electricity, medium voltage",
)
],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for fuel preparation" in i[0]
],
] = (1.86e-8 * -1 * losses_to_low)
self.A[
:,
self.inputs[
(
"transmission network construction, long-distance",
"UCTE",
"kilometer",
"transmission network, long-distance",
)
],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for fuel preparation" in i[0]
],
] = (3.17e-10 * -1 * losses_to_low)
# Add distribution network, low voltage
self.A[
:,
self.inputs[
(
"distribution network construction, electricity, low voltage",
"CH",
"kilometer",
"distribution network, electricity, low voltage",
)
],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for fuel preparation" in i[0]
],
] = (8.74e-8 * -1 * losses_to_low)
# Add supply of sulfur hexafluoride for transformers
self.A[
:,
self.inputs[
(
"market for sulfur hexafluoride, liquid",
"RER",
"kilogram",
"sulfur hexafluoride, liquid",
)
],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for fuel preparation" in i[0]
],
] = ((5.4e-8 + 2.99e-9) * -1 * losses_to_low)
# Add SF_6 leakage
self.A[
:,
self.inputs[("Sulfur hexafluoride", ("air",), "kilogram")],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for fuel preparation" in i[0]
],
] = ((5.4e-8 + 2.99e-9) * -1 * losses_to_low)
def create_electricity_market_for_battery_production(self):
"""
This function fills in the column in `self.A` concerned with the electricity mix used for manufacturing battery cells
:return:
"""
battery_tech = self.background_configuration["energy storage"]["electric"][
"type"
]
battery_origin = self.background_configuration["energy storage"]["electric"][
"origin"
]
try:
losses_to_low = float(self.bs.losses[battery_origin]["LV"])
except KeyError:
losses_to_low = float(self.bs.losses["CN"]["LV"])
mix_battery_manufacturing = (
self.bs.electricity_mix.sel(
country=battery_origin,
variable=[
"Hydro",
"Nuclear",
"Gas",
"Solar",
"Wind",
"Biomass",
"Coal",
"Oil",
"Geothermal",
"Waste",
],
)
.interp(year=self.scope["year"], kwargs={"fill_value": "extrapolate"})
.values
)
# Fill the electricity markets for battery production
for y in self.scope["year"]:
m = np.array(
mix_battery_manufacturing[self.scope["year"].index(y)]
).reshape(-1, 10, 1)
self.A[
np.ix_(
np.arange(self.iterations),
[self.inputs[self.elec_map[t]] for t in self.elec_map],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for energy storage production" in i[0]
],
)
] = (m * losses_to_low * -1)
# Add transmission network for high and medium voltage
self.A[
:,
self.inputs[
(
"transmission network construction, electricity, high voltage",
"CH",
"kilometer",
"transmission network, electricity, high voltage",
)
],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for energy storage production" in i[0]
],
] = (6.58e-9 * -1 * losses_to_low)
self.A[
:,
self.inputs[
(
"transmission network construction, electricity, medium voltage",
"CH",
"kilometer",
"transmission network, electricity, medium voltage",
)
],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for energy storage production" in i[0]
],
] = (1.86e-8 * -1 * losses_to_low)
self.A[
:,
self.inputs[
(
"transmission network construction, long-distance",
"UCTE",
"kilometer",
"transmission network, long-distance",
)
],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for energy storage production" in i[0]
],
] = (3.17e-10 * -1 * losses_to_low)
# Add distribution network, low voltage
self.A[
:,
self.inputs[
(
"distribution network construction, electricity, low voltage",
"CH",
"kilometer",
"distribution network, electricity, low voltage",
)
],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for energy storage production" in i[0]
],
] = (8.74e-8 * -1 * losses_to_low)
# Add supply of sulfur hexafluoride for transformers
self.A[
:,
self.inputs[
(
"market for sulfur hexafluoride, liquid",
"RER",
"kilogram",
"sulfur hexafluoride, liquid",
)
],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for energy storage production" in i[0]
],
] = ((5.4e-8 + 2.99e-9) * -1 * losses_to_low)
# Add SF_6 leakage
self.A[
:,
self.inputs[("Sulfur hexafluoride", ("air",), "kilogram")],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for energy storage production" in i[0]
],
] = ((5.4e-8 + 2.99e-9) * -1 * losses_to_low)
def set_actual_range(self):
"""
Set the actual range considering the blend.
Liquid bio-fuels and synthetic fuels typically have a lower calorific value. Hence, the need to recalculate
the vehicle range.
Modifies parameter `range` of `array` in place
"""
if {"ICEV-p", "HEV-p", "PHEV-p"}.intersection(set(self.scope["powertrain"])):
for y in self.scope["year"]:
share_primary = self.fuel_blends["petrol"]["primary"]["share"][
self.scope["year"].index(y)
]
lhv_primary = self.fuel_blends["petrol"]["primary"]["lhv"]
share_secondary = self.fuel_blends["petrol"]["secondary"]["share"][
self.scope["year"].index(y)
]
lhv_secondary = self.fuel_blends["petrol"]["secondary"]["lhv"]
index = self.get_index_vehicle_from_array(
["ICEV-p", "HEV-p", "PHEV-p"], y, method="and"
)
self.array.values[self.array_inputs["range"], :, index] = (
(
(
self.array.values[self.array_inputs["fuel mass"], :, index]
* share_primary
* lhv_primary
)
+ (
self.array.values[self.array_inputs["fuel mass"], :, index]
* share_secondary
* lhv_secondary
)
)
* 1000
/ self.array.values[self.array_inputs["TtW energy"], :, index]
)
if {"ICEV-d", "HEV-d", "PHEV-d"}.intersection(set(self.scope["powertrain"])):
for y in self.scope["year"]:
share_primary = self.fuel_blends["diesel"]["primary"]["share"][
self.scope["year"].index(y)
]
lhv_primary = self.fuel_blends["diesel"]["primary"]["lhv"]
share_secondary = self.fuel_blends["diesel"]["secondary"]["share"][
self.scope["year"].index(y)
]
lhv_secondary = self.fuel_blends["diesel"]["secondary"]["lhv"]
index = self.get_index_vehicle_from_array(
["ICEV-d", "PHEV-d", "HEV-d"], y, method="and"
)
self.array.values[self.array_inputs["range"], :, index] = (
(
(
self.array.values[self.array_inputs["fuel mass"], :, index]
* share_primary
* lhv_primary
)
+ (
self.array.values[self.array_inputs["fuel mass"], :, index]
* share_secondary
* lhv_secondary
)
)
* 1000
/ self.array.values[self.array_inputs["TtW energy"], :, index]
)
def define_fuel_blends(self):
"""
This function defines fuel blends from what is passed in `background_configuration`.
It populates a dictionary `self.fuel_blends` that contains the respective shares, lower heating values
and CO2 emission factors of the fuels used.
:return:
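The resulting structure looks as follows (illustrative shares for two years;
lhv and CO2 values taken from the dictionaries defined below):

.. code-block:: python

    self.fuel_blends["petrol"] = {
        "primary": {"type": "petrol", "share": [0.9, 0.8],
                    "lhv": 42.4, "CO2": 3.18},
        "secondary": {"type": "bioethanol - wheat straw", "share": [0.1, 0.2],
                      "lhv": 26.8, "CO2": 1.91},
    }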
"""
fuels_lhv = {
"petrol": 42.4,
"bioethanol - wheat straw": 26.8,
"bioethanol - maize starch": 26.8,
"bioethanol - sugarbeet": 26.8,
"bioethanol - forest residues": 26.8,
"synthetic gasoline": 42.4,
"diesel": 42.8,
"biodiesel - cooking oil": 31.7,
"biodiesel - algae": 31.7,
"synthetic diesel": 43.3,
"cng": 55.5,
"biogas": 55.5,
"syngas": 55.5
}
fuels_CO2 = {
"petrol": 3.18,
"bioethanol - wheat straw": 1.91,
"bioethanol - maize starch": 1.91,
"bioethanol - sugarbeet": 1.91,
"bioethanol - forest residues": 1.91,
"synthetic gasoline": 3.18,
"diesel": 3.14,
"biodiesel - cooking oil": 2.85,
"biodiesel - algae": 2.85,
"synthetic diesel": 3.16,
"cng": 2.65,
"biogas": 2.65,
"syngas": 2.65
}
if {"ICEV-p", "HEV-p", "PHEV-p"}.intersection(set(self.scope["powertrain"])):
fuel_type = "petrol"
primary, secondary, primary_share, secondary_share = self.find_fuel_shares(
fuel_type
)
self.create_fuel_markets(
fuel_type, primary, secondary, primary_share, secondary_share
)
self.fuel_blends[fuel_type] = {
"primary": {
"type": primary,
"share": primary_share,
"lhv": fuels_lhv[primary],
"CO2": fuels_CO2[primary],
},
"secondary": {
"type": secondary,
"share": secondary_share,
"lhv": fuels_lhv[secondary],
"CO2": fuels_CO2[secondary],
},
}
if {"ICEV-d", "HEV-d", "PHEV-d"}.intersection(set(self.scope["powertrain"])):
fuel_type = "diesel"
primary, secondary, primary_share, secondary_share = self.find_fuel_shares(
fuel_type
)
self.create_fuel_markets(
fuel_type, primary, secondary, primary_share, secondary_share
)
self.fuel_blends[fuel_type] = {
"primary": {
"type": primary,
"share": primary_share,
"lhv": fuels_lhv[primary],
"CO2": fuels_CO2[primary],
},
"secondary": {
"type": secondary,
"share": secondary_share,
"lhv": fuels_lhv[secondary],
"CO2": fuels_CO2[secondary],
},
}
if {"ICEV-g"}.intersection(set(self.scope["powertrain"])):
fuel_type = "cng"
primary, secondary, primary_share, secondary_share = self.find_fuel_shares(
fuel_type
)
self.create_fuel_markets(
fuel_type, primary, secondary, primary_share, secondary_share
)
self.fuel_blends[fuel_type] = {
"primary": {
"type": primary,
"share": primary_share,
"lhv": fuels_lhv[primary],
"CO2": fuels_CO2[primary],
},
"secondary": {
"type": secondary,
"share": secondary_share,
"lhv": fuels_lhv[secondary],
"CO2": fuels_CO2[secondary],
},
}
if {"FCEV"}.intersection(set(self.scope["powertrain"])):
fuel_type = "hydrogen"
primary, secondary, primary_share, secondary_share = self.find_fuel_shares(
fuel_type
)
self.create_fuel_markets(
fuel_type, primary, secondary, primary_share, secondary_share
)
self.fuel_blends[fuel_type] = {
"primary": {"type": primary, "share": primary_share},
"secondary": {"type": secondary, "share": secondary_share},
}
if {"BEV", "PHEV-p", "PHEV-d"}.intersection(set(self.scope["powertrain"])):
fuel_type = "electricity"
self.create_fuel_markets(fuel_type)
def create_fuel_markets(
self,
fuel_type,
primary=None,
secondary=None,
primary_share=None,
secondary_share=None,
):
"""
This function creates markets for fuel, considering a given blend, a given fuel type and a given year.
It also adds separate electricity input in case hydrogen from electrolysis is needed somewhere in the fuel supply chain.
:return:
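The "additional electricity" entries in the fuel dictionary below are
interpreted as kWh of electricity needed per kg of fuel (e.g. 58 kWh/kg for
hydrogen from electrolysis). For a blend, the requirement is share-weighted:

.. code-block:: python

    # illustrative: 50/50 blend of electrolysis H2 (58 kWh/kg) and SMR H2 (0 kWh/kg)
    additional_electricity = (58 * 0.5) + (0 * 0.5)  # = 29 kWh per kg of blend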
"""
d_fuels = {
"electrolysis": {
"name": (
"Hydrogen, gaseous, 700 bar, from electrolysis, at H2 fuelling station",
"RER",
"kilogram",
"Hydrogen, gaseous, 700 bar, from electrolysis, at H2 fuelling station",
),
"additional electricity": 58,
},
"smr - natural gas": {
"name": (
"Hydrogen, gaseous, 700 bar, from SMR NG w/o CCS, at H2 fuelling station",
"RER",
"kilogram",
"Hydrogen, gaseous, 700 bar, from SMR NG w/o CCS, at H2 fuelling station",
),
"additional electricity": 0,
},
"smr - natural gas with CCS": {
"name": (
"Hydrogen, gaseous, 700 bar, from SMR NG w CCS, at H2 fuelling station",
"RER",
"kilogram",
"Hydrogen, gaseous, 700 bar, from SMR NG w CCS, at H2 fuelling station",
),
"additional electricity": 0,
},
"smr - biogas": {
"name": (
"Hydrogen, gaseous, 700 bar, from SMR of biogas, at H2 fuelling station",
"RER",
"kilogram",
"Hydrogen, gaseous, 700 bar, from SMR of biogas, at H2 fuelling station",
),
"additional electricity": 0,
},
"smr - biogas with CCS": {
"name": (
"Hydrogen, gaseous, 700 bar, from SMR of biogas with CCS, at H2 fuelling station",
"RER",
"kilogram",
"Hydrogen, gaseous, 700 bar, from SMR of biogas with CCS, at H2 fuelling station",
),
"additional electricity": 0,
},
"coal gasification": {
"name": (
"Hydrogen, gaseous, 700 bar, from coal gasification, at H2 fuelling station",
"RER",
"kilogram",
"Hydrogen, gaseous, 700 bar, from coal gasification, at H2 fuelling station",
),
"additional electricity": 0,
},
"wood gasification": {
"name": (
"Hydrogen, gaseous, 700 bar, from dual fluidised bed gasification of woody biomass, at H2 fuelling station",
"CH",
"kilogram",
"Hydrogen, gaseous, 700 bar",
),
"additional electricity": 0,
},
"wood gasification with CCS": {
"name": (
"Hydrogen, gaseous, 700 bar, from dual fluidised bed gasification of woody biomass with CCS, at H2 fuelling station",
"CH",
"kilogram",
"Hydrogen, gaseous, 700 bar",
),
"additional electricity": 0,
},
"cng": {
"name": (
"market for natural gas, from high pressure network (1-5 bar), at service station",
"GLO",
"kilogram",
"natural gas, from high pressure network (1-5 bar), at service station",
),
"additional electricity": 0,
},
"biogas": {
"name": (
"biogas upgrading - sewage sludge - amine scrubbing - best",
"CH",
"kilogram",
"biogas upgrading - sewage sludge - amine scrubbing - best",
),
"additional electricity": 0,
},
"syngas": {
"name": (
"Methane production, synthetic, from electrochemical methanation",
"RER",
"kilogram",
"Methane, synthetic",
),
"additional electricity": 58 * 0.50779661,
},
"diesel": {
"name": (
"market for diesel",
"Europe without Switzerland",
"kilogram",
"diesel",
),
"additional electricity": 0,
},
"biodiesel - algae": {
"name": (
"Biodiesel from algae",
"RER",
"kilogram",
"Biodiesel from algae",
),
"additional electricity": 0,
},
"biodiesel - cooking oil": {
"name": (
"Biodiesel from cooking oil",
"RER",
"kilogram",
"Biodiesel from cooking oil",
),
"additional electricity": 0,
},
"synthetic diesel": {
"name": (
"Diesel production, synthetic, Fischer Tropsch process",
"RER",
"kilogram",
"Diesel, synthetic",
),
"additional electricity": 58 * 0.2875,
},
"petrol": {
"name": (
"market for petrol, low-sulfur",
"Europe without Switzerland",
"kilogram",
"petrol, low-sulfur",
),
"additional electricity": 0,
},
"bioethanol - wheat straw": {
"name": (
"Ethanol from wheat straw pellets",
"RER",
"kilogram",
"Ethanol from wheat straw pellets",
),
"additional electricity": 0,
},
"bioethanol - forest residues": {
"name": (
"Ethanol from forest residues",
"RER",
"kilogram",
"Ethanol from forest residues",
),
"additional electricity": 0,
},
"bioethanol - sugarbeet": {
"name": (
"Ethanol from sugarbeet",
"RER",
"kilogram",
"Ethanol from sugarbeet",
),
"additional electricity": 0,
},
"bioethanol - maize starch": {
"name": (
"Ethanol from maize starch",
"RER",
"kilogram",
"Ethanol from maize starch",
),
"additional electricity": 0,
},
"synthetic gasoline": {
"name": (
"Gasoline production, synthetic, from methanol",
"RER",
"kilogram",
"Gasoline, synthetic",
),
"additional electricity": 58 * 0.328,
},
}
d_dataset_name = {
"petrol": "fuel supply for gasoline vehicles, ",
"diesel": "fuel supply for diesel vehicles, ",
"cng": "fuel supply for gas vehicles, ",
"hydrogen": "fuel supply for hydrogen vehicles, ",
"electricity": "electricity supply for electric vehicles, ",
}
if fuel_type != "electricity":
for y in self.scope["year"]:
dataset_name = d_dataset_name[fuel_type] + str(y)
fuel_market_index = [
self.inputs[i] for i in self.inputs if i[0] == dataset_name
][0]
primary_fuel_activity_index = self.inputs[d_fuels[primary]["name"]]
secondary_fuel_activity_index = self.inputs[d_fuels[secondary]["name"]]
self.A[:, primary_fuel_activity_index, fuel_market_index] = (
-1 * primary_share[self.scope["year"].index(y)]
)
self.A[:, secondary_fuel_activity_index, fuel_market_index] = (
-1 * secondary_share[self.scope["year"].index(y)]
)
additional_electricity = (
d_fuels[primary]["additional electricity"]
* primary_share[self.scope["year"].index(y)]
) + (
d_fuels[secondary]["additional electricity"]
* secondary_share[self.scope["year"].index(y)]
)
if additional_electricity > 0:
electricity_mix_index = [
self.inputs[i]
for i in self.inputs
if i[0] == "electricity market for fuel preparation, " + str(y)
][0]
self.A[:, electricity_mix_index, fuel_market_index] = (
-1 * additional_electricity
)
else:
for y in self.scope["year"]:
dataset_name = d_dataset_name[fuel_type] + str(y)
electricity_market_index = [
self.inputs[i] for i in self.inputs if i[0] == dataset_name
][0]
electricity_mix_index = [
self.inputs[i]
for i in self.inputs
if i[0] == "electricity market for fuel preparation, " + str(y)
][0]
self.A[:, electricity_mix_index, electricity_market_index] = -1
def set_inputs_in_A_matrix(self, array):
"""
Fill-in the A matrix. Does not return anything. Modifies in place.
Shape of the A matrix (values, products, activities).
:param array: :attr:`array` from :class:`CarModel` class
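Inputs are entered per vehicle-kilometer with a negative sign, following the
technosphere convention used throughout. For example, for the glider below:

.. code-block:: python

    # kg of glider per vkm (negative = input to the vehicle activity)
    glider_per_vkm = (
        array[self.array_inputs["glider base mass"], :]
        / array[self.array_inputs["lifetime kilometers"], :]
        * -1
    )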
"""
# Glider
self.A[
:,
self.inputs[
(
"market for glider, passenger car",
"GLO",
"kilogram",
"glider, passenger car",
)
],
-self.number_of_cars :,
] = (
(array[self.array_inputs["glider base mass"], :])
/ array[self.array_inputs["lifetime kilometers"], :]
* -1
)
self.A[
:,
self.inputs[
("Glider lightweighting", "GLO", "kilogram", "Glider lightweighting")
],
-self.number_of_cars :,
] = (
(
array[self.array_inputs["lightweighting"], :]
* array[self.array_inputs["glider base mass"], :]
)
/ array[self.array_inputs["lifetime kilometers"], :]
* -1
)
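# Maintenance, scaled by curb mass; the constants below suggest a 1,240 kg
# reference car maintained over a 150,000 km service life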
self.A[
:,
self.inputs[
(
"maintenance, passenger car",
"RER",
"unit",
"passenger car maintenance",
)
],
-self.number_of_cars :,
] = (array[self.array_inputs["curb mass"], :] / 1240 / 150000 * -1)
# Glider EoL
self.A[
:,
self.inputs[
(
"market for manual dismantling of used electric passenger car",
"GLO",
"unit",
"manual dismantling of used electric passenger car",
)
],
-self.number_of_cars :,
] = (
array[self.array_inputs["curb mass"], :]
* (1 - array[self.array_inputs["combustion power share"], :])
/ array[self.array_inputs["lifetime kilometers"], :]
* -1
)
self.A[
:,
self.inputs[
(
"market for manual dismantling of used passenger car with internal combustion engine",
"GLO",
"unit",
"manual dismantling of used passenger car with internal combustion engine",
)
],
-self.number_of_cars :,
] = (
array[self.array_inputs["curb mass"], :]
* array[self.array_inputs["combustion power share"], :]
/ array[self.array_inputs["lifetime kilometers"], :]
* -1
)
# Powertrain components
self.A[
:,
self.inputs[
(
"market for charger, electric passenger car",
"GLO",
"kilogram",
"charger, electric passenger car",
)
],
-self.number_of_cars :,
] = (
array[self.array_inputs["charger mass"], :]
/ array[self.array_inputs["lifetime kilometers"], :]
* -1
)
self.A[
:,
self.inputs[
(
"market for converter, for electric passenger car",
"GLO",
"kilogram",
"converter, for electric passenger car",
)
],
-self.number_of_cars :,
] = (
array[self.array_inputs["converter mass"], :]
/ array[self.array_inputs["lifetime kilometers"], :]
* -1
)
self.A[
:,
self.inputs[
(
"market for electric motor, electric passenger car",
"GLO",
"kilogram",
"electric motor, electric passenger car",
)
],
-self.number_of_cars :,
] = (
array[self.array_inputs["electric engine mass"], :]
/ array[self.array_inputs["lifetime kilometers"], :]
* -1
)
self.A[
:,
self.inputs[
(
"market for inverter, for electric passenger car",
"GLO",
"kilogram",
"inverter, for electric passenger car",
)
],
-self.number_of_cars :,
] = (
array[self.array_inputs["inverter mass"], :]
/ array[self.array_inputs["lifetime kilometers"], :]
* -1
)
self.A[
:,
self.inputs[
(
"market for power distribution unit, for electric passenger car",
"GLO",
"kilogram",
"power distribution unit, for electric passenger car",
)
],
-self.number_of_cars :,
] = (
array[self.array_inputs["power distribution unit mass"], :]
/ array[self.array_inputs["lifetime kilometers"], :]
* -1
)
l_elec_pt = [
"charger mass",
"converter mass",
"inverter mass",
"power distribution unit mass",
"electric engine mass",
"fuel cell stack mass",
"fuel cell ancillary BoP mass",
"fuel cell essential BoP mass",
"battery cell mass",
"battery BoP mass",
]
self.A[
:,
self.inputs[
(
"market for used powertrain from electric passenger car, manual dismantling",
"GLO",
"kilogram",
"used powertrain from electric passenger car, manual dismantling",
)
],
-self.number_of_cars :,
] = (
array[[self.array_inputs[l] for l in l_elec_pt], :].sum(axis=0)
/ array[self.array_inputs["lifetime kilometers"], :]
)
self.A[
:,
self.inputs[
(
"market for internal combustion engine, passenger car",
"GLO",
"kilogram",
"internal combustion engine, for passenger car",
)
],
-self.number_of_cars :,
] = (
(
array[
[
self.array_inputs[l]
for l in ["combustion engine mass", "powertrain mass"]
],
:,
].sum(axis=0)
)
/ array[self.array_inputs["lifetime kilometers"], :]
* -1
)
self.A[
:,
self.inputs[("Ancillary BoP", "GLO", "kilogram", "Ancillary BoP")],
-self.number_of_cars :,
] = (
array[self.array_inputs["fuel cell ancillary BoP mass"], :]
/ array[self.array_inputs["lifetime kilometers"], :]
* -1
)
self.A[
:,
self.inputs[("Essential BoP", "GLO", "kilogram", "Essential BoP")],
-self.number_of_cars :,
] = (
array[self.array_inputs["fuel cell essential BoP mass"], :]
/ array[self.array_inputs["lifetime kilometers"], :]
* -1
)
self.A[
:,
self.inputs[("Stack", "GLO", "kilowatt", "Stack")],
-self.number_of_cars :,
] = (
array[self.array_inputs["fuel cell stack mass"], :]
/ array[self.array_inputs["lifetime kilometers"], :]
* -1
)
# Start of printout
print(
"****************** IMPORTANT BACKGROUND PARAMETERS ******************",
end="\n * ",
)
# Energy storage
print(
"The country of use is " + self.country, end="\n * ",
)
battery_tech = self.background_configuration["energy storage"]["electric"][
"type"
]
battery_origin = self.background_configuration["energy storage"]["electric"][
"origin"
]
print(
"Power and energy batteries produced in "
+ battery_origin
+ " using "
+ battery_tech
+ " chemistry.",
end="\n * ",
)
# Use the NMC inventory of Schmidt et al. 2019
self.A[
:,
self.inputs[("Battery BoP", "GLO", "kilogram", "Battery BoP")],
-self.number_of_cars :,
] = (
(
array[self.array_inputs["battery BoP mass"], :]
* (1 + array[self.array_inputs["battery lifetime replacements"], :])
)
/ array[self.array_inputs["lifetime kilometers"], :]
* -1
)
battery_cell_label = (
"Battery cell, " + battery_tech,
"GLO",
"kilogram",
"Battery cell",
)
self.A[:, self.inputs[battery_cell_label], -self.number_of_cars :,] = (
(
array[self.array_inputs["battery cell mass"], :]
* (1 + array[self.array_inputs["fuel cell lifetime replacements"], :])
)
/ array[self.array_inputs["lifetime kilometers"], :]
* -1
)
# Zero out the generic global electricity input to battery cell production;
# it is replaced below by the year- and origin-specific electricity market
self.A[
:,
self.inputs[
(
"market group for electricity, medium voltage",
"World",
"kilowatt hour",
"electricity, medium voltage",
)
],
self.inputs[battery_cell_label],
] = 0
for y in self.scope["year"]:
index = self.get_index_vehicle_from_array(y)
self.A[
np.ix_(
np.arange(self.iterations),
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for energy storage production" in i[0]
],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0] and "Passenger" in i[0]
],
)
] = (
array[
self.array_inputs["battery cell production electricity"], :, index
].T
* self.A[
:,
self.inputs[battery_cell_label],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0] and "Passenger" in i[0]
],
]
).reshape(
self.iterations, 1, -1
)
index_A = [
self.inputs[c]
for c in self.inputs
if any(
ele in c[0]
for ele in ["ICEV-d", "ICEV-p", "HEV-p", "PHEV-p", "PHEV-d", "HEV-d"]
)
]
index = self.get_index_vehicle_from_array(
["ICEV-d", "ICEV-p", "HEV-p", "PHEV-p", "PHEV-d", "HEV-d"]
)
self.A[
:,
self.inputs[
(
"polyethylene production, high density, granulate",
"RER",
"kilogram",
"polyethylene, high density, granulate",
)
],
index_A,
] = (
array[self.array_inputs["fuel tank mass"], :, index]
/ array[self.array_inputs["lifetime kilometers"], :, index]
* -1
).T
index = self.get_index_vehicle_from_array("ICEV-g")
self.A[
:,
self.inputs[
(
"glass fibre reinforced plastic production, polyamide, injection moulded",
"RER",
"kilogram",
"glass fibre reinforced plastic, polyamide, injection moulded",
)
],
self.index_cng,
] = (
array[self.array_inputs["fuel tank mass"], :, index]
/ array[self.array_inputs["lifetime kilometers"], :, index]
* -1
).T
if "hydrogen" in self.background_configuration["energy storage"]:
# If a customization dict is passed
hydro_tank_technology = self.background_configuration["energy storage"][
"hydrogen"
]["type"]
else:
hydro_tank_technology = "carbon fiber"
dict_tank_map = {
"carbon fiber": (
"Fuel tank, compressed hydrogen gas, 700bar",
"GLO",
"kilogram",
"Fuel tank, compressed hydrogen gas, 700bar",
),
"hdpe": (
"Fuel tank, compressed hydrogen gas, 700bar, with HDPE liner",
"RER",
"kilogram",
"Hydrogen tank",
),
"aluminium": (
"Fuel tank, compressed hydrogen gas, 700bar, with aluminium liner",
"RER",
"kilogram",
"Hydrogen tank",
),
}
index = self.get_index_vehicle_from_array("FCEV")
self.A[
:, self.inputs[dict_tank_map[hydro_tank_technology]], self.index_fuel_cell,
] = (
array[self.array_inputs["fuel tank mass"], :, index]
/ array[self.array_inputs["lifetime kilometers"], :, index]
* -1
).T
for y in self.scope["year"]:
sum_renew, co2_intensity_tech = self.define_renewable_rate_in_mix()
if self.scope["year"].index(y) + 1 == len(self.scope["year"]):
end_str = "\n * "
else:
end_str = "\n \t * "
print(
"in "
+ str(y)
+ ", % of renewable: "
+ str(np.round(sum_renew * 100, 0))
+ "%"
+ ", GHG intensity per kWh: "
+ str(
int(
np.sum(
co2_intensity_tech * self.mix[self.scope["year"].index(y)]
)
)
)
+ " g. CO2-eq.",
end=end_str,
)
if {"BEV", "PHEV-p", "PHEV-d"}.intersection(set(self.scope["powertrain"])):
for y in self.scope["year"]:
index = self.get_index_vehicle_from_array(
["BEV", "PHEV-p", "PHEV-d"], y, method="and"
)
self.A[
np.ix_(
np.arange(self.iterations),
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity supply for electric vehicles" in i[0]
],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "Passenger" in i[0]
and any(
True for x in ["BEV", "PHEV-p", "PHEV-d"] if x in i[0]
)
],
)
] = (
array[self.array_inputs["electricity consumption"], :, index] * -1
).T.reshape(
self.iterations, 1, -1
)
if "FCEV" in self.scope["powertrain"]:
index = self.get_index_vehicle_from_array("FCEV")
print(
"{} is completed by {}.".format(
self.fuel_blends["hydrogen"]["primary"]["type"],
self.fuel_blends["hydrogen"]["secondary"]["type"],
),
end="\n \t * ",
)
for y in self.scope["year"]:
if self.scope["year"].index(y) + 1 == len(self.scope["year"]):
end_str = "\n * "
else:
end_str = "\n \t * "
print(
"in "
+ str(y)
+ " _________________________________________ "
+ str(
np.round(
self.fuel_blends["hydrogen"]["secondary"]["share"][
self.scope["year"].index(y)
]
* 100,
0,
)
)
+ "%",
end=end_str,
)
# Primary fuel share
for y in self.scope["year"]:
ind_A = [
self.inputs[i]
for i in self.inputs
if str(y) in i[0] and "Passenger" in i[0] and "FCEV" in i[0]
]
ind_array = [
x for x in self.get_index_vehicle_from_array(y) if x in index
]
self.A[
:,
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "fuel supply for hydrogen vehicles" in i[0]
],
ind_A,
] = (
array[self.array_inputs["fuel mass"], :, ind_array]
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
if "ICEV-g" in self.scope["powertrain"]:
index = self.get_index_vehicle_from_array("ICEV-g")
print(
"{} is completed by {}.".format(
self.fuel_blends["cng"]["primary"]["type"],
self.fuel_blends["cng"]["secondary"]["type"],
),
end="\n \t * ",
)
for y in self.scope["year"]:
if self.scope["year"].index(y) + 1 == len(self.scope["year"]):
end_str = "\n * "
else:
end_str = "\n \t * "
print(
"in "
+ str(y)
+ " _________________________________________ "
+ str(
np.round(
self.fuel_blends["cng"]["secondary"]["share"][
self.scope["year"].index(y)
]
* 100,
0,
)
)
+ "%",
end=end_str,
)
# Primary fuel share
for y in self.scope["year"]:
ind_A = [
self.inputs[i]
for i in self.inputs
if str(y) in i[0] and "Passenger" in i[0] and "ICEV-g" in i[0]
]
ind_array = [
x for x in self.get_index_vehicle_from_array(y) if x in index
]
self.A[
:,
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0] and "fuel supply for gas vehicles" in i[0]
],
ind_A,
] = (
(array[self.array_inputs["fuel mass"], :, ind_array])
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Fuel-based emissions from CNG, CO2
# The share and CO2 emissions factor of CNG is retrieved, if used
share_fossil = 0
CO2_fossil = 0
if self.fuel_blends["cng"]["primary"]["type"] == "cng":
share_fossil += self.fuel_blends["cng"]["primary"]["share"][
self.scope["year"].index(y)
]
CO2_fossil = self.fuel_blends["cng"]["primary"]["CO2"]
if self.fuel_blends["cng"]["secondary"]["type"] == "cng":
share_fossil += self.fuel_blends["cng"]["secondary"]["share"][
self.scope["year"].index(y)
]
CO2_fossil = self.fuel_blends["cng"]["secondary"]["CO2"]
self.A[
:,
self.inputs[("Carbon dioxide, fossil", ("air",), "kilogram")],
ind_A,
] = (
array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil * CO2_fossil
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Fuel-based CO2 emission from alternative petrol
# The share of non-fossil gas in the blend is retrieved
# As well as the CO2 emission factor of the fuel
share_non_fossil = 0
CO2_non_fossil = 0
if self.fuel_blends["cng"]["primary"]["type"] != "cng":
share_non_fossil += self.fuel_blends["cng"]["primary"]["share"][
self.scope["year"].index(y)
]
CO2_non_fossil = self.fuel_blends["cng"]["primary"]["CO2"]
if self.fuel_blends["cng"]["secondary"]["type"] != "cng":
share_non_fossil += self.fuel_blends["cng"]["secondary"]["share"][
self.scope["year"].index(y)
]
CO2_non_fossil = self.fuel_blends["cng"]["secondary"]["CO2"]
self.A[
:,
self.inputs[("Carbon dioxide, from soil or biomass stock", ("air",), "kilogram")],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_non_fossil * CO2_non_fossil)
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
if {"ICEV-d", "PHEV-d", "HEV-d"}.intersection(set(self.scope["powertrain"])):
index = self.get_index_vehicle_from_array(["ICEV-d", "PHEV-d", "HEV-d"])
print(
"{} is completed by {}.".format(
self.fuel_blends["diesel"]["primary"]["type"],
self.fuel_blends["diesel"]["secondary"]["type"],
),
end="\n \t * ",
)
for y in self.scope["year"]:
if self.scope["year"].index(y) + 1 == len(self.scope["year"]):
end_str = "\n * "
else:
end_str = "\n \t * "
print(
"in "
+ str(y)
+ " _________________________________________ "
+ str(
np.round(
self.fuel_blends["diesel"]["secondary"]["share"][
self.scope["year"].index(y)
]
* 100,
0,
)
)
+ "%",
end=end_str,
)
for y in self.scope["year"]:
ind_A = [
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "Passenger" in i[0]
and any(x in i[0] for x in ["ICEV-d", "PHEV-d", "HEV-d"])
]
ind_array = [
x for x in self.get_index_vehicle_from_array(y) if x in index
]
# Fuel supply
self.A[
:,
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0] and "fuel supply for diesel vehicles" in i[0]
],
ind_A,
] = (
(array[self.array_inputs["fuel mass"], :, ind_array])
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
share_fossil = 0
CO2_fossil = 0
# Fuel-based CO2 emission from conventional petrol
if self.fuel_blends["diesel"]["primary"]["type"] == "diesel":
share_fossil += self.fuel_blends["diesel"]["primary"]["share"][
self.scope["year"].index(y)
]
CO2_fossil = self.fuel_blends["diesel"]["primary"]["CO2"]
if self.fuel_blends["diesel"]["secondary"]["type"] == "diesel":
share_fossil += self.fuel_blends["diesel"]["secondary"]["share"][
self.scope["year"].index(y)
]
CO2_fossil = self.fuel_blends["diesel"]["secondary"]["CO2"]
self.A[
:,
self.inputs[("Carbon dioxide, fossil", ("air",), "kilogram")],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil * CO2_fossil)
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
share_non_fossil = 0
CO2_non_fossil = 0
# Fuel-based CO2 emission from alternative petrol
# The share of non-fossil fuel in the blend is retrieved
# As well as the CO2 emission factor of the fuel
if self.fuel_blends["diesel"]["primary"]["type"] != "diesel":
share_non_fossil += self.fuel_blends["diesel"]["primary"]["share"][
self.scope["year"].index(y)
]
CO2_non_fossil = self.fuel_blends["diesel"]["primary"]["CO2"]
if self.fuel_blends["diesel"]["secondary"]["type"] != "diesel":
share_non_fossil += self.fuel_blends["diesel"]["secondary"]["share"][
self.scope["year"].index(y)
]
CO2_non_fossil = self.fuel_blends["diesel"]["secondary"]["CO2"]
self.A[
:,
self.inputs[("Carbon dioxide, from soil or biomass stock", ("air",), "kilogram")],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_non_fossil * CO2_non_fossil)
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Heavy metals emissions from conventional diesel
# Emission factors from Spielmann et al., Transport Services Data v.2 (2007)
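# The factors below are kg emitted per kg of fuel (e.g. 0.01 mg/kg = 1e-8 kg/kg)
# and apply only to the fossil share of the blend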
# Cadmium, 0.01 mg/kg diesel
self.A[
:,
self.inputs[
("Cadmium", ("air", "urban air close to ground"), "kilogram")
],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil)
* 1e-8
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Copper, 1.7 mg/kg diesel
self.A[
:,
self.inputs[
("Copper", ("air", "urban air close to ground"), "kilogram")
],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil)
* 1.7e-6
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Chromium, 0.05 mg/kg diesel
self.A[
:,
self.inputs[
("Chromium", ("air", "urban air close to ground"), "kilogram")
],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil)
* 5.0e-8
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Nickel, 0.07 mg/kg diesel
self.A[
:,
self.inputs[
("Nickel", ("air", "urban air close to ground"), "kilogram")
],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil)
* 7.0e-8
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Selenium, 0.01 mg/kg diesel
self.A[
:,
self.inputs[
("Selenium", ("air", "urban air close to ground"), "kilogram")
],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil)
* 1.0e-8
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Zinc, 1 mg/kg diesel
self.A[
:,
self.inputs[
("Zinc", ("air", "urban air close to ground"), "kilogram")
],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil)
* 1.0e-6
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Chromium VI, 0.0001 mg/kg diesel
self.A[
:,
self.inputs[
(
"Chromium VI",
("air", "urban air close to ground"),
"kilogram",
)
],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil)
* 1.0e-10
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
if {"ICEV-p", "HEV-p", "PHEV-p"}.intersection(set(self.scope["powertrain"])):
index = self.get_index_vehicle_from_array(["ICEV-p", "HEV-p", "PHEV-p"])
print(
"{} is completed by {}.".format(
self.fuel_blends["petrol"]["primary"]["type"],
self.fuel_blends["petrol"]["secondary"]["type"],
),
end="\n \t * ",
)
for y in self.scope["year"]:
if self.scope["year"].index(y) + 1 == len(self.scope["year"]):
end_str = "\n * "
else:
end_str = "\n \t * "
print(
"in "
+ str(y)
+ " _________________________________________ "
+ str(
np.round(
self.fuel_blends["petrol"]["secondary"]["share"][
self.scope["year"].index(y)
]
* 100,
0,
)
)
+ "%",
end=end_str,
)
for y in self.scope["year"]:
ind_A = [
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "Passenger" in i[0]
and any(x in i[0] for x in ["ICEV-p", "HEV-p", "PHEV-p"])
]
ind_array = [
x for x in self.get_index_vehicle_from_array(y) if x in index
]
# Fuel supply
self.A[
:,
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "fuel supply for gasoline vehicles" in i[0]
],
ind_A,
] = (
(array[self.array_inputs["fuel mass"], :, ind_array])
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
share_fossil = 0
CO2_fossil = 0
# Fuel-based CO2 emission from conventional petrol
if self.fuel_blends["petrol"]["primary"]["type"] == "petrol":
share_fossil += self.fuel_blends["petrol"]["primary"]["share"][
self.scope["year"].index(y)
]
CO2_fossil = self.fuel_blends["petrol"]["primary"]["CO2"]
if self.fuel_blends["petrol"]["secondary"]["type"] == "petrol":
share_fossil += self.fuel_blends["petrol"]["secondary"]["share"][
self.scope["year"].index(y)
]
CO2_fossil = self.fuel_blends["petrol"]["secondary"]["CO2"]
self.A[
:,
self.inputs[("Carbon dioxide, fossil", ("air",), "kilogram")],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil * CO2_fossil)
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
share_non_fossil = 0
CO2_non_fossil = 0
# Fuel-based CO2 emission from alternative petrol
# The share of non-fossil fuel in the blend is retrieved
# As well as the CO2 emission factor of the fuel
if self.fuel_blends["petrol"]["primary"]["type"] != "petrol":
share_non_fossil += self.fuel_blends["petrol"]["primary"]["share"][
self.scope["year"].index(y)
]
CO2_non_fossil = self.fuel_blends["petrol"]["primary"]["CO2"]
if self.fuel_blends["petrol"]["secondary"]["type"] != "petrol":
share_non_fossil += self.fuel_blends["petrol"]["secondary"]["share"][
self.scope["year"].index(y)
]
CO2_non_fossil = self.fuel_blends["petrol"]["secondary"]["CO2"]
self.A[
:,
self.inputs[("Carbon dioxide, from soil or biomass stock", ("air",), "kilogram")],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_non_fossil * CO2_non_fossil)
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Heavy metals emissions from conventional petrol
# Cadmium, 0.01 mg/kg gasoline
self.A[
:,
self.inputs[
("Cadmium", ("air", "urban air close to ground"), "kilogram")
],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil)
* 1e-8
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Copper, 1.7 mg/kg gasoline
self.A[
:,
self.inputs[
("Copper", ("air", "urban air close to ground"), "kilogram")
],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil)
* 1.7e-6
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Chromium, 0.05 mg/kg gasoline
self.A[
:,
self.inputs[
("Chromium", ("air", "urban air close to ground"), "kilogram")
],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil)
* 5.0e-8
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Nickel, 0.07 mg/kg gasoline
self.A[
:,
self.inputs[
("Nickel", ("air", "urban air close to ground"), "kilogram")
],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil)
* 7.0e-8
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Selenium, 0.01 mg/kg gasoline
self.A[
:,
self.inputs[
("Selenium", ("air", "urban air close to ground"), "kilogram")
],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil)
* 1.0e-8
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Zinc, 1 mg/kg gasoline
self.A[
:,
self.inputs[
("Zinc", ("air", "urban air close to ground"), "kilogram")
],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil)
* 1.0e-6
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Chromium VI, 0.0001 mg/kg gasoline
self.A[
:,
self.inputs[
(
"Chromium VI",
("air", "urban air close to ground"),
"kilogram",
)
],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil)
* 1.0e-10
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Non-exhaust emissions
self.A[
:,
self.inputs[
(
"market for road wear emissions, passenger car",
"GLO",
"kilogram",
"road wear emissions, passenger car",
)
],
-self.number_of_cars :,
] = (array[self.array_inputs["driving mass"], :] * 1e-08)
self.A[
:,
self.inputs[
(
"market for tyre wear emissions, passenger car",
"GLO",
"kilogram",
"tyre wear emissions, passenger car",
)
],
-self.number_of_cars :,
] = (array[self.array_inputs["driving mass"], :] * 6e-08)
self.A[
:,
self.inputs[
(
"market for brake wear emissions, passenger car",
"GLO",
"kilogram",
"brake wear emissions, passenger car",
)
],
-self.number_of_cars :,
] = (array[self.array_inputs["driving mass"], :] * 5e-09)
# Infrastructure
self.A[
:,
self.inputs[("market for road", "GLO", "meter-year", "road")],
-self.number_of_cars :,
] = (5.37e-7 * array[self.array_inputs["driving mass"], :] * -1)
# Infrastructure maintenance
self.A[
:,
self.inputs[
("market for road maintenance", "RER", "meter-year", "road maintenance")
],
-self.number_of_cars :,
] = (1.29e-3 * -1)
# Exhaust emissions
# Non-fuel based emissions
self.A[:, self.index_emissions, -self.number_of_cars :] = (
array[
[
self.array_inputs[self.map_non_fuel_emissions[self.rev_inputs[x]]]
for x in self.index_emissions
]
]
* -1
).transpose([1, 0, 2])
# Noise emissions
self.A[:, self.index_noise, -self.number_of_cars :] = (
array[
[
self.array_inputs[self.map_noise_emissions[self.rev_inputs[x]]]
for x in self.index_noise
]
]
* -1
).transpose([1, 0, 2])
print("*********************************************************************")
from . import DATA_DIR
import sys
import glob
from .background_systems import BackgroundSystemModel
from .export import ExportInventory
from inspect import currentframe, getframeinfo
from pathlib import Path
from scipy import sparse
import csv
import itertools
import numexpr as ne
import numpy as np
import xarray as xr
REMIND_FILES_DIR = DATA_DIR / "IAM"
class InventoryCalculation:
"""
Build and solve the inventory for results characterization and inventory export
Vehicles to be analyzed can be filtered by passing a `scope` dictionary.
Some assumptions in the background system can also be adjusted by passing a `background_configuration` dictionary.
.. code-block:: python
scope = {
'powertrain':['BEV', 'FCEV', 'ICEV-p'],
}
bc = {'country':'CH', # considers electricity network losses for Switzerland
'custom electricity mix' : [[1,0,0,0,0,0,0,0,0,0], # in this case, 100% hydropower for the first year
[0,1,0,0,0,0,0,0,0,0], # 100% nuclear for the second year
[0,0,1,0,0,0,0,0,0,0], # 100% natural gas for the third year
[0,0,0,1,0,0,0,0,0,0], # 100% solar power for the fourth year
],
'fuel blend':{
'cng':{ # specify the fuel blend for compressed gas
'primary fuel':{
'type':'biogas',
'share':[0.9, 0.8, 0.7, 0.6] # shares per year. Must total 1 for each year.
},
'secondary fuel':{
'type':'syngas',
'share': [0.1, 0.2, 0.3, 0.4]
}
},
'diesel':{
'primary fuel':{
'type':'synthetic diesel',
'share':[0.9, 0.8, 0.7, 0.6]
},
'secondary fuel':{
'type':'biodiesel - cooking oil',
'share': [0.1, 0.2, 0.3, 0.4]
}
},
'petrol':{
'primary fuel':{
'type':'petrol',
'share':[0.9, 0.8, 0.7, 0.6]
},
'secondary fuel':{
'type':'bioethanol - wheat straw',
'share': [0.1, 0.2, 0.3, 0.4]
}
},
'hydrogen':{
'primary fuel':{'type':'electrolysis', 'share':[1, 0, 0, 0]},
'secondary fuel':{'type':'smr - natural gas', 'share':[0, 1, 1, 1]}
}
},
'energy storage': {
'electric': {
'type':'NMC',
'origin': 'NO'
},
'hydrogen': {
'type':'carbon fiber'
}
}
}
InventoryCalculation(CarModel.array,
background_configuration=bc,
scope=scope,
scenario="SSP2-PkBudg1100")
The `custom electricity mix` key in the background_configuration dictionary defines an electricity mix to apply,
in the form of one array per year to analyze.
Each array must total 1, and its indices correspond to:
- [0]: hydro-power
- [1]: nuclear
- [2]: natural gas
- [3]: solar power
- [4]: wind power
- [5]: biomass
- [6]: coal
- [7]: oil
- [8]: geothermal
- [9]: waste incineration
If none is given, the electricity mix corresponding to the country specified in `country` will be selected.
If no country is specified, Europe applies.
The `primary fuel` and `secondary fuel` keys each contain an array of yearly shares, to create a custom blend.
If none is provided, a blend provided by the Integrated Assessment model REMIND is used, which will depend on the REMIND energy scenario selected.
Here is a list of available fuel pathways:
Hydrogen technologies
---------------------
electrolysis
smr - natural gas
smr - natural gas with CCS
smr - biogas
smr - biogas with CCS
coal gasification
wood gasification
wood gasification with CCS
Natural gas technologies
------------------------
cng
biogas
syngas
Diesel technologies
-------------------
diesel
biodiesel - algae
biodiesel - cooking oil
synthetic diesel
Petrol technologies
-------------------
petrol
bioethanol - wheat straw
bioethanol - maize starch
bioethanol - sugarbeet
bioethanol - forest residues
synthetic gasoline
:ivar array: array from the CarModel class
:vartype array: CarModel.array
:ivar scope: dictionary that contains filters for narrowing the analysis
:ivar background_configuration: dictionary that contains choices for background system
:ivar scenario: REMIND energy scenario to use ("SSP2-Base": business-as-usual,
"SSP2-PkBudg1100": limits cumulative GHG emissions to 1,100 gigatons by 2100,
"static": no forward-looking modification of the background inventories).
"SSP2-Base" is selected by default.
"""
def __init__(
self, array, scope=None, background_configuration=None, scenario="SSP2-Base", method="recipe", method_type="midpoint"
):
if scope is None:
scope = {}
scope["size"] = array.coords["size"].values.tolist()
scope["powertrain"] = array.coords["powertrain"].values.tolist()
scope["year"] = array.coords["year"].values.tolist()
else:
scope["size"] = scope.get("size", array.coords["size"].values.tolist())
scope["powertrain"] = scope.get(
"powertrain", array.coords["powertrain"].values.tolist()
)
scope["year"] = scope.get("year", array.coords["year"].values.tolist())
self.scope = scope
self.scenario = scenario
array = array.sel(
powertrain=self.scope["powertrain"],
year=self.scope["year"],
size=self.scope["size"],
)
self.array = array.stack(desired=["size", "powertrain", "year"])
self.iterations = len(array.value.values)
self.number_of_cars = (
len(self.scope["size"])
* len(self.scope["powertrain"])
* len(self.scope["year"])
)
self.array_inputs = {
x: i for i, x in enumerate(list(self.array.parameter.values), 0)
}
self.array_powertrains = {
x: i for i, x in enumerate(list(self.array.powertrain.values), 0)
}
if background_configuration is not None:
self.background_configuration = background_configuration
else:
self.background_configuration = {}
if "energy storage" not in self.background_configuration:
self.background_configuration["energy storage"] = {
"electric": {"type": "NMC", "origin": "CN"}
}
else:
if "electric" not in self.background_configuration["energy storage"]:
self.background_configuration["energy storage"]["electric"] = {
"type": "NMC",
"origin": "CN",
}
else:
if (
"origin"
not in self.background_configuration["energy storage"]["electric"]
):
self.background_configuration["energy storage"]["electric"][
"origin"
] = "CN"
if (
"type"
not in self.background_configuration["energy storage"]["electric"]
):
self.background_configuration["energy storage"]["electric"][
"type"
] = "NMC"
self.inputs = self.get_dict_input()
self.bs = BackgroundSystemModel()
self.country = self.get_country_of_use()
self.add_additional_activities()
self.rev_inputs = self.get_rev_dict_input()
self.A = self.get_A_matrix()
self.mix = self.define_electricity_mix_for_fuel_prep()
self.fuel_blends = {}
self.define_fuel_blends()
self.set_actual_range()
self.index_cng = [self.inputs[i] for i in self.inputs if "ICEV-g" in i[0]]
self.index_combustion_wo_cng = [
self.inputs[i]
for i in self.inputs
if any(
ele in i[0]
for ele in ["ICEV-p", "HEV-p", "PHEV-p", "ICEV-d", "PHEV-d", "HEV-d"]
)
]
self.index_diesel = [self.inputs[i] for i in self.inputs if "ICEV-d" in i[0]]
self.index_all_petrol = [
self.inputs[i]
for i in self.inputs
if any(ele in i[0] for ele in ["ICEV-p", "HEV-p", "PHEV-p"])
]
self.index_petrol = [self.inputs[i] for i in self.inputs if "ICEV-p" in i[0]]
self.index_hybrid = [
self.inputs[i]
for i in self.inputs
if any(ele in i[0] for ele in ["HEV-p", "HEV-d"])
]
self.index_plugin_hybrid = [
self.inputs[i] for i in self.inputs if "PHEV" in i[0]
]
self.index_fuel_cell = [self.inputs[i] for i in self.inputs if "FCEV" in i[0]]
self.map_non_fuel_emissions = {
(
"Methane, fossil",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Methane direct emissions, suburban",
(
"Methane, fossil",
("air", "low population density, long-term"),
"kilogram",
): "Methane direct emissions, rural",
(
"Lead",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Lead direct emissions, suburban",
(
"Ammonia",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Ammonia direct emissions, suburban",
(
"NMVOC, non-methane volatile organic compounds, unspecified origin",
("air", "urban air close to ground"),
"kilogram",
): "NMVOC direct emissions, urban",
(
"PAH, polycyclic aromatic hydrocarbons",
("air", "urban air close to ground"),
"kilogram",
): "Hydrocarbons direct emissions, urban",
(
"Dinitrogen monoxide",
("air", "low population density, long-term"),
"kilogram",
): "Dinitrogen oxide direct emissions, rural",
(
"Nitrogen oxides",
("air", "urban air close to ground"),
"kilogram",
): "Nitrogen oxides direct emissions, urban",
(
"Ammonia",
("air", "urban air close to ground"),
"kilogram",
): "Ammonia direct emissions, urban",
(
"Particulates, < 2.5 um",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Particulate matters direct emissions, suburban",
(
"Carbon monoxide, fossil",
("air", "urban air close to ground"),
"kilogram",
): "Carbon monoxide direct emissions, urban",
(
"Nitrogen oxides",
("air", "low population density, long-term"),
"kilogram",
): "Nitrogen oxides direct emissions, rural",
(
"NMVOC, non-methane volatile organic compounds, unspecified origin",
("air", "non-urban air or from high stacks"),
"kilogram",
): "NMVOC direct emissions, suburban",
(
"Benzene",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Benzene direct emissions, suburban",
(
"Ammonia",
("air", "low population density, long-term"),
"kilogram",
): "Ammonia direct emissions, rural",
(
"Sulfur dioxide",
("air", "low population density, long-term"),
"kilogram",
): "Sulfur dioxide direct emissions, rural",
(
"NMVOC, non-methane volatile organic compounds, unspecified origin",
("air", "low population density, long-term"),
"kilogram",
): "NMVOC direct emissions, rural",
(
"Particulates, < 2.5 um",
("air", "urban air close to ground"),
"kilogram",
): "Particulate matters direct emissions, urban",
(
"Sulfur dioxide",
("air", "urban air close to ground"),
"kilogram",
): "Sulfur dioxide direct emissions, urban",
(
"Dinitrogen monoxide",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Dinitrogen oxide direct emissions, suburban",
(
"Carbon monoxide, fossil",
("air", "low population density, long-term"),
"kilogram",
): "Carbon monoxide direct emissions, rural",
(
"Methane, fossil",
("air", "urban air close to ground"),
"kilogram",
): "Methane direct emissions, urban",
(
"Carbon monoxide, fossil",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Carbon monoxide direct emissions, suburban",
(
"Lead",
("air", "urban air close to ground"),
"kilogram",
): "Lead direct emissions, urban",
(
"Particulates, < 2.5 um",
("air", "low population density, long-term"),
"kilogram",
): "Particulate matters direct emissions, rural",
(
"Sulfur dioxide",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Sulfur dioxide direct emissions, suburban",
(
"Benzene",
("air", "low population density, long-term"),
"kilogram",
): "Benzene direct emissions, rural",
(
"Nitrogen oxides",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Nitrogen oxides direct emissions, suburban",
(
"Lead",
("air", "low population density, long-term"),
"kilogram",
): "Lead direct emissions, rural",
(
"Benzene",
("air", "urban air close to ground"),
"kilogram",
): "Benzene direct emissions, urban",
(
"PAH, polycyclic aromatic hydrocarbons",
("air", "low population density, long-term"),
"kilogram",
): "Hydrocarbons direct emissions, rural",
(
"PAH, polycyclic aromatic hydrocarbons",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Hydrocarbons direct emissions, suburban",
(
"Dinitrogen monoxide",
("air", "urban air close to ground"),
"kilogram",
): "Dinitrogen oxide direct emissions, urban",
}
self.index_emissions = [
self.inputs[i] for i in self.map_non_fuel_emissions.keys()
]
self.map_noise_emissions = {
(
"noise, octave 1, day time, urban",
("octave 1", "day time", "urban"),
"joule",
): "noise, octave 1, day time, urban",
(
"noise, octave 2, day time, urban",
("octave 2", "day time", "urban"),
"joule",
): "noise, octave 2, day time, urban",
(
"noise, octave 3, day time, urban",
("octave 3", "day time", "urban"),
"joule",
): "noise, octave 3, day time, urban",
(
"noise, octave 4, day time, urban",
("octave 4", "day time", "urban"),
"joule",
): "noise, octave 4, day time, urban",
(
"noise, octave 5, day time, urban",
("octave 5", "day time", "urban"),
"joule",
): "noise, octave 5, day time, urban",
(
"noise, octave 6, day time, urban",
("octave 6", "day time", "urban"),
"joule",
): "noise, octave 6, day time, urban",
(
"noise, octave 7, day time, urban",
("octave 7", "day time", "urban"),
"joule",
): "noise, octave 7, day time, urban",
(
"noise, octave 8, day time, urban",
("octave 8", "day time", "urban"),
"joule",
): "noise, octave 8, day time, urban",
(
"noise, octave 1, day time, suburban",
("octave 1", "day time", "suburban"),
"joule",
): "noise, octave 1, day time, suburban",
(
"noise, octave 2, day time, suburban",
("octave 2", "day time", "suburban"),
"joule",
): "noise, octave 2, day time, suburban",
(
"noise, octave 3, day time, suburban",
("octave 3", "day time", "suburban"),
"joule",
): "noise, octave 3, day time, suburban",
(
"noise, octave 4, day time, suburban",
("octave 4", "day time", "suburban"),
"joule",
): "noise, octave 4, day time, suburban",
(
"noise, octave 5, day time, suburban",
("octave 5", "day time", "suburban"),
"joule",
): "noise, octave 5, day time, suburban",
(
"noise, octave 6, day time, suburban",
("octave 6", "day time", "suburban"),
"joule",
): "noise, octave 6, day time, suburban",
(
"noise, octave 7, day time, suburban",
("octave 7", "day time", "suburban"),
"joule",
): "noise, octave 7, day time, suburban",
(
"noise, octave 8, day time, suburban",
("octave 8", "day time", "suburban"),
"joule",
): "noise, octave 8, day time, suburban",
(
"noise, octave 1, day time, rural",
("octave 1", "day time", "rural"),
"joule",
): "noise, octave 1, day time, rural",
(
"noise, octave 2, day time, rural",
("octave 2", "day time", "rural"),
"joule",
): "noise, octave 2, day time, rural",
(
"noise, octave 3, day time, rural",
("octave 3", "day time", "rural"),
"joule",
): "noise, octave 3, day time, rural",
(
"noise, octave 4, day time, rural",
("octave 4", "day time", "rural"),
"joule",
): "noise, octave 4, day time, rural",
(
"noise, octave 5, day time, rural",
("octave 5", "day time", "rural"),
"joule",
): "noise, octave 5, day time, rural",
(
"noise, octave 6, day time, rural",
("octave 6", "day time", "rural"),
"joule",
): "noise, octave 6, day time, rural",
(
"noise, octave 7, day time, rural",
("octave 7", "day time", "rural"),
"joule",
): "noise, octave 7, day time, rural",
(
"noise, octave 8, day time, rural",
("octave 8", "day time", "rural"),
"joule",
): "noise, octave 8, day time, rural",
}
self.elec_map = {
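# NOTE: the iteration order of these keys must match the technology order of
# the `variable` lists used when selecting electricity mixes (Hydro ... Waste),
# since np.ix_ aligns the mix columns with these datasets positionally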
"Hydro": (
"electricity production, hydro, run-of-river",
"DE",
"kilowatt hour",
"electricity, high voltage",
),
"Nuclear": (
"electricity production, nuclear, pressure water reactor",
"DE",
"kilowatt hour",
"electricity, high voltage",
),
"Gas": (
"electricity production, natural gas, conventional power plant",
"DE",
"kilowatt hour",
"electricity, high voltage",
),
"Solar": (
"electricity production, photovoltaic, 3kWp slanted-roof installation, multi-Si, panel, mounted",
"DE",
"kilowatt hour",
"electricity, low voltage",
),
"Wind": (
"electricity production, wind, 1-3MW turbine, onshore",
"DE",
"kilowatt hour",
"electricity, high voltage",
),
"Biomass": (
"heat and power co-generation, wood chips, 6667 kW, state-of-the-art 2014",
"DE",
"kilowatt hour",
"electricity, high voltage",
),
"Coal": (
"electricity production, hard coal",
"DE",
"kilowatt hour",
"electricity, high voltage",
),
"Oil": (
"electricity production, oil",
"DE",
"kilowatt hour",
"electricity, high voltage",
),
"Geo": (
"electricity production, deep geothermal",
"DE",
"kilowatt hour",
"electricity, high voltage",
),
"Waste": (
"treatment of municipal solid waste, incineration",
"DE",
"kilowatt hour",
"electricity, for reuse in municipal waste incineration only",
),
}
self.index_noise = [self.inputs[i] for i in self.map_noise_emissions.keys()]
self.list_cat, self.split_indices = self.get_split_indices()
self.method = method
if self.method == "recipe":
self.method_type = method_type
        else:
            # Only the midpoint level is available for methods other than ReCiPe
            self.method_type = "midpoint"
self.impact_categories = self.get_dict_impact_categories()
# Load the B matrix
self.B = self.get_B_matrix()
def __getitem__(self, key):
"""
Make class['foo'] automatically filter for the parameter 'foo'
Makes the model code much cleaner
:param key: Parameter name
:type key: str
:return: `array` filtered after the parameter selected
"""
return self.temp_array.sel(parameter=key)
def get_results_table(self, split, sensitivity=False):
"""
Format an xarray.DataArray array to receive the results.
:param split: "components" or "impact categories". Split by impact categories only applicable when "endpoint" level is applied.
:return: xarrray.DataArray
"""
if split == "components":
cat = [
"direct - exhaust",
"direct - non-exhaust",
"energy chain",
"maintenance",
"glider",
"EoL",
"powertrain",
"energy storage",
"road",
]
dict_impact_cat = list(self.impact_categories.keys())
        if not sensitivity:
response = xr.DataArray(
np.zeros(
(
self.B.shape[1],
len(self.scope["size"]),
len(self.scope["powertrain"]),
len(self.scope["year"]),
len(cat),
self.iterations,
)
),
coords=[
dict_impact_cat,
self.scope["size"],
self.scope["powertrain"],
self.scope["year"],
cat,
np.arange(0, self.iterations),
],
dims=[
"impact_category",
"size",
"powertrain",
"year",
"impact",
"value",
],
)
else:
            params = self.array.value.values.tolist()
response = xr.DataArray(
np.zeros(
(
self.B.shape[1],
len(self.scope["size"]),
len(self.scope["powertrain"]),
len(self.scope["year"]),
self.iterations,
)
),
coords=[
dict_impact_cat,
self.scope["size"],
self.scope["powertrain"],
self.scope["year"],
params,
],
dims=["impact_category", "size", "powertrain", "year", "parameter"],
)
return response
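    # A sketch of how the filled results array is typically sliced afterwards
    # (hypothetical selection values; dimensions as defined above):
    #
    #   results.sel(
    #       impact_category="climate change", powertrain="BEV", year=2020
    #   ).sum(dim="impact").mean(dim="value")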
def get_split_indices(self):
"""
Return list of indices to split the results into categories.
:return: list of indices
:rtype: list
"""
filename = "dict_split.csv"
filepath = DATA_DIR / filename
if not filepath.is_file():
raise FileNotFoundError("The dictionary of splits could not be found.")
with open(filepath) as f:
csv_list = [[val.strip() for val in r.split(";")] for r in f.readlines()]
(_, _, *header), *data = csv_list
csv_dict = {}
for row in data:
key, sub_key, *values = row
if key in csv_dict:
if sub_key in csv_dict[key]:
csv_dict[key][sub_key].append(
{"search by": values[0], "search for": values[1]}
)
else:
csv_dict[key][sub_key] = [
{"search by": values[0], "search for": values[1]}
]
else:
csv_dict[key] = {
sub_key: [{"search by": values[0], "search for": values[1]}]
}
        flatten = itertools.chain.from_iterable
        d = {}
        # Direct exhaust emissions: CO2 flows, heavy metals, plus the regulated
        # pollutant and noise flows collected at initialization
        direct_exhaust_flows = [
            ("Carbon dioxide, fossil", ("air",), "kilogram"),
            ("Carbon dioxide, from soil or biomass stock", ("air",), "kilogram"),
            ("Cadmium", ("air", "urban air close to ground"), "kilogram"),
            ("Copper", ("air", "urban air close to ground"), "kilogram"),
            ("Chromium", ("air", "urban air close to ground"), "kilogram"),
            ("Nickel", ("air", "urban air close to ground"), "kilogram"),
            ("Selenium", ("air", "urban air close to ground"), "kilogram"),
            ("Zinc", ("air", "urban air close to ground"), "kilogram"),
            ("Chromium VI", ("air", "urban air close to ground"), "kilogram"),
        ]
        d["direct - exhaust"] = [self.inputs[f] for f in direct_exhaust_flows]
        d["direct - exhaust"].extend(self.index_emissions)
        d["direct - exhaust"].extend(self.index_noise)
for cat in csv_dict["components"]:
d[cat] = list(
flatten(
[
self.get_index_of_flows([l["search for"]], l["search by"])
for l in csv_dict["components"][cat]
]
)
)
l.append(d[cat])
        list_ind = [d[x] for x in d]
        # Pad shorter rows with the index of the last input so that all rows
        # have equal length and can be used for fancy indexing
        max_len = max(map(len, list_ind))
        for row in list_ind:
            while len(row) < max_len:
                row.append(len(self.inputs) - 1)
return list(d.keys()), list_ind
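    # For reference, the return value has the form (indices are illustrative):
    #
    #   (["direct - exhaust", "energy chain", ...],
    #    [[12, 58, ...], [33, 34, ...], ...])
    #
    # where each inner list holds the row indices of the activities belonging
    # to one category, padded with the last input index to equal length.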
    def calculate_impacts(self, split="components", sensitivity=False):
        """
        Solve the life cycle inventory and characterize the results.
        :param split: "components" or "impact categories"
        :param sensitivity: if True, return results normalized by the "reference" parameter set
        :return: characterized results
        :rtype: xarray.DataArray
        """
# Prepare an array to store the results
results = self.get_results_table(split, sensitivity=sensitivity)
# Create electricity and fuel market datasets
self.create_electricity_market_for_fuel_prep()
# Create electricity market dataset for battery production
self.create_electricity_market_for_battery_production()
# Fill in the A matrix with car parameters
self.set_inputs_in_A_matrix(self.array.values)
# Collect indices of activities contributing to the first level
arr = self.A[0, : -self.number_of_cars, -self.number_of_cars :].sum(axis=1)
ind = np.nonzero(arr)[0]
new_arr = np.float32(
np.zeros((self.A.shape[1], self.B.shape[1], len(self.scope["year"])))
)
f = np.float32(np.zeros((np.shape(self.A)[1])))
for y in self.scope["year"]:
if self.scenario != "static":
B = self.B.interp(year=y, kwargs={"fill_value": "extrapolate"}).values
else:
B = self.B[0].values
for a in ind:
f[:] = 0
f[a] = 1
X = np.float32(sparse.linalg.spsolve(self.A[0], f.T))
C = X * B
new_arr[a, :, self.scope["year"].index(y)] = C.sum(axis=1)
new_arr = new_arr.T.reshape(
len(self.scope["year"]), B.shape[0], 1, 1, self.A.shape[-1]
)
a = np.float32(self.A[:, :, -self.number_of_cars :].transpose(0, 2, 1))
arr = np.float32(ne.evaluate("a * new_arr * -1"))
arr = arr.transpose(1, 3, 0, 4, 2)
arr = arr[:, :, :, self.split_indices, :].sum(axis=4)
if not sensitivity:
for y in range(0, len(self.scope["year"])):
results[:, :, :, y, :, :] = arr[
:, y :: len(self.scope["year"]), y, :, :
].reshape(
(
B.shape[0],
len(self.scope["size"]),
len(self.scope["powertrain"]),
len(results.impact.values),
self.iterations,
)
)
else:
for y in range(0, len(self.scope["year"])):
results[:, :, :, y, :] = (
arr[:, y :: len(self.scope["year"]), y, :]
.sum(axis=2)
.reshape(
(
B.shape[0],
len(self.scope["size"]),
len(self.scope["powertrain"]),
self.iterations,
)
)
)
            # Normalize by the "reference" parameter set (sensitivity mode only)
            results /= results.sel(parameter="reference")
return results.astype("float32")
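    # Minimal usage sketch, following the `ic` naming used in the docstring of
    # export_lci_to_bw() below (instantiation arguments omitted):
    #
    #   results = ic.calculate_impacts(split="components")
    #   results.sel(impact_category="climate change").sum(dim="impact")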
    def add_additional_activities(self):
        """
        Add as many rows and columns to the A matrix as there are cars to consider.
        Also add columns and rows for the fuel and electricity markets
        used for fuel preparation and battery production.
        """
maximum = max(self.inputs.values())
for y in self.scope["year"]:
if {"ICEV-p", "HEV-p", "PHEV-p"}.intersection(
set(self.scope["powertrain"])
):
maximum += 1
self.inputs[
(
"fuel supply for gasoline vehicles, " + str(y),
self.country,
"kilogram",
"fuel",
)
] = maximum
if {"ICEV-d", "HEV-d", "PHEV-d"}.intersection(
set(self.scope["powertrain"])
):
maximum += 1
self.inputs[
(
"fuel supply for diesel vehicles, " + str(y),
self.country,
"kilogram",
"fuel",
)
] = maximum
if {"ICEV-g"}.intersection(set(self.scope["powertrain"])):
maximum += 1
self.inputs[
(
"fuel supply for gas vehicles, " + str(y),
self.country,
"kilogram",
"fuel",
)
] = maximum
if {"FCEV"}.intersection(set(self.scope["powertrain"])):
maximum += 1
self.inputs[
(
"fuel supply for hydrogen vehicles, " + str(y),
self.country,
"kilogram",
"fuel",
)
] = maximum
if {"BEV", "PHEV-p", "PHEV-d"}.intersection(set(self.scope["powertrain"])):
maximum += 1
self.inputs[
(
"electricity supply for electric vehicles, " + str(y),
self.country,
"kilowatt hour",
"electricity, low voltage, for battery electric vehicles",
)
] = maximum
maximum += 1
self.inputs[
(
"electricity market for fuel preparation, " + str(y),
self.country,
"kilowatt hour",
"electricity, low voltage",
)
] = maximum
maximum += 1
self.inputs[
(
"electricity market for energy storage production, " + str(y),
self.background_configuration["energy storage"]["electric"][
"origin"
],
"kilowatt hour",
"electricity, low voltage, for energy storage production",
)
] = maximum
for s in self.scope["size"]:
for pt in self.scope["powertrain"]:
for y in self.scope["year"]:
maximum += 1
                    if y < 1993:
                        euro_class = "EURO-0"
                    elif 1993 <= y < 1997:
                        euro_class = "EURO-1"
                    elif 1997 <= y < 2001:
                        euro_class = "EURO-2"
                    elif 2001 <= y < 2006:
                        euro_class = "EURO-3"
                    elif 2006 <= y < 2011:
                        euro_class = "EURO-4"
                    elif 2011 <= y < 2015:
                        euro_class = "EURO-5"
                    else:
                        euro_class = "EURO-6"
                    name = "Passenger car, {}, {}, {}, {}".format(
                        pt, s, y, euro_class
                    )
self.inputs[
(
name,
self.background_configuration["country"],
"kilometer",
"transport, passenger car, " + euro_class,
)
] = maximum
def get_A_matrix(self):
"""
Load the A matrix. The A matrix contains exchanges of products (rows) between activities (columns).
:return: A matrix with three dimensions of shape (number of values, number of products, number of activities).
:rtype: numpy.ndarray
"""
filename = "A_matrix.csv"
filepath = (
Path(getframeinfo(currentframe()).filename)
.resolve()
.parent.joinpath("data/" + filename)
)
if not filepath.is_file():
raise FileNotFoundError("The technology matrix could not be found.")
initial_A = np.genfromtxt(filepath, delimiter=";")
new_A = np.identity(len(self.inputs))
new_A[0 : np.shape(initial_A)[0], 0 : np.shape(initial_A)[0]] = initial_A
# Resize the matrix to fit the number of iterations in `array`
new_A = np.resize(new_A, (self.array.shape[1], new_A.shape[0], new_A.shape[1]))
return new_A
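    # The A matrix follows the usual LCA sign convention: A[i, j] < 0 means
    # product i is consumed by activity j, and the diagonal carries the
    # reference products. A final demand f is then satisfied by solving
    # A.x = f, as done in calculate_impacts():
    #
    #   f = np.zeros(self.A.shape[1])
    #   f[j] = 1  # demand one functional unit of activity j
    #   x = sparse.linalg.spsolve(self.A[0], f)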
def get_B_matrix(self):
"""
Load the B matrix. The B matrix contains impact assessment figures for a give impact assessment method,
per unit of activity. Its length column-wise equals the length of the A matrix row-wise.
Its length row-wise equals the number of impact assessment methods.
:param method: only "recipe" and "ilcd" available at the moment.
:param level: only "midpoint" available at the moment.
:return: an array with impact values per unit of activity for each method.
:rtype: numpy.ndarray
"""
if self.method == "recipe":
if self.method_type == "midpoint":
list_file_names = glob.glob(
str(REMIND_FILES_DIR) + "/*recipe_midpoint*{}*.csv".format(self.scenario)
)
B = np.zeros((len(list_file_names), 21, len(self.inputs)))
else:
list_file_names = glob.glob(
str(REMIND_FILES_DIR) + "/*recipe_endpoint*{}*.csv".format(self.scenario)
)
B = np.zeros((len(list_file_names), 3, len(self.inputs)))
else:
list_file_names = glob.glob(
str(REMIND_FILES_DIR) + "/*ilcd*{}*.csv".format(self.scenario)
)
B = np.zeros((len(list_file_names), 19, len(self.inputs)))
for f in list_file_names:
initial_B = np.genfromtxt(f, delimiter=";")
new_B = np.zeros((np.shape(initial_B)[0], len(self.inputs),))
new_B[0 : np.shape(initial_B)[0], 0 : np.shape(initial_B)[1]] = initial_B
B[list_file_names.index(f), :, :] = new_B
list_impact_categories = list(self.impact_categories.keys())
if self.scenario != "static":
response = xr.DataArray(
B,
coords=[
[2005, 2010, 2020, 2030, 2040, 2050],
list_impact_categories,
list(self.inputs.keys()),
],
dims=["year", "category", "activity"],
)
else:
response = xr.DataArray(
B,
coords=[
[2020],
list_impact_categories,
list(self.inputs.keys()),
],
dims=["year", "category", "activity"],
)
return response
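    # Querying the characterization factors once loaded, e.g. (a sketch):
    #
    #   B = self.get_B_matrix()
    #   cfs = B.sel(category="climate change", year=2020)
    #
    # With a non-static scenario, factors can also be interpolated between the
    # available years, as done in calculate_impacts().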
def get_dict_input(self):
"""
Load a dictionary with tuple ("name of activity", "location", "unit", "reference product") as key, row/column
indices as values.
:return: dictionary with `label:index` pairs.
:rtype: dict
"""
filename = "dict_inputs_A_matrix.csv"
filepath = DATA_DIR / filename
if not filepath.is_file():
raise FileNotFoundError(
"The dictionary of activity labels could not be found."
)
csv_dict = {}
count = 0
with open(filepath) as f:
input_dict = csv.reader(f, delimiter=";")
for row in input_dict:
if "(" in row[1]:
new_str = row[1].replace("(", "")
new_str = new_str.replace(")", "")
new_str = [s.strip() for s in new_str.split(",") if s]
                    t = ()
                    for s in new_str:
                        if "low population" in s:
                            # "low population density, long-term" contains a comma
                            # and would otherwise have been split in two above
                            t += ("low population density, long-term",)
                            break
                        else:
                            t += (s.replace("'", ""),)
csv_dict[(row[0], t, row[2])] = count
else:
csv_dict[(row[0], row[1], row[2], row[3])] = count
count += 1
return csv_dict
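    # Keys of the returned dictionary are tuples such as:
    #
    #   ("market for glider, passenger car", "GLO", "kilogram",
    #    "glider, passenger car")                           # technosphere flow
    #   ("Carbon dioxide, fossil", ("air",), "kilogram")    # biosphere flow
    #
    # mapped to their row/column index in the A matrix.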
def get_dict_impact_categories(self):
"""
Load a dictionary with available impact assessment methods as keys, and assessment level and categories as values.
        .. code-block:: python
{'recipe': {'midpoint': ['freshwater ecotoxicity',
'human toxicity',
'marine ecotoxicity',
'terrestrial ecotoxicity',
'metal depletion',
'agricultural land occupation',
'climate change',
'fossil depletion',
'freshwater eutrophication',
'ionising radiation',
'marine eutrophication',
'natural land transformation',
'ozone depletion',
'particulate matter formation',
'photochemical oxidant formation',
'terrestrial acidification',
'urban land occupation',
'water depletion',
'human noise',
'primary energy, non-renewable',
'primary energy, renewable']
}
}
:return: dictionary
:rtype: dict
"""
filename = "dict_impact_categories.csv"
filepath = DATA_DIR / filename
if not filepath.is_file():
raise FileNotFoundError(
"The dictionary of impact categories could not be found."
)
csv_dict = {}
with open(filepath) as f:
input_dict = csv.reader(f, delimiter=";")
for row in input_dict:
if row[0] == self.method and row[3] == self.method_type:
                    csv_dict[row[2]] = {
                        "method": row[1],
                        "category": row[2],
                        "type": row[3],
                        "abbreviation": row[4],
                        "unit": row[5],
                        "source": row[6],
                    }
return csv_dict
def get_rev_dict_input(self):
"""
Reverse the self.inputs dictionary.
:return: reversed dictionary
:rtype: dict
"""
return {v: k for k, v in self.inputs.items()}
def get_index_vehicle_from_array(
self, items_to_look_for, items_to_look_for_also=None, method="or"
):
"""
Return list of row/column indices of self.array of labels that contain the string defined in `items_to_look_for`.
:param items_to_look_for: string to search for
:return: list
"""
if not isinstance(items_to_look_for, list):
items_to_look_for = [items_to_look_for]
        if items_to_look_for_also is not None:
if not isinstance(items_to_look_for_also, list):
items_to_look_for_also = [items_to_look_for_also]
list_vehicles = self.array.desired.values.tolist()
if method == "or":
return [
list_vehicles.index(c)
for c in list_vehicles
if set(items_to_look_for).intersection(c)
]
if method == "and":
return [
list_vehicles.index(c)
for c in list_vehicles
if set(items_to_look_for).intersection(c)
and set(items_to_look_for_also).intersection(c)
]
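    # Usage examples, matching how the method is called elsewhere in this class:
    #
    #   self.get_index_vehicle_from_array("BEV")              # all BEV rows
    #   self.get_index_vehicle_from_array(["BEV"], 2020, method="and")
    #                                                         # BEVs of year 2020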
def get_index_of_flows(self, items_to_look_for, search_by="name"):
"""
Return list of row/column indices of self.A of labels that contain the string defined in `items_to_look_for`.
:param items_to_look_for: string
:param search_by: "name" or "compartment" (for elementary flows)
:return: list of row/column indices
:rtype: list
"""
if search_by == "name":
return [
int(self.inputs[c])
for c in self.inputs
if all(ele in c[0].lower() for ele in items_to_look_for)
]
if search_by == "compartment":
return [
int(self.inputs[c])
for c in self.inputs
if all(ele in c[1] for ele in items_to_look_for)
]
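    # e.g. self.get_index_of_flows(["glider"]) returns the indices of all
    # activities whose name contains "glider", while
    # self.get_index_of_flows(["air"], search_by="compartment") searches the
    # compartment tuples of elementary flows instead.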
def export_lci(
self,
presamples=True,
ecoinvent_compatibility=True,
ecoinvent_version="3.6",
db_name="carculator db",
):
"""
Export the inventory as a dictionary. Also return a list of arrays that contain pre-sampled random values if
:meth:`stochastic` of :class:`CarModel` class has been called.
:param presamples: boolean.
:param ecoinvent_compatibility: bool. If True, compatible with ecoinvent. If False, compatible with REMIND-ecoinvent.
:param ecoinvent_version: str. "3.5", "3.6" or "uvek"
:return: inventory, and optionally, list of arrays containing pre-sampled values.
:rtype: list
"""
# Create electricity and fuel market datasets
self.create_electricity_market_for_fuel_prep()
# Create electricity market dataset for battery production
self.create_electricity_market_for_battery_production()
self.set_inputs_in_A_matrix(self.array.values)
        if presamples:
lci, array = ExportInventory(
self.A, self.rev_inputs, db_name=db_name
).write_lci(presamples, ecoinvent_compatibility, ecoinvent_version)
return (lci, array)
else:
lci = ExportInventory(self.A, self.rev_inputs, db_name=db_name).write_lci(
presamples, ecoinvent_compatibility, ecoinvent_version
)
return lci
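    # Usage sketch, following the `ic` naming used in the docstring of
    # export_lci_to_bw() below:
    #
    #   lci, arrays = ic.export_lci()            # with pre-sampled values
    #   lci = ic.export_lci(presamples=False)    # inventory only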
def export_lci_to_bw(
self,
presamples=True,
ecoinvent_compatibility=True,
ecoinvent_version="3.6",
db_name="carculator db",
):
"""
Export the inventory as a `brightway2` bw2io.importers.base_lci.LCIImporter object
with the inventory in the `data` attribute.
.. code-block:: python
# get the inventory
i, _ = ic.export_lci_to_bw()
# import it in a Brightway2 project
i.match_database('ecoinvent 3.6 cutoff', fields=('name', 'unit', 'location', 'reference product'))
i.match_database("biosphere3", fields=('name', 'unit', 'categories'))
i.match_database(fields=('name', 'unit', 'location', 'reference product'))
i.match_database(fields=('name', 'unit', 'categories'))
# Create an additional biosphere database for the few flows that do not
# exist in "biosphere3"
i.create_new_biosphere("additional_biosphere", relink=True)
# Check if all exchanges link
i.statistics()
# Register the database
i.write_database()
:return: LCIImport object that can be directly registered in a `brightway2` project.
:rtype: bw2io.importers.base_lci.LCIImporter
"""
# Create electricity and fuel market datasets
self.create_electricity_market_for_fuel_prep()
# Create electricity market dataset for battery production
self.create_electricity_market_for_battery_production()
self.set_inputs_in_A_matrix(self.array.values)
        if presamples:
lci, array = ExportInventory(
self.A, self.rev_inputs, db_name=db_name
).write_lci_to_bw(presamples, ecoinvent_compatibility, ecoinvent_version)
return (lci, array)
else:
lci = ExportInventory(
self.A, self.rev_inputs, db_name=db_name
).write_lci_to_bw(presamples, ecoinvent_compatibility, ecoinvent_version)
return lci
def export_lci_to_excel(
self,
directory=None,
ecoinvent_compatibility=True,
ecoinvent_version="3.6",
software_compatibility="brightway2",
filename=None,
):
"""
        Export the inventory as an Excel file (if the destination software is Brightway2) or a CSV file (if the destination software is Simapro).
Also return the file path where the file is stored.
:param directory: directory where to save the file.
:type directory: str
        :param ecoinvent_compatibility: If True, compatible with ecoinvent. If False, compatible with REMIND-ecoinvent.
        :param ecoinvent_version: "3.6", "3.5" or "uvek"
        :param software_compatibility: "brightway2" or "simapro"
        :param filename: name for the exported file and database; defaults to "carculator db".
        :return: file path where the file is stored.
        :rtype: str
"""
if software_compatibility not in ("brightway2", "simapro"):
raise NameError(
"The destination software argument is not valid. Choose between 'brightway2' or 'simapro'."
)
# Simapro inventory only for ecoinvent 3.5 or UVEK
if software_compatibility == "simapro":
if ecoinvent_version == "3.6":
print(
"Simapro-compatible inventory export is only available for ecoinvent 3.5 or UVEK."
)
return
ecoinvent_compatibility = True
ecoinvent_version = "3.5"
# Create electricity and fuel market datasets
self.create_electricity_market_for_fuel_prep()
# Create electricity market dataset for battery production
self.create_electricity_market_for_battery_production()
self.set_inputs_in_A_matrix(self.array.values)
fp = ExportInventory(
self.A, self.rev_inputs, db_name=filename or "carculator db"
).write_lci_to_excel(
directory,
ecoinvent_compatibility,
ecoinvent_version,
software_compatibility,
filename,
)
return fp
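    # Usage sketch, following the `ic` naming used in the docstring of
    # export_lci_to_bw() above (hypothetical directory):
    #
    #   fp = ic.export_lci_to_excel(directory="/tmp")                  # Brightway2, Excel
    #   fp = ic.export_lci_to_excel(software_compatibility="simapro")  # Simapro, CSV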
    def get_country_of_use(self):
        """Return the country of use from `background_configuration`, defaulting to "RER" (Europe)."""
if "country" not in self.background_configuration:
self.background_configuration["country"] = "RER"
return self.background_configuration["country"]
def define_electricity_mix_for_fuel_prep(self):
"""
This function defines a fuel mix based either on user-defined mix, or on default mixes for a given country.
The mix is calculated as the average mix, weighted by the distribution of annually driven kilometers.
:return:
"""
if "custom electricity mix" in self.background_configuration:
# If a special electricity mix is specified, we use it
mix = self.background_configuration["custom electricity mix"]
        else:
            # Average number of years each vehicle is in use:
            # lifetime kilometers / kilometers driven per year
            use_year = [
int(i)
for i in (
self.array.values[
self.array_inputs["lifetime kilometers"],
:,
self.get_index_vehicle_from_array(
[
"BEV",
"FCEV",
"PHEV-p",
"PHEV-d",
"ICEV-p",
"ICEV-d",
"HEV-p",
"HEV-d",
"ICEV-g",
]
),
]
/ self.array.values[
self.array_inputs["kilometers per year"],
:,
self.get_index_vehicle_from_array(
[
"BEV",
"FCEV",
"PHEV-p",
"PHEV-d",
"ICEV-p",
"ICEV-d",
"HEV-p",
"HEV-d",
"ICEV-g",
]
),
]
)
.mean(axis=1)
.reshape(-1, len(self.scope["year"]))
.mean(axis=0)
]
            # For each year in scope, average the national mix over the years of
            # use, extrapolating beyond 2050 where necessary
            mix = [
self.bs.electricity_mix.sel(
country=self.country,
variable=[
"Hydro",
"Nuclear",
"Gas",
"Solar",
"Wind",
"Biomass",
"Coal",
"Oil",
"Geothermal",
"Waste",
],
)
.interp(
year=np.arange(y, y + use_year[self.scope["year"].index(y)]),
kwargs={"fill_value": "extrapolate"},
)
.mean(axis=0)
.values
if y + use_year[self.scope["year"].index(y)] <= 2050
else self.bs.electricity_mix.sel(
country=self.country,
variable=[
"Hydro",
"Nuclear",
"Gas",
"Solar",
"Wind",
"Biomass",
"Coal",
"Oil",
"Geothermal",
"Waste",
],
)
.interp(year=np.arange(y, 2051), kwargs={"fill_value": "extrapolate"})
.mean(axis=0)
.values
for y in self.scope["year"]
]
return mix
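    # A custom mix can be supplied through `background_configuration`, e.g.
    # (a sketch: one array per year in scope, with ten technology shares in the
    # order Hydro, Nuclear, Gas, Solar, Wind, Biomass, Coal, Oil, Geothermal,
    # Waste, summing to 1):
    #
    #   bc = {"custom electricity mix": [
    #       np.array([0.5, 0.2, 0.1, 0.05, 0.05, 0.05, 0.02, 0.01, 0.01, 0.01])
    #   ]}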
    def define_renewable_rate_in_mix(self):
        """
        Return the share of renewable sources in the electricity mix and the
        climate change intensity of each technology (g CO2-eq/kWh, accounting
        for low-voltage losses). Note: the values returned correspond to the
        last year in scope.
        """
try:
losses_to_low = float(self.bs.losses[self.country]["LV"])
except KeyError:
# If losses for the country are not found, assume EU average
losses_to_low = float(self.bs.losses["RER"]["LV"])
for y in self.scope["year"]:
if self.scenario == "static":
if self.method == "recipe":
if self.method_type == "midpoint":
co2_intensity_tech = (
self.B.sel(
category="climate change",
year=2020,
activity=list(self.elec_map.values()),
).values
* losses_to_low
) * 1000
else:
co2_intensity_tech = 0
else:
co2_intensity_tech = (
self.B.sel(
category="climate change - climate change fossil",
year=2020,
activity=list(self.elec_map.values()),
).values
* losses_to_low
) * 1000
else:
if self.method == "recipe":
if self.method_type == "midpoint":
co2_intensity_tech = (
self.B.sel(
category="climate change", activity=list(self.elec_map.values())
)
.interp(year=y, kwargs={"fill_value": "extrapolate"})
.values
* losses_to_low
) * 1000
else:
co2_intensity_tech = 0
else:
co2_intensity_tech = (
self.B.sel(
category="climate change - climate change fossil", activity=list(self.elec_map.values())
)
.interp(year=y, kwargs={"fill_value": "extrapolate"})
.values
* losses_to_low
) * 1000
            # Renewables: Hydro (0), Solar (3), Wind (4), Biomass (5), Geothermal (8),
            # following the technology order of the mix arrays
            sum_renew = (
self.mix[self.scope["year"].index(y)][0]
+ self.mix[self.scope["year"].index(y)][3]
+ self.mix[self.scope["year"].index(y)][4]
+ self.mix[self.scope["year"].index(y)][5]
+ self.mix[self.scope["year"].index(y)][8]
)
return sum_renew, co2_intensity_tech
def create_electricity_market_for_fuel_prep(self):
""" This function fills the electricity market that supplies battery charging operations
and hydrogen production through electrolysis.
"""
try:
losses_to_low = float(self.bs.losses[self.country]["LV"])
except KeyError:
# If losses for the country are not found, assume EU average
losses_to_low = float(self.bs.losses["RER"]["LV"])
# Fill the electricity markets for battery charging and hydrogen production
for y in self.scope["year"]:
m = np.array(self.mix[self.scope["year"].index(y)]).reshape(-1, 10, 1)
# Add electricity technology shares
self.A[
np.ix_(
np.arange(self.iterations),
[self.inputs[self.elec_map[t]] for t in self.elec_map],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for fuel preparation" in i[0]
],
)
] = (m * -1 * losses_to_low)
# Add transmission network for high and medium voltage
self.A[
:,
self.inputs[
(
"transmission network construction, electricity, high voltage",
"CH",
"kilometer",
"transmission network, electricity, high voltage",
)
],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for fuel preparation" in i[0]
],
] = (6.58e-9 * -1 * losses_to_low)
self.A[
:,
self.inputs[
(
"transmission network construction, electricity, medium voltage",
"CH",
"kilometer",
"transmission network, electricity, medium voltage",
)
],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for fuel preparation" in i[0]
],
] = (1.86e-8 * -1 * losses_to_low)
self.A[
:,
self.inputs[
(
"transmission network construction, long-distance",
"UCTE",
"kilometer",
"transmission network, long-distance",
)
],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for fuel preparation" in i[0]
],
] = (3.17e-10 * -1 * losses_to_low)
# Add distribution network, low voltage
self.A[
:,
self.inputs[
(
"distribution network construction, electricity, low voltage",
"CH",
"kilometer",
"distribution network, electricity, low voltage",
)
],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for fuel preparation" in i[0]
],
] = (8.74e-8 * -1 * losses_to_low)
# Add supply of sulfur hexafluoride for transformers
self.A[
:,
self.inputs[
(
"market for sulfur hexafluoride, liquid",
"RER",
"kilogram",
"sulfur hexafluoride, liquid",
)
],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for fuel preparation" in i[0]
],
] = ((5.4e-8 + 2.99e-9) * -1 * losses_to_low)
# Add SF_6 leakage
self.A[
:,
self.inputs[("Sulfur hexafluoride", ("air",), "kilogram")],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for fuel preparation" in i[0]
],
] = ((5.4e-8 + 2.99e-9) * -1 * losses_to_low)
def create_electricity_market_for_battery_production(self):
"""
This function fills in the column in `self.A` concerned with the electricity mix used for manufacturing battery cells
:return:
"""
battery_tech = self.background_configuration["energy storage"]["electric"][
"type"
]
battery_origin = self.background_configuration["energy storage"]["electric"][
"origin"
]
try:
losses_to_low = float(self.bs.losses[battery_origin]["LV"])
except KeyError:
losses_to_low = float(self.bs.losses["CN"]["LV"])
mix_battery_manufacturing = (
self.bs.electricity_mix.sel(
country=battery_origin,
variable=[
"Hydro",
"Nuclear",
"Gas",
"Solar",
"Wind",
"Biomass",
"Coal",
"Oil",
"Geothermal",
"Waste",
],
)
.interp(year=self.scope["year"], kwargs={"fill_value": "extrapolate"})
.values
)
# Fill the electricity markets for battery production
for y in self.scope["year"]:
m = np.array(
mix_battery_manufacturing[self.scope["year"].index(y)]
).reshape(-1, 10, 1)
self.A[
np.ix_(
np.arange(self.iterations),
[self.inputs[self.elec_map[t]] for t in self.elec_map],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for energy storage production" in i[0]
],
)
] = (m * losses_to_low * -1)
# Add transmission network for high and medium voltage
self.A[
:,
self.inputs[
(
"transmission network construction, electricity, high voltage",
"CH",
"kilometer",
"transmission network, electricity, high voltage",
)
],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for energy storage production" in i[0]
],
] = (6.58e-9 * -1 * losses_to_low)
self.A[
:,
self.inputs[
(
"transmission network construction, electricity, medium voltage",
"CH",
"kilometer",
"transmission network, electricity, medium voltage",
)
],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for energy storage production" in i[0]
],
] = (1.86e-8 * -1 * losses_to_low)
self.A[
:,
self.inputs[
(
"transmission network construction, long-distance",
"UCTE",
"kilometer",
"transmission network, long-distance",
)
],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for energy storage production" in i[0]
],
] = (3.17e-10 * -1 * losses_to_low)
# Add distribution network, low voltage
self.A[
:,
self.inputs[
(
"distribution network construction, electricity, low voltage",
"CH",
"kilometer",
"distribution network, electricity, low voltage",
)
],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for energy storage production" in i[0]
],
] = (8.74e-8 * -1 * losses_to_low)
# Add supply of sulfur hexafluoride for transformers
self.A[
:,
self.inputs[
(
"market for sulfur hexafluoride, liquid",
"RER",
"kilogram",
"sulfur hexafluoride, liquid",
)
],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for energy storage production" in i[0]
],
] = ((5.4e-8 + 2.99e-9) * -1 * losses_to_low)
# Add SF_6 leakage
self.A[
:,
self.inputs[("Sulfur hexafluoride", ("air",), "kilogram")],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for energy storage production" in i[0]
],
] = ((5.4e-8 + 2.99e-9) * -1 * losses_to_low)
    def get_share_biofuel(self):
        """
        Return the share of biofuel in the fuel blend for each year in scope,
        based on the scenario projections for the region of use.
        """
region = self.bs.region_map[self.country]["RegionCode"]
scenario = self.scenario if self.scenario != "static" else "SSP2-Base"
share_biofuel = (
self.bs.biofuel.sel(
region=region, value=0, fuel_type="Biomass fuel", scenario=scenario,
)
.interp(year=self.scope["year"], kwargs={"fill_value": "extrapolate"})
.values
)
return share_biofuel
    def find_fuel_shares(self, fuel_type):
        """
        Return the primary and secondary fuel types and their respective shares
        for a given fuel type, taken from `background_configuration` or from defaults.
        :param fuel_type: "petrol", "diesel", "cng" or "hydrogen"
        :return: tuple (primary, secondary, primary_share, secondary_share)
        """
default_fuels = {
"petrol": {"primary": "petrol", "secondary": "bioethanol - wheat straw"},
"diesel": {"primary": "diesel", "secondary": "biodiesel - cooking oil"},
"cng": {"primary": "cng", "secondary": "biogas"},
"hydrogen": {"primary": "electrolysis", "secondary": "smr - natural gas"},
}
if "fuel blend" in self.background_configuration:
if fuel_type in self.background_configuration["fuel blend"]:
primary = self.background_configuration["fuel blend"][fuel_type][
"primary fuel"
]["type"]
                try:
                    secondary = self.background_configuration["fuel blend"][fuel_type][
                        "secondary fuel"
                    ]["type"]
                except KeyError:
                    # No secondary fuel specified: fall back to the default
                    secondary = default_fuels[fuel_type]["secondary"]
primary_share = self.background_configuration["fuel blend"][fuel_type][
"primary fuel"
]["share"]
secondary_share = 1 - np.array(primary_share)
else:
primary = default_fuels[fuel_type]["primary"]
secondary = default_fuels[fuel_type]["secondary"]
secondary_share = self.get_share_biofuel()
primary_share = 1 - np.array(secondary_share)
else:
primary = default_fuels[fuel_type]["primary"]
secondary = default_fuels[fuel_type]["secondary"]
secondary_share = self.get_share_biofuel()
primary_share = 1 - np.array(secondary_share)
return (primary, secondary, primary_share, secondary_share)
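    # Example of a user-defined blend in `background_configuration` (a sketch;
    # one share per year in scope, the secondary share being the complement):
    #
    #   bc = {"fuel blend": {"petrol": {
    #       "primary fuel": {"type": "petrol", "share": [0.9, 0.8]},
    #       "secondary fuel": {"type": "bioethanol - wheat straw"},
    #   }}}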
def set_actual_range(self):
"""
Set the actual range considering the blend.
Liquid bio-fuels and synthetic fuels typically have a lower calorific value. Hence, the need to recalculate
the vehicle range.
Modifies parameter `range` of `array` in place
"""
if {"ICEV-p", "HEV-p", "PHEV-p"}.intersection(set(self.scope["powertrain"])):
for y in self.scope["year"]:
share_primary = self.fuel_blends["petrol"]["primary"]["share"][
self.scope["year"].index(y)
]
lhv_primary = self.fuel_blends["petrol"]["primary"]["lhv"]
share_secondary = self.fuel_blends["petrol"]["secondary"]["share"][
self.scope["year"].index(y)
]
lhv_secondary = self.fuel_blends["petrol"]["secondary"]["lhv"]
index = self.get_index_vehicle_from_array(
["ICEV-p", "HEV-p", "PHEV-p"], y, method="and"
)
self.array.values[self.array_inputs["range"], :, index] = (
(
(
self.array.values[self.array_inputs["fuel mass"], :, index]
* share_primary
* lhv_primary
)
+ (
self.array.values[self.array_inputs["fuel mass"], :, index]
* share_secondary
* lhv_secondary
)
)
* 1000
/ self.array.values[self.array_inputs["TtW energy"], :, index]
)
if {"ICEV-d", "HEV-d", "PHEV-d"}.intersection(set(self.scope["powertrain"])):
for y in self.scope["year"]:
share_primary = self.fuel_blends["diesel"]["primary"]["share"][
self.scope["year"].index(y)
]
lhv_primary = self.fuel_blends["diesel"]["primary"]["lhv"]
share_secondary = self.fuel_blends["diesel"]["secondary"]["share"][
self.scope["year"].index(y)
]
lhv_secondary = self.fuel_blends["diesel"]["secondary"]["lhv"]
index = self.get_index_vehicle_from_array(
["ICEV-d", "PHEV-d", "HEV-d"], y, method="and"
)
self.array.values[self.array_inputs["range"], :, index] = (
(
(
self.array.values[self.array_inputs["fuel mass"], :, index]
* share_primary
* lhv_primary
)
+ (
self.array.values[self.array_inputs["fuel mass"], :, index]
* share_secondary
* lhv_secondary
)
)
* 1000
/ self.array.values[self.array_inputs["TtW energy"], :, index]
)
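    # The recalculation above follows, for each blend (unit assumptions: LHV in
    # MJ/kg, tank-to-wheel energy in kJ/km, as the factor 1000 suggests):
    #
    #   range [km] = fuel mass [kg]
    #                * (share_primary * LHV_primary + share_secondary * LHV_secondary)
    #                * 1000 / TtW energy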
def define_fuel_blends(self):
"""
This function defines fuel blends from what is passed in `background_configuration`.
It populates a dictionary `self.fuel_blends` that contains the respective shares, lower heating values
and CO2 emission factors of the fuels used.
:return:
"""
fuels_lhv = {
"petrol": 42.4,
"bioethanol - wheat straw": 26.8,
"bioethanol - maize starch": 26.8,
"bioethanol - sugarbeet": 26.8,
"bioethanol - forest residues": 26.8,
"synthetic gasoline": 42.4,
"diesel": 42.8,
"biodiesel - cooking oil": 31.7,
"biodiesel - algae": 31.7,
"synthetic diesel": 43.3,
"cng": 55.5,
"biogas": 55.5,
"syngas": 55.5
}
fuels_CO2 = {
"petrol": 3.18,
"bioethanol - wheat straw": 1.91,
"bioethanol - maize starch": 1.91,
"bioethanol - sugarbeet": 1.91,
"bioethanol - forest residues": 1.91,
"synthetic gasoline": 3.18,
"diesel": 3.14,
"biodiesel - cooking oil": 2.85,
"biodiesel - algae": 2.85,
"synthetic diesel": 3.16,
"cng": 2.65,
"biogas": 2.65,
"syngas": 2.65
}
if {"ICEV-p", "HEV-p", "PHEV-p"}.intersection(set(self.scope["powertrain"])):
fuel_type = "petrol"
primary, secondary, primary_share, secondary_share = self.find_fuel_shares(
fuel_type
)
self.create_fuel_markets(
fuel_type, primary, secondary, primary_share, secondary_share
)
self.fuel_blends[fuel_type] = {
"primary": {
"type": primary,
"share": primary_share,
"lhv": fuels_lhv[primary],
"CO2": fuels_CO2[primary],
},
"secondary": {
"type": secondary,
"share": secondary_share,
"lhv": fuels_lhv[secondary],
"CO2": fuels_CO2[secondary],
},
}
if {"ICEV-d", "HEV-d", "PHEV-d"}.intersection(set(self.scope["powertrain"])):
fuel_type = "diesel"
primary, secondary, primary_share, secondary_share = self.find_fuel_shares(
fuel_type
)
self.create_fuel_markets(
fuel_type, primary, secondary, primary_share, secondary_share
)
self.fuel_blends[fuel_type] = {
"primary": {
"type": primary,
"share": primary_share,
"lhv": fuels_lhv[primary],
"CO2": fuels_CO2[primary],
},
"secondary": {
"type": secondary,
"share": secondary_share,
"lhv": fuels_lhv[secondary],
"CO2": fuels_CO2[secondary],
},
}
if {"ICEV-g"}.intersection(set(self.scope["powertrain"])):
fuel_type = "cng"
primary, secondary, primary_share, secondary_share = self.find_fuel_shares(
fuel_type
)
self.create_fuel_markets(
fuel_type, primary, secondary, primary_share, secondary_share
)
            self.fuel_blends[fuel_type] = {
                "primary": {
                    "type": primary,
                    "share": primary_share,
                    "lhv": fuels_lhv[primary],
                    "CO2": fuels_CO2[primary],
                },
                "secondary": {
                    "type": secondary,
                    "share": secondary_share,
                    # The secondary fuel must use its own LHV and CO2 factor
                    "lhv": fuels_lhv[secondary],
                    "CO2": fuels_CO2[secondary],
                },
            }
if {"FCEV"}.intersection(set(self.scope["powertrain"])):
fuel_type = "hydrogen"
primary, secondary, primary_share, secondary_share = self.find_fuel_shares(
fuel_type
)
self.create_fuel_markets(
fuel_type, primary, secondary, primary_share, secondary_share
)
self.fuel_blends[fuel_type] = {
"primary": {"type": primary, "share": primary_share},
"secondary": {"type": secondary, "share": secondary_share},
}
if {"BEV", "PHEV-p", "PHEV-d"}.intersection(set(self.scope["powertrain"])):
fuel_type = "electricity"
self.create_fuel_markets(fuel_type)
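    # After this method runs, self.fuel_blends has the shape (illustrative values):
    #
    #   {"petrol": {
    #       "primary":   {"type": "petrol", "share": [...], "lhv": 42.4, "CO2": 3.18},
    #       "secondary": {"type": "bioethanol - wheat straw", "share": [...],
    #                     "lhv": 26.8, "CO2": 1.91},
    #    }, ...}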
def create_fuel_markets(
self,
fuel_type,
primary=None,
secondary=None,
primary_share=None,
secondary_share=None,
):
"""
This function creates markets for fuel, considering a given blend, a given fuel type and a given year.
It also adds separate electricity input in case hydrogen from electrolysis is needed somewhere in the fuel supply chain.
:return:
"""
d_fuels = {
"electrolysis": {
"name": (
"Hydrogen, gaseous, 700 bar, from electrolysis, at H2 fuelling station",
"RER",
"kilogram",
"Hydrogen, gaseous, 700 bar, from electrolysis, at H2 fuelling station",
),
"additional electricity": 58,
},
"smr - natural gas": {
"name": (
"Hydrogen, gaseous, 700 bar, from SMR NG w/o CCS, at H2 fuelling station",
"RER",
"kilogram",
"Hydrogen, gaseous, 700 bar, from SMR NG w/o CCS, at H2 fuelling station",
),
"additional electricity": 0,
},
"smr - natural gas with CCS": {
"name": (
"Hydrogen, gaseous, 700 bar, from SMR NG w CCS, at H2 fuelling station",
"RER",
"kilogram",
"Hydrogen, gaseous, 700 bar, from SMR NG w CCS, at H2 fuelling station",
),
"additional electricity": 0,
},
"smr - biogas": {
"name": (
"Hydrogen, gaseous, 700 bar, from SMR of biogas, at H2 fuelling station",
"RER",
"kilogram",
"Hydrogen, gaseous, 700 bar, from SMR of biogas, at H2 fuelling station",
),
"additional electricity": 0,
},
"smr - biogas with CCS": {
"name": (
"Hydrogen, gaseous, 700 bar, from SMR of biogas with CCS, at H2 fuelling station",
"RER",
"kilogram",
"Hydrogen, gaseous, 700 bar, from SMR of biogas with CCS, at H2 fuelling station",
),
"additional electricity": 0,
},
"coal gasification": {
"name": (
"Hydrogen, gaseous, 700 bar, from coal gasification, at H2 fuelling station",
"RER",
"kilogram",
"Hydrogen, gaseous, 700 bar, from coal gasification, at H2 fuelling station",
),
"additional electricity": 0,
},
"wood gasification": {
"name": (
"Hydrogen, gaseous, 700 bar, from dual fluidised bed gasification of woody biomass, at H2 fuelling station",
"CH",
"kilogram",
"Hydrogen, gaseous, 700 bar",
),
"additional electricity": 0,
},
"wood gasification with CCS": {
"name": (
"Hydrogen, gaseous, 700 bar, from dual fluidised bed gasification of woody biomass with CCS, at H2 fuelling station",
"CH",
"kilogram",
"Hydrogen, gaseous, 700 bar",
),
"additional electricity": 0,
},
"cng": {
"name": (
"market for natural gas, from high pressure network (1-5 bar), at service station",
"GLO",
"kilogram",
"natural gas, from high pressure network (1-5 bar), at service station",
),
"additional electricity": 0,
},
"biogas": {
"name": (
"biogas upgrading - sewage sludge - amine scrubbing - best",
"CH",
"kilogram",
"biogas upgrading - sewage sludge - amine scrubbing - best",
),
"additional electricity": 0,
},
"syngas": {
"name": (
"Methane production, synthetic, from electrochemical methanation",
"RER",
"kilogram",
"Methane, synthetic",
),
"additional electricity": 58 * 0.50779661,
},
"diesel": {
"name": (
"market for diesel",
"Europe without Switzerland",
"kilogram",
"diesel",
),
"additional electricity": 0,
},
"biodiesel - algae": {
"name": (
"Biodiesel from algae",
"RER",
"kilogram",
"Biodiesel from algae",
),
"additional electricity": 0,
},
"biodiesel - cooking oil": {
"name": (
"Biodiesel from cooking oil",
"RER",
"kilogram",
"Biodiesel from cooking oil",
),
"additional electricity": 0,
},
"synthetic diesel": {
"name": (
"Diesel production, synthetic, Fischer Tropsch process",
"RER",
"kilogram",
"Diesel, synthetic",
),
"additional electricity": 58 * 0.2875,
},
"petrol": {
"name": (
"market for petrol, low-sulfur",
"Europe without Switzerland",
"kilogram",
"petrol, low-sulfur",
),
"additional electricity": 0,
},
"bioethanol - wheat straw": {
"name": (
"Ethanol from wheat straw pellets",
"RER",
"kilogram",
"Ethanol from wheat straw pellets",
),
"additional electricity": 0,
},
"bioethanol - forest residues": {
"name": (
"Ethanol from forest residues",
"RER",
"kilogram",
"Ethanol from forest residues",
),
"additional electricity": 0,
},
"bioethanol - sugarbeet": {
"name": (
"Ethanol from sugarbeet",
"RER",
"kilogram",
"Ethanol from sugarbeet",
),
"additional electricity": 0,
},
"bioethanol - maize starch": {
"name": (
"Ethanol from maize starch",
"RER",
"kilogram",
"Ethanol from maize starch",
),
"additional electricity": 0,
},
"synthetic gasoline": {
"name": (
"Gasoline production, synthetic, from methanol",
"RER",
"kilogram",
"Gasoline, synthetic",
),
"additional electricity": 58 * 0.328,
},
}
d_dataset_name = {
"petrol": "fuel supply for gasoline vehicles, ",
"diesel": "fuel supply for diesel vehicles, ",
"cng": "fuel supply for gas vehicles, ",
"hydrogen": "fuel supply for hydrogen vehicles, ",
"electricity": "electricity supply for electric vehicles, ",
}
if fuel_type != "electricity":
for y in self.scope["year"]:
dataset_name = d_dataset_name[fuel_type] + str(y)
fuel_market_index = [
self.inputs[i] for i in self.inputs if i[0] == dataset_name
][0]
primary_fuel_activity_index = self.inputs[d_fuels[primary]["name"]]
secondary_fuel_activity_index = self.inputs[d_fuels[secondary]["name"]]
self.A[:, primary_fuel_activity_index, fuel_market_index] = (
-1 * primary_share[self.scope["year"].index(y)]
)
self.A[:, secondary_fuel_activity_index, fuel_market_index] = (
-1 * secondary_share[self.scope["year"].index(y)]
)
additional_electricity = (
d_fuels[primary]["additional electricity"]
* primary_share[self.scope["year"].index(y)]
) + (
d_fuels[secondary]["additional electricity"]
* secondary_share[self.scope["year"].index(y)]
)
if additional_electricity > 0:
electricity_mix_index = [
self.inputs[i]
for i in self.inputs
if i[0] == "electricity market for fuel preparation, " + str(y)
][0]
self.A[:, electricity_mix_index, fuel_market_index] = (
-1 * additional_electricity
)
else:
for y in self.scope["year"]:
dataset_name = d_dataset_name[fuel_type] + str(y)
electricity_market_index = [
self.inputs[i] for i in self.inputs if i[0] == dataset_name
][0]
electricity_mix_index = [
self.inputs[i]
for i in self.inputs
if i[0] == "electricity market for fuel preparation, " + str(y)
][0]
self.A[:, electricity_mix_index, electricity_market_index] = -1
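    # Note the "additional electricity" entries above: hydrogen from electrolysis,
    # for instance, draws 58 kWh of electricity per kg of H2 from the year-specific
    # "electricity market for fuel preparation" dataset, on top of the fuel
    # supply itself.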
def set_inputs_in_A_matrix(self, array):
"""
        Fill in the A matrix. Does not return anything. Modifies in place.
Shape of the A matrix (values, products, activities).
:param array: :attr:`array` from :class:`CarModel` class
"""
# Glider
self.A[
:,
self.inputs[
(
"market for glider, passenger car",
"GLO",
"kilogram",
"glider, passenger car",
)
],
-self.number_of_cars :,
] = (
(array[self.array_inputs["glider base mass"], :])
/ array[self.array_inputs["lifetime kilometers"], :]
* -1
)
self.A[
:,
self.inputs[
("Glider lightweighting", "GLO", "kilogram", "Glider lightweighting")
],
-self.number_of_cars :,
] = (
(
array[self.array_inputs["lightweighting"], :]
* array[self.array_inputs["glider base mass"], :]
)
/ array[self.array_inputs["lifetime kilometers"], :]
* -1
)
self.A[
:,
self.inputs[
(
"maintenance, passenger car",
"RER",
"unit",
"passenger car maintenance",
)
],
-self.number_of_cars :,
        ] = (
            # Maintenance scaled by curb mass relative to a 1240 kg reference car
            # over a 150,000 km service life
            array[self.array_inputs["curb mass"], :] / 1240 / 150000 * -1
        )
# Glider EoL
self.A[
:,
self.inputs[
(
"market for manual dismantling of used electric passenger car",
"GLO",
"unit",
"manual dismantling of used electric passenger car",
)
],
-self.number_of_cars :,
] = (
array[self.array_inputs["curb mass"], :]
* (1 - array[self.array_inputs["combustion power share"], :])
/ array[self.array_inputs["lifetime kilometers"], :]
* -1
)
self.A[
:,
self.inputs[
(
"market for manual dismantling of used passenger car with internal combustion engine",
"GLO",
"unit",
"manual dismantling of used passenger car with internal combustion engine",
)
],
-self.number_of_cars :,
] = (
array[self.array_inputs["curb mass"], :]
* array[self.array_inputs["combustion power share"], :]
/ array[self.array_inputs["lifetime kilometers"], :]
* -1
)
# Powertrain components
self.A[
:,
self.inputs[
(
"market for charger, electric passenger car",
"GLO",
"kilogram",
"charger, electric passenger car",
)
],
-self.number_of_cars :,
] = (
array[self.array_inputs["charger mass"], :]
/ array[self.array_inputs["lifetime kilometers"], :]
* -1
)
self.A[
:,
self.inputs[
(
"market for converter, for electric passenger car",
"GLO",
"kilogram",
"converter, for electric passenger car",
)
],
-self.number_of_cars :,
] = (
array[self.array_inputs["converter mass"], :]
/ array[self.array_inputs["lifetime kilometers"], :]
* -1
)
self.A[
:,
self.inputs[
(
"market for electric motor, electric passenger car",
"GLO",
"kilogram",
"electric motor, electric passenger car",
)
],
-self.number_of_cars :,
] = (
array[self.array_inputs["electric engine mass"], :]
/ array[self.array_inputs["lifetime kilometers"], :]
* -1
)
self.A[
:,
self.inputs[
(
"market for inverter, for electric passenger car",
"GLO",
"kilogram",
"inverter, for electric passenger car",
)
],
-self.number_of_cars :,
] = (
array[self.array_inputs["inverter mass"], :]
/ array[self.array_inputs["lifetime kilometers"], :]
* -1
)
self.A[
:,
self.inputs[
(
"market for power distribution unit, for electric passenger car",
"GLO",
"kilogram",
"power distribution unit, for electric passenger car",
)
],
-self.number_of_cars :,
] = (
array[self.array_inputs["power distribution unit mass"], :]
/ array[self.array_inputs["lifetime kilometers"], :]
* -1
)
l_elec_pt = [
"charger mass",
"converter mass",
"inverter mass",
"power distribution unit mass",
"electric engine mass",
"fuel cell stack mass",
"fuel cell ancillary BoP mass",
"fuel cell essential BoP mass",
"battery cell mass",
"battery BoP mass",
]
self.A[
:,
self.inputs[
(
"market for used powertrain from electric passenger car, manual dismantling",
"GLO",
"kilogram",
"used powertrain from electric passenger car, manual dismantling",
)
],
-self.number_of_cars :,
] = (
array[[self.array_inputs[l] for l in l_elec_pt], :].sum(axis=0)
/ array[self.array_inputs["lifetime kilometers"], :]
)
self.A[
:,
self.inputs[
(
"market for internal combustion engine, passenger car",
"GLO",
"kilogram",
"internal combustion engine, for passenger car",
)
],
-self.number_of_cars :,
] = (
(
array[
[
self.array_inputs[l]
for l in ["combustion engine mass", "powertrain mass"]
],
:,
].sum(axis=0)
)
/ array[self.array_inputs["lifetime kilometers"], :]
* -1
)
self.A[
:,
self.inputs[("Ancillary BoP", "GLO", "kilogram", "Ancillary BoP")],
-self.number_of_cars :,
] = (
array[self.array_inputs["fuel cell ancillary BoP mass"], :]
/ array[self.array_inputs["lifetime kilometers"], :]
* -1
)
self.A[
:,
self.inputs[("Essential BoP", "GLO", "kilogram", "Essential BoP")],
-self.number_of_cars :,
] = (
array[self.array_inputs["fuel cell essential BoP mass"], :]
/ array[self.array_inputs["lifetime kilometers"], :]
* -1
)
self.A[
:,
self.inputs[("Stack", "GLO", "kilowatt", "Stack")],
-self.number_of_cars :,
] = (
array[self.array_inputs["fuel cell stack mass"], :]
/ array[self.array_inputs["lifetime kilometers"], :]
* -1
)
# Start of printout
print(
"****************** IMPORTANT BACKGROUND PARAMETERS ******************",
end="\n * ",
)
# Energy storage
print(
"The country of use is " + self.country, end="\n * ",
)
battery_tech = self.background_configuration["energy storage"]["electric"][
"type"
]
battery_origin = self.background_configuration["energy storage"]["electric"][
"origin"
]
print(
"Power and energy batteries produced in "
+ battery_origin
+ " using "
+ battery_tech
+ " chemistry.",
end="\n * ",
)
# Use the NMC inventory of Schmidt et al. 2019
self.A[
:,
self.inputs[("Battery BoP", "GLO", "kilogram", "Battery BoP")],
-self.number_of_cars :,
] = (
(
array[self.array_inputs["battery BoP mass"], :]
* (1 + array[self.array_inputs["battery lifetime replacements"], :])
)
/ array[self.array_inputs["lifetime kilometers"], :]
* -1
)
battery_cell_label = (
"Battery cell, " + battery_tech,
"GLO",
"kilogram",
"Battery cell",
)
        self.A[:, self.inputs[battery_cell_label], -self.number_of_cars :,] = (
            (
                array[self.array_inputs["battery cell mass"], :]
                # Battery cells are replaced according to the battery lifetime,
                # mirroring the "Battery BoP" entry above
                * (1 + array[self.array_inputs["battery lifetime replacements"], :])
            )
            / array[self.array_inputs["lifetime kilometers"], :]
            * -1
        )
        # Zero out the generic global electricity input of the battery cell
        # dataset; electricity is instead supplied by the origin-specific
        # markets set below, given the country of manufacture
self.A[
:,
self.inputs[
(
"market group for electricity, medium voltage",
"World",
"kilowatt hour",
"electricity, medium voltage",
)
],
self.inputs[battery_cell_label],
] = 0
for y in self.scope["year"]:
index = self.get_index_vehicle_from_array(y)
self.A[
np.ix_(
np.arange(self.iterations),
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for energy storage production" in i[0]
],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0] and "Passenger" in i[0]
],
)
] = (
array[
self.array_inputs["battery cell production electricity"], :, index
].T
* self.A[
:,
self.inputs[battery_cell_label],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0] and "Passenger" in i[0]
],
]
).reshape(
self.iterations, 1, -1
)
index_A = [
self.inputs[c]
for c in self.inputs
if any(
ele in c[0]
for ele in ["ICEV-d", "ICEV-p", "HEV-p", "PHEV-p", "PHEV-d", "HEV-d"]
)
]
index = self.get_index_vehicle_from_array(
["ICEV-d", "ICEV-p", "HEV-p", "PHEV-p", "PHEV-d", "HEV-d"]
)
self.A[
:,
self.inputs[
(
"polyethylene production, high density, granulate",
"RER",
"kilogram",
"polyethylene, high density, granulate",
)
],
index_A,
] = (
array[self.array_inputs["fuel tank mass"], :, index]
/ array[self.array_inputs["lifetime kilometers"], :, index]
* -1
).T
index = self.get_index_vehicle_from_array("ICEV-g")
self.A[
:,
self.inputs[
(
"glass fibre reinforced plastic production, polyamide, injection moulded",
"RER",
"kilogram",
"glass fibre reinforced plastic, polyamide, injection moulded",
)
],
self.index_cng,
] = (
array[self.array_inputs["fuel tank mass"], :, index]
/ array[self.array_inputs["lifetime kilometers"], :, index]
* -1
).T
if "hydrogen" in self.background_configuration["energy storage"]:
# If a customization dict is passed
hydro_tank_technology = self.background_configuration["energy storage"][
"hydrogen"
]["type"]
else:
hydro_tank_technology = "carbon fiber"
dict_tank_map = {
"carbon fiber": (
"Fuel tank, compressed hydrogen gas, 700bar",
"GLO",
"kilogram",
"Fuel tank, compressed hydrogen gas, 700bar",
),
"hdpe": (
"Fuel tank, compressed hydrogen gas, 700bar, with HDPE liner",
"RER",
"kilogram",
"Hydrogen tank",
),
"aluminium": (
"Fuel tank, compressed hydrogen gas, 700bar, with aluminium liner",
"RER",
"kilogram",
"Hydrogen tank",
),
}
index = self.get_index_vehicle_from_array("FCEV")
self.A[
:, self.inputs[dict_tank_map[hydro_tank_technology]], self.index_fuel_cell,
] = (
array[self.array_inputs["fuel tank mass"], :, index]
/ array[self.array_inputs["lifetime kilometers"], :, index]
* -1
).T
for y in self.scope["year"]:
sum_renew, co2_intensity_tech = self.define_renewable_rate_in_mix()
if self.scope["year"].index(y) + 1 == len(self.scope["year"]):
end_str = "\n * "
else:
end_str = "\n \t * "
print(
"in "
+ str(y)
+ ", % of renewable: "
+ str(np.round(sum_renew * 100, 0))
+ "%"
+ ", GHG intensity per kWh: "
+ str(
int(
np.sum(
co2_intensity_tech * self.mix[self.scope["year"].index(y)]
)
)
)
+ " g. CO2-eq.",
end=end_str,
)
        if any(x in self.scope["powertrain"] for x in ["BEV", "PHEV-p", "PHEV-d"]):
for y in self.scope["year"]:
index = self.get_index_vehicle_from_array(
["BEV", "PHEV-p", "PHEV-d"], y, method="and"
)
self.A[
np.ix_(
np.arange(self.iterations),
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity supply for electric vehicles" in i[0]
],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "Passenger" in i[0]
                            and any(x in i[0] for x in ["BEV", "PHEV-p", "PHEV-d"])
],
)
] = (
array[self.array_inputs["electricity consumption"], :, index] * -1
).T.reshape(
self.iterations, 1, -1
)
if "FCEV" in self.scope["powertrain"]:
index = self.get_index_vehicle_from_array("FCEV")
print(
"{} is completed by {}.".format(
self.fuel_blends["hydrogen"]["primary"]["type"],
self.fuel_blends["hydrogen"]["secondary"]["type"],
),
end="\n \t * ",
)
for y in self.scope["year"]:
if self.scope["year"].index(y) + 1 == len(self.scope["year"]):
end_str = "\n * "
else:
end_str = "\n \t * "
print(
"in "
+ str(y)
+ " _________________________________________ "
+ str(
np.round(
self.fuel_blends["hydrogen"]["secondary"]["share"][
self.scope["year"].index(y)
]
* 100,
0,
)
)
+ "%",
end=end_str,
)
# Primary fuel share
for y in self.scope["year"]:
ind_A = [
self.inputs[i]
for i in self.inputs
if str(y) in i[0] and "Passenger" in i[0] and "FCEV" in i[0]
]
ind_array = [
x for x in self.get_index_vehicle_from_array(y) if x in index
]
self.A[
:,
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "fuel supply for hydrogen vehicles" in i[0]
],
ind_A,
] = (
array[self.array_inputs["fuel mass"], :, ind_array]
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
if "ICEV-g" in self.scope["powertrain"]:
index = self.get_index_vehicle_from_array("ICEV-g")
print(
"{} is completed by {}.".format(
self.fuel_blends["cng"]["primary"]["type"],
self.fuel_blends["cng"]["secondary"]["type"],
),
end="\n \t * ",
)
for y in self.scope["year"]:
if self.scope["year"].index(y) + 1 == len(self.scope["year"]):
end_str = "\n * "
else:
end_str = "\n \t * "
print(
"in "
+ str(y)
+ " _________________________________________ "
+ str(
np.round(
self.fuel_blends["cng"]["secondary"]["share"][
self.scope["year"].index(y)
]
* 100,
0,
)
)
+ "%",
end=end_str,
)
# Primary fuel share
for y in self.scope["year"]:
ind_A = [
self.inputs[i]
for i in self.inputs
if str(y) in i[0] and "Passenger" in i[0] and "ICEV-g" in i[0]
]
ind_array = [
x for x in self.get_index_vehicle_from_array(y) if x in index
]
self.A[
:,
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0] and "fuel supply for gas vehicles" in i[0]
],
ind_A,
] = (
(array[self.array_inputs["fuel mass"], :, ind_array])
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Fuel-based emissions from CNG, CO2
# The share and CO2 emissions factor of CNG is retrieved, if used
share_fossil = 0
CO2_fossil = 0
if self.fuel_blends["cng"]["primary"]["type"] == "cng":
share_fossil += self.fuel_blends["cng"]["primary"]["share"][
self.scope["year"].index(y)
]
CO2_fossil = self.fuel_blends["cng"]["primary"]["CO2"]
if self.fuel_blends["cng"]["secondary"]["type"] == "cng":
share_fossil += self.fuel_blends["cng"]["secondary"]["share"][
self.scope["year"].index(y)
]
CO2_fossil = self.fuel_blends["cng"]["primary"]["CO2"]
self.A[
:,
self.inputs[("Carbon dioxide, fossil", ("air",), "kilogram")],
ind_A,
] = (
array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil * CO2_fossil
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
                # Fuel-based CO2 emission from alternative gas
                # The share of non-fossil gas in the blend is retrieved
                # As well as the CO2 emission factor of the fuel
share_non_fossil = 0
CO2_non_fossil = 0
if self.fuel_blends["cng"]["primary"]["type"] != "cng":
share_non_fossil += self.fuel_blends["cng"]["primary"]["share"][
self.scope["year"].index(y)
]
CO2_non_fossil = self.fuel_blends["cng"]["primary"]["CO2"]
if self.fuel_blends["cng"]["secondary"]["type"] != "cng":
share_non_fossil += self.fuel_blends["cng"]["secondary"]["share"][
self.scope["year"].index(y)
]
CO2_non_fossil = self.fuel_blends["cng"]["secondary"]["CO2"]
self.A[
:,
self.inputs[("Carbon dioxide, from soil or biomass stock", ("air",), "kilogram")],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_non_fossil * CO2_non_fossil)
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
        if any(i in ["ICEV-d", "PHEV-d", "HEV-d"] for i in self.scope["powertrain"]):
index = self.get_index_vehicle_from_array(["ICEV-d", "PHEV-d", "HEV-d"])
print(
"{} is completed by {}.".format(
self.fuel_blends["diesel"]["primary"]["type"],
self.fuel_blends["diesel"]["secondary"]["type"],
),
end="\n \t * ",
)
for y in self.scope["year"]:
if self.scope["year"].index(y) + 1 == len(self.scope["year"]):
end_str = "\n * "
else:
end_str = "\n \t * "
print(
"in "
+ str(y)
+ " _________________________________________ "
+ str(
np.round(
self.fuel_blends["diesel"]["secondary"]["share"][
self.scope["year"].index(y)
]
* 100,
0,
)
)
+ "%",
end=end_str,
)
for y in self.scope["year"]:
ind_A = [
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "Passenger" in i[0]
and any(x in i[0] for x in ["ICEV-d", "PHEV-d", "HEV-d"])
]
ind_array = [
x for x in self.get_index_vehicle_from_array(y) if x in index
]
# Fuel supply
self.A[
:,
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0] and "fuel supply for diesel vehicles" in i[0]
],
ind_A,
] = (
(array[self.array_inputs["fuel mass"], :, ind_array])
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
share_fossil = 0
CO2_fossil = 0
                # Fuel-based CO2 emission from conventional diesel
if self.fuel_blends["diesel"]["primary"]["type"] == "diesel":
share_fossil += self.fuel_blends["diesel"]["primary"]["share"][
self.scope["year"].index(y)
]
CO2_fossil = self.fuel_blends["diesel"]["primary"]["CO2"]
if self.fuel_blends["diesel"]["secondary"]["type"] == "diesel":
share_fossil += self.fuel_blends["diesel"]["secondary"]["share"][
self.scope["year"].index(y)
]
CO2_fossil = self.fuel_blends["diesel"]["secondary"]["CO2"]
self.A[
:,
self.inputs[("Carbon dioxide, fossil", ("air",), "kilogram")],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil * CO2_fossil)
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
share_non_fossil = 0
CO2_non_fossil = 0
                # Fuel-based CO2 emission from alternative diesel
# The share of non-fossil fuel in the blend is retrieved
# As well as the CO2 emission factor of the fuel
if self.fuel_blends["diesel"]["primary"]["type"] != "diesel":
share_non_fossil += self.fuel_blends["diesel"]["primary"]["share"][
self.scope["year"].index(y)
]
CO2_non_fossil = self.fuel_blends["diesel"]["primary"]["CO2"]
if self.fuel_blends["diesel"]["secondary"]["type"] != "diesel":
share_non_fossil += self.fuel_blends["diesel"]["secondary"]["share"][
self.scope["year"].index(y)
]
CO2_non_fossil = self.fuel_blends["diesel"]["secondary"]["CO2"]
self.A[
:,
self.inputs[("Carbon dioxide, from soil or biomass stock", ("air",), "kilogram")],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_non_fossil * CO2_non_fossil)
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Heavy metals emissions from conventional diesel
# Emission factors from Spielmann et al., Transport Services Data v.2 (2007)
# Cadmium, 0.01 mg/kg diesel
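                # 0.01 mg/kg = 1e-8 kg per kg of fuel, hence the factor below; the
                # same mg/kg -> kg/kg conversion applies to all metals in this block.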
self.A[
:,
self.inputs[
("Cadmium", ("air", "urban air close to ground"), "kilogram")
],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil)
* 1e-8
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Copper, 1.7 mg/kg diesel
self.A[
:,
self.inputs[
("Copper", ("air", "urban air close to ground"), "kilogram")
],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil)
* 1.7e-6
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Chromium, 0.05 mg/kg diesel
self.A[
:,
self.inputs[
("Chromium", ("air", "urban air close to ground"), "kilogram")
],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil)
* 5.0e-8
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Nickel, 0.07 mg/kg diesel
self.A[
:,
self.inputs[
("Nickel", ("air", "urban air close to ground"), "kilogram")
],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil)
* 7.0e-8
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Selenium, 0.01 mg/kg diesel
self.A[
:,
self.inputs[
("Selenium", ("air", "urban air close to ground"), "kilogram")
],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil)
* 1.0e-8
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Zinc, 1 mg/kg diesel
self.A[
:,
self.inputs[
("Zinc", ("air", "urban air close to ground"), "kilogram")
],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil)
* 1.0e-6
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Chromium VI, 0.0001 mg/kg diesel
self.A[
:,
self.inputs[
(
"Chromium VI",
("air", "urban air close to ground"),
"kilogram",
)
],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil)
* 1.0e-10
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
if [i for i in self.scope["powertrain"] if i in ["ICEV-p", "HEV-p", "PHEV-p"]]:
index = self.get_index_vehicle_from_array(["ICEV-p", "HEV-p", "PHEV-p"])
print(
"{} is completed by {}.".format(
self.fuel_blends["petrol"]["primary"]["type"],
self.fuel_blends["petrol"]["secondary"]["type"],
),
end="\n \t * ",
)
for y in self.scope["year"]:
if self.scope["year"].index(y) + 1 == len(self.scope["year"]):
end_str = "\n * "
else:
end_str = "\n \t * "
print(
"in "
+ str(y)
+ " _________________________________________ "
+ str(
np.round(
self.fuel_blends["petrol"]["secondary"]["share"][
self.scope["year"].index(y)
]
* 100,
0,
)
)
+ "%",
end=end_str,
)
for y in self.scope["year"]:
ind_A = [
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "Passenger" in i[0]
and any(x in i[0] for x in ["ICEV-p", "HEV-p", "PHEV-p"])
]
ind_array = [
x for x in self.get_index_vehicle_from_array(y) if x in index
]
# Fuel supply
self.A[
:,
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "fuel supply for gasoline vehicles" in i[0]
],
ind_A,
] = (
(array[self.array_inputs["fuel mass"], :, ind_array])
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
share_fossil = 0
CO2_fossil = 0
# Fuel-based CO2 emission from conventional petrol
if self.fuel_blends["petrol"]["primary"]["type"] == "petrol":
share_fossil += self.fuel_blends["petrol"]["primary"]["share"][
self.scope["year"].index(y)
]
CO2_fossil = self.fuel_blends["petrol"]["primary"]["CO2"]
if self.fuel_blends["petrol"]["secondary"]["type"] == "petrol":
share_fossil += self.fuel_blends["petrol"]["secondary"]["share"][
self.scope["year"].index(y)
]
CO2_fossil = self.fuel_blends["petrol"]["secondary"]["CO2"]
self.A[
:,
self.inputs[("Carbon dioxide, fossil", ("air",), "kilogram")],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil * CO2_fossil)
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
share_non_fossil = 0
CO2_non_fossil = 0
# Fuel-based CO2 emission from alternative petrol
# The share of non-fossil fuel in the blend is retrieved
# As well as the CO2 emission factor of the fuel
if self.fuel_blends["petrol"]["primary"]["type"] != "petrol":
share_non_fossil += self.fuel_blends["petrol"]["primary"]["share"][
self.scope["year"].index(y)
]
CO2_non_fossil = self.fuel_blends["petrol"]["primary"]["CO2"]
if self.fuel_blends["petrol"]["secondary"]["type"] != "petrol":
share_non_fossil += self.fuel_blends["petrol"]["secondary"]["share"][
self.scope["year"].index(y)
]
CO2_non_fossil = self.fuel_blends["petrol"]["secondary"]["CO2"]
self.A[
:,
self.inputs[("Carbon dioxide, from soil or biomass stock", ("air",), "kilogram")],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_non_fossil * CO2_non_fossil)
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Heavy metals emissions from conventional petrol
# Cadmium, 0.01 mg/kg gasoline
self.A[
:,
self.inputs[
("Cadmium", ("air", "urban air close to ground"), "kilogram")
],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil)
* 1e-8
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Copper, 1.7 mg/kg gasoline
self.A[
:,
self.inputs[
("Copper", ("air", "urban air close to ground"), "kilogram")
],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil)
* 1.7e-6
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Chromium, 0.05 mg/kg gasoline
self.A[
:,
self.inputs[
("Chromium", ("air", "urban air close to ground"), "kilogram")
],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil)
* 5.0e-8
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Nickel, 0.07 mg/kg gasoline
self.A[
:,
self.inputs[
("Nickel", ("air", "urban air close to ground"), "kilogram")
],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil)
* 7.0e-8
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Selenium, 0.01 mg/kg gasoline
self.A[
:,
self.inputs[
("Selenium", ("air", "urban air close to ground"), "kilogram")
],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil)
* 1.0e-8
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Zinc, 1 mg/kg gasoline
self.A[
:,
self.inputs[
("Zinc", ("air", "urban air close to ground"), "kilogram")
],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil)
* 1.0e-6
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Chromium VI, 0.0001 mg/kg gasoline
self.A[
:,
self.inputs[
(
"Chromium VI",
("air", "urban air close to ground"),
"kilogram",
)
],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil)
* 1.0e-10
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Non-exhaust emissions
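        # Road, tyre and brake wear scale linearly with driving mass:
        # 1e-8, 6e-8 and 5e-9 kg per kg of driving mass, respectively,
        # per vehicle-km driven.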
self.A[
:,
self.inputs[
(
"market for road wear emissions, passenger car",
"GLO",
"kilogram",
"road wear emissions, passenger car",
)
],
-self.number_of_cars :,
] = (array[self.array_inputs["driving mass"], :] * 1e-08)
self.A[
:,
self.inputs[
(
"market for tyre wear emissions, passenger car",
"GLO",
"kilogram",
"tyre wear emissions, passenger car",
)
],
-self.number_of_cars :,
] = (array[self.array_inputs["driving mass"], :] * 6e-08)
self.A[
:,
self.inputs[
(
"market for brake wear emissions, passenger car",
"GLO",
"kilogram",
"brake wear emissions, passenger car",
)
],
-self.number_of_cars :,
] = (array[self.array_inputs["driving mass"], :] * 5e-09)
# Infrastructure
self.A[
:,
self.inputs[("market for road", "GLO", "meter-year", "road")],
-self.number_of_cars :,
] = (5.37e-7 * array[self.array_inputs["driving mass"], :] * -1)
# Infrastructure maintenance
self.A[
:,
self.inputs[
("market for road maintenance", "RER", "meter-year", "road maintenance")
],
-self.number_of_cars :,
] = (1.29e-3 * -1)
# Exhaust emissions
# Non-fuel based emissions
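        # For each elementary flow in index_emissions, its factor is read from
        # `array` via rev_inputs -> map_non_fuel_emissions -> array_inputs.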
self.A[:, self.index_emissions, -self.number_of_cars :] = (
array[
[
self.array_inputs[self.map_non_fuel_emissions[self.rev_inputs[x]]]
for x in self.index_emissions
]
]
* -1
).transpose([1, 0, 2])
# Noise emissions
self.A[:, self.index_noise, -self.number_of_cars :] = (
array[
[
self.array_inputs[self.map_noise_emissions[self.rev_inputs[x]]]
for x in self.index_noise
]
]
* -1
).transpose([1, 0, 2])
print("*********************************************************************")
| 30,672
| 0
| 189
|
544162d5b108b9011d584715752e360c5e3a3bf6
| 4,955
|
py
|
Python
|
project/project/settings.py
|
gtrafimenkov/example-django-kubernetes
|
ddcf1d0b06152ca3615230be53cf9a5f837c09d9
|
[
"BSD-3-Clause"
] | null | null | null |
project/project/settings.py
|
gtrafimenkov/example-django-kubernetes
|
ddcf1d0b06152ca3615230be53cf9a5f837c09d9
|
[
"BSD-3-Clause"
] | 6
|
2021-02-02T22:59:52.000Z
|
2021-06-10T20:35:55.000Z
|
project/project/settings.py
|
gtrafimenkov/example-django-kubernetes
|
ddcf1d0b06152ca3615230be53cf9a5f837c09d9
|
[
"BSD-3-Clause"
] | null | null | null |
# Django settings for gtd project.
import os
from django.contrib.messages import constants as message_constants
DEBUG = get_debug_settings()
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = "America/Los_Angeles"
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en-us"
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "project.urls"
LOGIN_URL = "/login"
LOGIN_REDIRECT_URL = "todo:lists"
LOGOUT_REDIRECT_URL = "home"
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SESSION_SECURITY_WARN_AFTER = 5
SESSION_SECURITY_EXPIRE_AFTER = 12
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = "project.wsgi.application"
INSTALLED_APPS = (
"django.contrib.admin",
"django.contrib.admindocs",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.flatpages",
"django.contrib.messages",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.staticfiles",
"todo",
"django_extensions",
)
# Static files and uploads
STATIC_URL = "/static/"
STATICFILES_DIRS = [os.path.join(BASE_DIR, "project", "static")]
STATIC_ROOT = os.path.join(BASE_DIR, "static")
# Uploaded media
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
MEDIA_URL = "/media/"
# Without this, uploaded files > 4MB end up with perm 0600, unreadable by web server process
FILE_UPLOAD_PERMISSIONS = 0o644
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(BASE_DIR, "project", "templates")],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
# Your stuff: custom template context processors go here
]
},
}
]
# Override CSS class for the ERROR tag level to match Bootstrap class name
MESSAGE_TAGS = {message_constants.ERROR: "danger"}
####################################################################
# Environment specific settings
####################################################################
SECRET_KEY = os.environ.get('SECRET_KEY', 'lksdf98wrhkjs88dsf8-324ksdm')
# DEBUG = True
ALLOWED_HOSTS = ["*"]
DATABASES = get_db_settings()
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# TODO-specific settings
TODO_STAFF_ONLY = False
TODO_DEFAULT_LIST_SLUG = 'tickets'
TODO_DEFAULT_ASSIGNEE = None
TODO_PUBLIC_SUBMIT_REDIRECT = '/'
####################################################################
#
####################################################################
| 32.81457
| 101
| 0.638143
|
# Django settings for gtd project.
import os
from django.contrib.messages import constants as message_constants
def get_debug_settings():
return os.environ.get("DJANGO_DEBUG", "").lower() in ["true", "1", "yes", "y"]
DEBUG = get_debug_settings()
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = "America/Los_Angeles"
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en-us"
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "project.urls"
LOGIN_URL = "/login"
LOGIN_REDIRECT_URL = "todo:lists"
LOGOUT_REDIRECT_URL = "home"
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SESSION_SECURITY_WARN_AFTER = 5
SESSION_SECURITY_EXPIRE_AFTER = 12
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = "project.wsgi.application"
INSTALLED_APPS = (
"django.contrib.admin",
"django.contrib.admindocs",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.flatpages",
"django.contrib.messages",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.staticfiles",
"todo",
"django_extensions",
)
# Static files and uploads
STATIC_URL = "/static/"
STATICFILES_DIRS = [os.path.join(BASE_DIR, "project", "static")]
STATIC_ROOT = os.path.join(BASE_DIR, "static")
# Uploaded media
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
MEDIA_URL = "/media/"
# Without this, uploaded files > 4MB end up with perm 0600, unreadable by web server process
FILE_UPLOAD_PERMISSIONS = 0o644
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(BASE_DIR, "project", "templates")],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
# Your stuff: custom template context processors go here
]
},
}
]
# Override CSS class for the ERROR tag level to match Bootstrap class name
MESSAGE_TAGS = {message_constants.ERROR: "danger"}
####################################################################
# Environment specific settings
####################################################################
SECRET_KEY = os.environ.get('SECRET_KEY', 'lksdf98wrhkjs88dsf8-324ksdm')
# DEBUG = True
ALLOWED_HOSTS = ["*"]
def get_db_settings():
CPHTEST_ENVIRONMENT = os.environ.get('CPHTEST_ENVIRONMENT', 'local')
if CPHTEST_ENVIRONMENT == "local":
return {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if CPHTEST_ENVIRONMENT == "k8s":
return {
'default': {
'ENGINE': os.environ.get('DB_ENGINE', 'django.db.backends.postgresql'),
'NAME': os.environ.get('DB_NAME', 'cphtest'),
'USER': os.environ.get('DB_USER', 'cphtestuser'),
'PASSWORD': os.environ.get('DB_PASSWORD', 'django'),
'HOST': os.environ.get('DB_HOST', 'p1-postgresql.default.svc.cluster.local'),
'PORT': os.environ.get('DB_PORT', ''),
},
}
return {}
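# Local development falls back to SQLite; the "k8s" environment assembles a
# Postgres connection from DB_* environment variables. Any other value of
# CPHTEST_ENVIRONMENT yields an empty DATABASES dict, so database access fails.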
DATABASES = get_db_settings()
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# TODO-specific settings
TODO_STAFF_ONLY = False
TODO_DEFAULT_LIST_SLUG = 'tickets'
TODO_DEFAULT_ASSIGNEE = None
TODO_PUBLIC_SUBMIT_REDIRECT = '/'
####################################################################
#
####################################################################
| 975
| 0
| 46
|
1f010d3368e8fe21a4c6b38d8a3a7ce2c8c7822f
| 964
|
py
|
Python
|
resources/search.py
|
DanielNery/api-list-mscs-genius
|
9febbbb4211ca86a210803981cb5968077d7de72
|
[
"MIT"
] | 1
|
2021-11-20T22:09:23.000Z
|
2021-11-20T22:09:23.000Z
|
resources/search.py
|
DanielNery/api-list-mscs-genius
|
9febbbb4211ca86a210803981cb5968077d7de72
|
[
"MIT"
] | null | null | null |
resources/search.py
|
DanielNery/api-list-mscs-genius
|
9febbbb4211ca86a210803981cb5968077d7de72
|
[
"MIT"
] | null | null | null |
from flask_restful import Resource
import requests
import json
import os
import redis
HEADER = {
'User-Agent': 'CompuServe Classic/1.22',
'Accept': 'application/json',
'Host': os.getenv("HOST"),
'Authorization': f'Bearer {os.getenv("ACESS_TOKEN")}'
}
class Search(Resource):
"""Recurso responsável por retornar lista de artistas, para o usuário escolher"""
def get(self, artist_name):
"""
        Returns a list of artists
"""
querystring = {"q": artist_name}
url = f"https://{os.getenv('HOST')}/search"
try:
response = requests.get(url=url, headers=HEADER, params=querystring)
            if response.status_code != 200:
return json.loads(response.text), response.status_code
except Exception as e:
print(e)
return {"message": "Internal server error."}, 500
data = json.loads(response.text)
return data, 200
| 25.368421
| 85
| 0.607884
|
from flask_restful import Resource
import requests
import json
import os
import redis
HEADER = {
'User-Agent': 'CompuServe Classic/1.22',
'Accept': 'application/json',
'Host': os.getenv("HOST"),
'Authorization': f'Bearer {os.getenv("ACESS_TOKEN")}'
}
class Search(Resource):
"""Recurso responsável por retornar lista de artistas, para o usuário escolher"""
def get(self, artist_name):
"""
        Returns a list of artists
"""
querystring = {"q": artist_name}
url = f"https://{os.getenv('HOST')}/search"
try:
response = requests.get(url=url, headers=HEADER, params=querystring)
            if response.status_code != 200:
return json.loads(response.text), response.status_code
except Exception as e:
print(e)
return {"message": "Internal server error."}, 500
data = json.loads(response.text)
return data, 200
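# Hypothetical wiring sketch (not part of this file): a Flask-RESTful app
# would typically expose this resource as, e.g.:
#   api.add_resource(Search, "/search/<string:artist_name>")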
| 0
| 0
| 0
|
7baca6067411cc1ecfa07468272839cd744972f8
| 441
|
py
|
Python
|
string/firstUniqueCharacterInAString.py
|
G-MontaG/leetcode
|
444e8ee3f395c191a86eae0e42d028060ecd1686
|
[
"MIT"
] | 1
|
2021-02-10T18:14:55.000Z
|
2021-02-10T18:14:55.000Z
|
string/firstUniqueCharacterInAString.py
|
G-MontaG/leetcode
|
444e8ee3f395c191a86eae0e42d028060ecd1686
|
[
"MIT"
] | null | null | null |
string/firstUniqueCharacterInAString.py
|
G-MontaG/leetcode
|
444e8ee3f395c191a86eae0e42d028060ecd1686
|
[
"MIT"
] | null | null | null |
# https://leetcode.com/problems/first-unique-character-in-a-string/
| 27.5625
| 67
| 0.519274
|
# https://leetcode.com/problems/first-unique-character-in-a-string/
class Solution:
def firstUniqChar(self, s: str) -> int:
mapping = {}
for index, char in enumerate(s):
            if char in mapping:
mapping[char] = -1
else:
mapping[char] = index
for char in mapping:
if mapping[char] > -1:
return mapping[char]
return -1
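# Example: Solution().firstUniqChar("leetcode") returns 0 ('l' never repeats),
# and "aabb" returns -1. One pass to build the map, one over distinct chars:
# O(n) time, with the map bounded by the alphabet size.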
| 331
| -6
| 48
|
0cc5410e4e819af67fb7073f0bb5d856a89be207
| 453
|
py
|
Python
|
ktapp/migrations/0037_ktuser_fav_period.py
|
cu2/KT
|
8a0964b77dce150358637faa679d969a07e42f07
|
[
"CC-BY-3.0"
] | 5
|
2015-04-13T09:44:31.000Z
|
2017-10-19T01:07:58.000Z
|
ktapp/migrations/0037_ktuser_fav_period.py
|
cu2/KT
|
8a0964b77dce150358637faa679d969a07e42f07
|
[
"CC-BY-3.0"
] | 49
|
2015-02-15T07:12:05.000Z
|
2022-03-11T23:11:43.000Z
|
ktapp/migrations/0037_ktuser_fav_period.py
|
cu2/KT
|
8a0964b77dce150358637faa679d969a07e42f07
|
[
"CC-BY-3.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
| 21.571429
| 74
| 0.602649
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('ktapp', '0036_ktuser_bio'),
]
operations = [
migrations.AddField(
model_name='ktuser',
name='fav_period',
field=models.CharField(max_length=250, null=True, blank=True),
preserve_default=True,
),
]
| 0
| 323
| 23
|
45ca169aee71ee56ada82a211aa1e50134aad821
| 2,133
|
py
|
Python
|
mmtrack/datasets/youtube_vis_dataset.py
|
benxiao/mmtracking
|
4363a05659d5f26da97b9725075dcbb3b13f775f
|
[
"Apache-2.0"
] | 1
|
2022-03-12T21:36:42.000Z
|
2022-03-12T21:36:42.000Z
|
mmtrack/datasets/youtube_vis_dataset.py
|
Readpistol/mmtracking
|
131b8fb7c632324f88c3240229e411e801380f2a
|
[
"Apache-2.0"
] | null | null | null |
mmtrack/datasets/youtube_vis_dataset.py
|
Readpistol/mmtracking
|
131b8fb7c632324f88c3240229e411e801380f2a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.datasets import DATASETS
from .coco_video_dataset import CocoVideoDataset
@DATASETS.register_module()
class YouTubeVISDataset(CocoVideoDataset):
"""YouTube VIS dataset for video instance segmentation."""
CLASSES_2019_version = ('person', 'giant_panda', 'lizard', 'parrot',
'skateboard', 'sedan', 'ape', 'dog', 'snake',
'monkey', 'hand', 'rabbit', 'duck', 'cat', 'cow',
'fish', 'train', 'horse', 'turtle', 'bear',
'motorbike', 'giraffe', 'leopard', 'fox', 'deer',
'owl', 'surfboard', 'airplane', 'truck', 'zebra',
'tiger', 'elephant', 'snowboard', 'boat', 'shark',
'mouse', 'frog', 'eagle', 'earless_seal',
'tennis_racket')
CLASSES_2021_version = ('airplane', 'bear', 'bird', 'boat', 'car', 'cat',
'cow', 'deer', 'dog', 'duck', 'earless_seal',
'elephant', 'fish', 'flying_disc', 'fox', 'frog',
'giant_panda', 'giraffe', 'horse', 'leopard',
'lizard', 'monkey', 'motorbike', 'mouse', 'parrot',
'person', 'rabbit', 'shark', 'skateboard', 'snake',
'snowboard', 'squirrel', 'surfboard',
'tennis_racket', 'tiger', 'train', 'truck',
'turtle', 'whale', 'zebra')
| 48.477273
| 79
| 0.506329
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.datasets import DATASETS
from .coco_video_dataset import CocoVideoDataset
@DATASETS.register_module()
class YouTubeVISDataset(CocoVideoDataset):
"""YouTube VIS dataset for video instance segmentation."""
CLASSES_2019_version = ('person', 'giant_panda', 'lizard', 'parrot',
'skateboard', 'sedan', 'ape', 'dog', 'snake',
'monkey', 'hand', 'rabbit', 'duck', 'cat', 'cow',
'fish', 'train', 'horse', 'turtle', 'bear',
'motorbike', 'giraffe', 'leopard', 'fox', 'deer',
'owl', 'surfboard', 'airplane', 'truck', 'zebra',
'tiger', 'elephant', 'snowboard', 'boat', 'shark',
'mouse', 'frog', 'eagle', 'earless_seal',
'tennis_racket')
CLASSES_2021_version = ('airplane', 'bear', 'bird', 'boat', 'car', 'cat',
'cow', 'deer', 'dog', 'duck', 'earless_seal',
'elephant', 'fish', 'flying_disc', 'fox', 'frog',
'giant_panda', 'giraffe', 'horse', 'leopard',
'lizard', 'monkey', 'motorbike', 'mouse', 'parrot',
'person', 'rabbit', 'shark', 'skateboard', 'snake',
'snowboard', 'squirrel', 'surfboard',
'tennis_racket', 'tiger', 'train', 'truck',
'turtle', 'whale', 'zebra')
def __init__(self, dataset_version, *args, **kwargs):
self.set_dataset_classes(dataset_version)
super().__init__(*args, **kwargs)
@classmethod
def set_dataset_classes(cls, dataset_version):
if dataset_version == '2019':
cls.CLASSES = cls.CLASSES_2019_version
elif dataset_version == '2021':
cls.CLASSES = cls.CLASSES_2021_version
else:
raise NotImplementedError('Not supported YouTubeVIS dataset'
f'version: {dataset_version}')
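# Usage sketch (hypothetical arguments): the version must be given explicitly,
# e.g. YouTubeVISDataset(dataset_version='2019', ann_file=..., pipeline=...);
# any other version string raises NotImplementedError.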
| 485
| 0
| 53
|
b26e7b4e1b789021e57269548d99674e7d9e0fb6
| 2,198
|
py
|
Python
|
easyDiffractionApp/Logic/DisplayModels/StatusModel.py
|
rozyczko/easyDiffractionApp
|
6b088e3cb19f943e6eee0e86c3c23515b7c6a084
|
[
"BSD-3-Clause"
] | 1
|
2021-05-25T15:26:44.000Z
|
2021-05-25T15:26:44.000Z
|
easyDiffractionApp/Logic/DisplayModels/StatusModel.py
|
rozyczko/easyDiffractionApp
|
6b088e3cb19f943e6eee0e86c3c23515b7c6a084
|
[
"BSD-3-Clause"
] | 138
|
2021-02-12T07:59:04.000Z
|
2022-03-26T12:07:19.000Z
|
easyDiffractionApp/Logic/DisplayModels/StatusModel.py
|
rozyczko/easyDiffractionApp
|
6b088e3cb19f943e6eee0e86c3c23515b7c6a084
|
[
"BSD-3-Clause"
] | 3
|
2021-05-07T07:08:25.000Z
|
2021-11-02T09:53:26.000Z
|
# SPDX-FileCopyrightText: 2021 easyDiffraction contributors <support@easydiffraction.org>
# SPDX-License-Identifier: BSD-3-Clause
# © 2021 Contributors to the easyDiffraction project <https://github.com/easyScience/easyDiffractionApp>
__author__ = 'github.com/andrewsazonov'
__version__ = '0.0.1'
from random import random
from PySide2.QtCore import QPointF
from PySide2.QtCharts import QtCharts
| 27.475
| 104
| 0.641492
|
# SPDX-FileCopyrightText: 2021 easyDiffraction contributors <support@easydiffraction.org>
# SPDX-License-Identifier: BSD-3-Clause
# © 2021 Contributors to the easyDiffraction project <https://github.com/easyScience/easyDiffractionApp>
__author__ = 'github.com/andrewsazonov'
__version__ = '0.0.1'
from random import random
from PySide2.QtCore import QPointF
from PySide2.QtCharts import QtCharts
class StatusModel:
    def __init__(self, parent=None):
        self._dataObj = None
        self._lowerSeriesRefs = []
        self._upperSeriesRefs = []
def updateSeries(self):
"""
Generates new data and updates the GUI ChartView LineSeries.
"""
if not self._lowerSeriesRefs or not self._upperSeriesRefs:
return
lowerSeries = self._dataObj.get_lowerXY()
upperSeries = self._dataObj.get_upperXY()
for seriesRef in self._lowerSeriesRefs:
seriesRef.replace(lowerSeries)
for seriesRef in self._upperSeriesRefs:
seriesRef.replace(upperSeries)
def updateData(self, dataObj):
"""
        Update the stored data object and refresh the chart series.
"""
self._dataObj = dataObj
self.updateSeries()
def addLowerSeriesRef(self, seriesRef):
"""
Sets series to be a reference to the GUI ChartView LineSeries.
"""
self._lowerSeriesRefs.append(seriesRef)
def addUpperSeriesRef(self, seriesRef):
"""
Sets series to be a reference to the GUI ChartView LineSeries.
"""
self._upperSeriesRefs.append(seriesRef)
class CalculatedDataModel:
def __init__(self, dataObj=None):
self._seriesRef = None
self._dataObj = dataObj
def updateSeries(self):
"""
Generates new data and updates the GUI ChartView LineSeries.
"""
if self._seriesRef is None:
return
series = self._dataObj.get_fit_XY()
self._seriesRef.replace(series)
def updateData(self, dataObj):
"""
        Update the stored data object and refresh the chart series.
"""
self._dataObj = dataObj
self.updateSeries()
def setSeriesRef(self, seriesRef):
"""
Sets series to be a reference to the GUI ChartView LineSeries.
"""
self._seriesRef = seriesRef
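# Assumed workflow sketch: model = CalculatedDataModel(dataObj);
# model.setSeriesRef(lineSeries); later calls to model.updateData(newDataObj)
# replace the series points with newDataObj.get_fit_XY().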
| 118
| 1,633
| 46
|
41c794a5523ae6175185d6430eee0502fa65573d
| 1,349
|
py
|
Python
|
run.py
|
iustce/cesa-web
|
8b6b1fd8a66277b7319fdbf327e19948cc56917d
|
[
"MIT"
] | 1
|
2018-10-13T19:48:05.000Z
|
2018-10-13T19:48:05.000Z
|
run.py
|
iustce/cesa-web
|
8b6b1fd8a66277b7319fdbf327e19948cc56917d
|
[
"MIT"
] | null | null | null |
run.py
|
iustce/cesa-web
|
8b6b1fd8a66277b7319fdbf327e19948cc56917d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# python imports
import os
import subprocess
import sys, traceback
from flask.ext.migrate import MigrateCommand
from flask.ext.script import Manager
from database import manager as database_manager
try:
from project import app
from project.application import configure_app
from project.config import DefaultConfig, DevelopmentConfig, ProductionConfig
except ImportError:
print ' *** please install/update requirements or fix the problem ***'
traceback.print_exc(file=sys.stdout)
exit(0)
manager = Manager(app)
manager.add_command('database', database_manager)
manager.add_command('migration', MigrateCommand)
fwpath = os.path.abspath(os.path.dirname(__file__))
venv_dir = os.path.join(fwpath, 'venv')
if __name__ == '__main__':
manager.run()
| 24.981481
| 111
| 0.731653
|
# -*- coding: utf-8 -*-
# python imports
import os
import subprocess
import sys, traceback
from flask.ext.migrate import MigrateCommand
from flask.ext.script import Manager
from database import manager as database_manager
try:
from project import app
from project.application import configure_app
from project.config import DefaultConfig, DevelopmentConfig, ProductionConfig
except ImportError:
print ' *** please install/update requirements or fix the problem ***'
traceback.print_exc(file=sys.stdout)
exit(0)
manager = Manager(app)
manager.add_command('database', database_manager)
manager.add_command('migration', MigrateCommand)
fwpath = os.path.abspath(os.path.dirname(__file__))
venv_dir = os.path.join(fwpath, 'venv')
@manager.command
def run():
configure_app(app, DevelopmentConfig())
app.run(host='0.0.0.0', port=5000)
@manager.command
def import_local_config_file(filename):
if not os.path.isabs(filename):
filename = os.path.join(os.getcwd(), filename)
configure_app(app, filename, is_pyfile=True)
app.run(host='0.0.0.0', port=5000)
@manager.command
def test():
pass
@manager.command
def update_requirements():
subprocess.call([os.path.join(venv_dir, 'bin/pip'), 'install', '-r', os.path.join(fwpath, 'requirements')])
if __name__ == '__main__':
manager.run()
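# Typical invocations via Flask-Script: `python run.py run` starts the dev
# server, `python run.py update_requirements` syncs the virtualenv, and the
# `database` / `migration` sub-managers handle DB setup and migrations.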
| 386
| 0
| 88
|
3622ebf53eb605a0ad50e3ba80cbe1fe001d8264
| 11,174
|
py
|
Python
|
docs/model.py
|
DLR-SC/gitlab2prov
|
0a548cf85121faa63ef9abbbf0d43aa4e0bc3d57
|
[
"MIT"
] | 13
|
2019-10-14T19:28:04.000Z
|
2022-03-24T09:46:50.000Z
|
docs/model.py
|
DLR-SC/gitlab2prov
|
0a548cf85121faa63ef9abbbf0d43aa4e0bc3d57
|
[
"MIT"
] | 50
|
2019-10-15T09:05:09.000Z
|
2022-03-28T10:51:22.000Z
|
docs/model.py
|
DLR-SC/gitlab2prov
|
0a548cf85121faa63ef9abbbf0d43aa4e0bc3d57
|
[
"MIT"
] | 2
|
2020-05-16T15:40:04.000Z
|
2021-09-14T12:08:19.000Z
|
"""PROV model fpr GitLab2PROV."""
__author__ = "Claas de Boer, Andreas Schreiber, Lynn von Kurnatowski"
__copyright__ = "Copyright 2020, German Aerospace Center (DLR) and individual contributors"
__license__ = "MIT"
__version__ = "0.5"
__status__ = "Development"
from prov.model import ProvDocument
from prov.constants import PROV_LABEL
from prov.dot import prov_to_dot
add = ProvDocument()
add.set_default_namespace("gitlab2prov:")
add.activity("Commit", other_attributes={"prov:type": "commit", "title": "", "message": "", "id": "", "short_id": "", "prov:startedAt": "", "prov:endedAt": ""})
add.activity("Parent Commit", other_attributes={"prov:type": "commit", "title": "", "message": "", "id": "", "short_id": "", "prov:startedAt": "", "prov:endedAt": ""})
add.agent("Committer", other_attributes={"prov:type": "user", "prov:role": "committer", "name": "", "email": ""})
add.agent("Author", other_attributes={"prov:type": "user", "prov:role": "author", "name": "", "email": ""})
add.entity("File", other_attributes={"prov:type": "file", "path_at_addition": ""})
add.entity("File Version", other_attributes={"prov:type": "file_version", "old_path": "", "new_path": ""})
add.wasInformedBy("Commit", "Parent Commit")
add.wasAssociatedWith("Commit", "Committer")
add.wasAssociatedWith("Commit", "Author")
add.wasGeneratedBy("File", "Commit")
add.wasGeneratedBy("File Version", "Commit")
add.wasAttributedTo("File", "Author")
add.wasAttributedTo("File Version", "Author")
add.specializationOf("File Version", "File")
mod = ProvDocument()
mod.set_default_namespace("gitlab2prov:")
mod.activity("Commit", other_attributes={"prov:type": "commit", "title": "", "message": "", "id": "", "short_id": "", "prov:startedAt": "", "prov:endedAt": ""},)
mod.activity("Parent Commit", other_attributes={"prov:type": "commit", "title": "", "message": "", "id": "", "short_id": "", "prov:startedAt": "", "prov:endedAt": ""},)
mod.agent("Committer", other_attributes={"prov:type": "user", "prov:role": "committer", "name": "", "email": ""})
mod.agent("Author", other_attributes={"prov:type": "user", "prov:role": "author", "name": "", "email": "",})
mod.entity("File", other_attributes={"prov:type": "file", "path_at_addition": ""})
mod.entity("File Version N", other_attributes={"prov:type": "file_version", "new_path": "", "old_path": ""})
mod.entity("File Version N-1", other_attributes={"prov:type": "file_version", "new_path": "", "old_path": ""})
mod.wasInformedBy("Commit", "Parent Commit")
mod.wasAssociatedWith("Commit", "Author")
mod.wasAssociatedWith("Commit", "Committer")
mod.used("Commit", "File Version N-1")
mod.wasGeneratedBy("File Version N", "Commit")
mod.wasRevisionOf("File Version N", "File Version N-1")
mod.specializationOf("File Version N", "File")
mod.specializationOf("File Version N-1", "File")
mod.wasAttributedTo("File Version N", "Author")
rem = ProvDocument()
rem.set_default_namespace("gitlab2prov:")
rem.activity("Commit", other_attributes={"prov:type": "commit", "title": "", "message": "", "id": "", "short_id": "", "prov:startedAt": "", "prov:endedAt": ""})
rem.activity("Parent Commit", other_attributes={"prov:type": "commit", "title": "", "message": "", "id": "", "short_id": "", "prov:startedAt": "", "prov:endedAt": ""})
rem.agent("Committer", other_attributes={"prov:type": "user", "prov:role": "committer", "name": "", "email": ""})
rem.agent("Author", other_attributes={"prov:type": "user", "prov:role": "author", "name": "", "email": ""})
rem.entity("File", other_attributes={"prov:type": "file", "path_at_addition": ""})
rem.entity("File Version", other_attributes={"prov:type": "file_version", "new_path": "", "old_path": ""})
rem.wasInformedBy("Commit", "Parent Commit")
rem.wasAssociatedWith("Commit", "Committer")
rem.wasAssociatedWith("Commit", "Author")
rem.wasInvalidatedBy("File Version", "Commit")
rem.specializationOf("File Version", "File")
com = ProvDocument()
com.set_default_namespace("gitlab2prov:")
com.agent("Creator", other_attributes={"prov:type": "user", "prov:role": "creator", "name": ""})
com.agent("Annotator", other_attributes={"prov:type": "user", "prov:role": "initiator", "name": ""})
com.activity("Commit Creation", other_attributes={"prov:type": "creation", "prov:startedAt": "", "prov:endedAt": ""})
com.activity("Commit Annotation", other_attributes={"prov:type": "event", "prov:startedAt": "", "prov:endedAt": "", "event": ""})
com.activity("Git Commit", other_attributes={"prov:type": "commit", "title": "", "message": "", "id": "", "short_id": "", "prov:startedAt": "", "prov:endedAt": ""})
com.wasInformedBy("Commit Creation", "Git Commit")
com.entity("Commit", other_attributes={"prov:type": "commit_resource", "title": "", "message": "", "short_id": "", "id": ""})
com.entity("Commit Version", other_attributes={"prov:type": "commit_resource_version"})
com.entity("Annotated Commit Version", other_attributes={"prov:type": "commit_resource_version"},)
com.wasAssociatedWith("Commit Creation", "Creator")
com.wasAttributedTo("Commit", "Creator")
com.wasAttributedTo("Commit Version", "Creator")
com.wasGeneratedBy("Commit", "Commit Creation")
com.wasGeneratedBy("Commit Version", "Commit Creation")
com.wasAttributedTo("Annotated Commit Version", "Annotator")
com.wasAssociatedWith("Commit Annotation", "Annotator")
com.used("Commit Annotation", "Commit Version")
com.wasInformedBy("Commit Annotation", "Commit Creation")
com.wasGeneratedBy("Annotated Commit Version", "Commit Annotation")
com.specializationOf("Commit Version", "Commit")
com.specializationOf("Annotated Commit Version", "Commit")
com.wasDerivedFrom("Annotated Commit Version", "Commit Version")
mr = ProvDocument()
mr.set_default_namespace("gitlab2prov:")
mr.agent("Creator", other_attributes={"prov:type": "user", "prov:role": "creator", "name": ""},)
mr.agent("Annotator", other_attributes={"prov:type": "user", "prov:role": "initiator", "name": ""})
mr.activity("Merge Request Creation", other_attributes={"prov:type": "merge_request_creation", "prov:startedAt": "", "prov:endedAt": ""})
mr.activity("Merge Request Annotation", other_attributes={"prov:type": "event", "prov:startedAt": "", "prov:endedAt": "", "event": ""})
mr.entity("Merge Request", other_attributes={"prov:type": "merge_request_resource", "id": "", "iid": "", "title": "", "description": "", "web_url": "", "project_id": "", "source_branch": "", "target_branch": "", "source_project_url": "", "target_project_url": ""})
mr.entity("Merge Request Version", other_attributes={"prov:type": "merge_request_resource_version"},)
mr.entity("Annotated Merge Request Version", other_attributes={"prov:type": "merge_request_resource_version"},)
mr.wasInformedBy("Merge Request Annotation", "Merge Request Creation")
mr.wasGeneratedBy("Merge Request", "Merge Request Creation")
mr.wasGeneratedBy("Merge Request Version", "Merge Request Creation")
mr.wasGeneratedBy("Annotated Merge Request Version", "Merge Request Annotation")
mr.used("Merge Request Annotation", "Merge Request Version")
mr.specializationOf("Merge Request Version", "Merge Request")
mr.specializationOf("Annotated Merge Request Version", "Merge Request")
mr.wasDerivedFrom("Annotated Merge Request Version", "Merge Request Version")
mr.wasAttributedTo("Annotated Merge Request Version", "Annotator")
mr.wasAttributedTo("Merge Request Version", "Creator")
mr.wasAttributedTo("Merge Request", "Creator")
mr.wasAssociatedWith("Merge Request Creation", "Creator")
mr.wasAssociatedWith("Merge Request Annotation", "Annotator")
iss = ProvDocument()
iss.set_default_namespace("gitlab2prov:")
iss.agent("Creator", other_attributes={"prov:type": "user", "prov:role": "creator", "name": ""})
iss.agent("Annotator", other_attributes={"prov:type": "user", "prov:role": "initiator", "name": ""})
iss.activity("Issue Creation", other_attributes={"prov:type": "issue_creation", "prov:startedAt": "", "prov:endedAt": ""})
iss.activity("Issue Annotation", other_attributes={"prov:type": "event", "prov:startedAt": "", "prov:endedAt": "", "event": ""})
iss.entity("Issue", other_attributes={"prov:type": "issue_resource", "id": "", "iid": "", "title": "", "description": "", "project_id": "", "web_url": ""})
iss.entity("Issue Version", other_attributes={"prov:type": "issue_resource_version"})
iss.entity("Annotated Issue Version", other_attributes={"prov:type": "issue_resource_version"})
iss.wasInformedBy("Issue Annotation", "Issue Creation")
iss.wasGeneratedBy("Issue", "Issue Creation")
iss.wasGeneratedBy("Issue Version", "Issue Creation")
iss.wasGeneratedBy("Annotated Issue Version", "Issue Annotation")
iss.used("Issue Annotation", "Issue Version")
iss.specializationOf("Issue Version", "Issue")
iss.specializationOf("Annotated Issue Version", "Issue")
iss.wasDerivedFrom("Annotated Issue Version", "Issue Version")
iss.wasAttributedTo("Annotated Issue Version", "Annotator")
iss.wasAttributedTo("Issue Version", "Creator")
iss.wasAttributedTo("Issue", "Creator")
iss.wasAssociatedWith("Issue Creation", "Creator")
iss.wasAssociatedWith("Issue Annotation", "Annotator")
release_tag_model = ProvDocument()
release_tag_model.set_default_namespace("gitlab2prov:")
release_tag_model.agent("User", {"name": "", "email": ""})
release_tag_model.activity("Release_Event")
release_tag_model.activity("Tag_Event")
release_tag_model.activity("Commit_Event")
release_tag_model.entity("Tag", {"prov:type": "prov:Collection", "name": "", "message": "", "commit": "", "target_commit": ""})
release_tag_model.entity("Release", {"prov:type": "prov:Collection", "name": "", "tag_name": "", "description": "", "created_at": "", "released_at": "", "commit_path": "", "tag_path": ""})
release_tag_model.entity("Commit", {"id": "", "short_id": "", "title": "", "message": "", "web_url": "", "created_at": ""})
release_tag_model.entity("Release_Evidence", {"sha": "", "filepath": "", "collected_at": ""})
release_tag_model.entity("Release_Asset", {"uri": "", "format": "", "filepath": ""})
release_tag_model.hadMember("Release_Asset", "Release")
release_tag_model.hadMember("Release_Evidence", "Release")
release_tag_model.hadMember("Tag", "Release")
release_tag_model.hadMember("Commit", "Tag")
release_tag_model.wasAssociatedWith("Commit_Event", "User")
release_tag_model.wasAssociatedWith("Release_Event", "User")
release_tag_model.wasAssociatedWith("Tag_Event", "User")
release_tag_model.wasAttributedTo("Release", "User")
release_tag_model.wasAttributedTo("Tag", "User")
release_tag_model.wasAttributedTo("Commit", "User")
release_tag_model.wasGeneratedBy("Release", "Release_Event")
release_tag_model.wasGeneratedBy("Tag", "Tag_Event")
release_tag_model.wasGeneratedBy("Commit", "Commit_Event")
for title, doc in [
("git_commit_model_add", add),
("git_commit_model_mod", mod),
("git_commit_model_del", rem),
("gitlab_commit_model", com),
("gitlab_issue_model", iss),
("gitlab_merge_request_model", mr),
("gitlab_release_tag_model", release_tag_model)
]:
prov_to_dot(doc, show_nary=False, use_labels=False, direction="BT").write_pdf(
f"pdfs/{title}.pdf"
)
prov_to_dot(doc, show_nary=False, use_labels=False, direction="BT").write_svg(
f"svgs/{title}.svg"
)
| 61.060109
| 264
| 0.707625
|
"""PROV model fpr GitLab2PROV."""
__author__ = "Claas de Boer, Andreas Schreiber, Lynn von Kurnatowski"
__copyright__ = "Copyright 2020, German Aerospace Center (DLR) and individual contributors"
__license__ = "MIT"
__version__ = "0.5"
__status__ = "Development"
from prov.model import ProvDocument
from prov.constants import PROV_LABEL
from prov.dot import prov_to_dot
add = ProvDocument()
add.set_default_namespace("gitlab2prov:")
add.activity("Commit", other_attributes={"prov:type": "commit", "title": "", "message": "", "id": "", "short_id": "", "prov:startedAt": "", "prov:endedAt": ""})
add.activity("Parent Commit", other_attributes={"prov:type": "commit", "title": "", "message": "", "id": "", "short_id": "", "prov:startedAt": "", "prov:endedAt": ""})
add.agent("Committer", other_attributes={"prov:type": "user", "prov:role": "committer", "name": "", "email": ""})
add.agent("Author", other_attributes={"prov:type": "user", "prov:role": "author", "name": "", "email": ""})
add.entity("File", other_attributes={"prov:type": "file", "path_at_addition": ""})
add.entity("File Version", other_attributes={"prov:type": "file_version", "old_path": "", "new_path": ""})
add.wasInformedBy("Commit", "Parent Commit")
add.wasAssociatedWith("Commit", "Committer")
add.wasAssociatedWith("Commit", "Author")
add.wasGeneratedBy("File", "Commit")
add.wasGeneratedBy("File Version", "Commit")
add.wasAttributedTo("File", "Author")
add.wasAttributedTo("File Version", "Author")
add.specializationOf("File Version", "File")
mod = ProvDocument()
mod.set_default_namespace("gitlab2prov:")
mod.activity("Commit", other_attributes={"prov:type": "commit", "title": "", "message": "", "id": "", "short_id": "", "prov:startedAt": "", "prov:endedAt": ""},)
mod.activity("Parent Commit", other_attributes={"prov:type": "commit", "title": "", "message": "", "id": "", "short_id": "", "prov:startedAt": "", "prov:endedAt": ""},)
mod.agent("Committer", other_attributes={"prov:type": "user", "prov:role": "committer", "name": "", "email": ""})
mod.agent("Author", other_attributes={"prov:type": "user", "prov:role": "author", "name": "", "email": "",})
mod.entity("File", other_attributes={"prov:type": "file", "path_at_addition": ""})
mod.entity("File Version N", other_attributes={"prov:type": "file_version", "new_path": "", "old_path": ""})
mod.entity("File Version N-1", other_attributes={"prov:type": "file_version", "new_path": "", "old_path": ""})
mod.wasInformedBy("Commit", "Parent Commit")
mod.wasAssociatedWith("Commit", "Author")
mod.wasAssociatedWith("Commit", "Committer")
mod.used("Commit", "File Version N-1")
mod.wasGeneratedBy("File Version N", "Commit")
mod.wasRevisionOf("File Version N", "File Version N-1")
mod.specializationOf("File Version N", "File")
mod.specializationOf("File Version N-1", "File")
mod.wasAttributedTo("File Version N", "Author")
rem = ProvDocument()
rem.set_default_namespace("gitlab2prov:")
rem.activity("Commit", other_attributes={"prov:type": "commit", "title": "", "message": "", "id": "", "short_id": "", "prov:startedAt": "", "prov:endedAt": ""})
rem.activity("Parent Commit", other_attributes={"prov:type": "commit", "title": "", "message": "", "id": "", "short_id": "", "prov:startedAt": "", "prov:endedAt": ""})
rem.agent("Committer", other_attributes={"prov:type": "user", "prov:role": "committer", "name": "", "email": ""})
rem.agent("Author", other_attributes={"prov:type": "user", "prov:role": "author", "name": "", "email": ""})
rem.entity("File", other_attributes={"prov:type": "file", "path_at_addition": ""})
rem.entity("File Version", other_attributes={"prov:type": "file_version", "new_path": "", "old_path": ""})
rem.wasInformedBy("Commit", "Parent Commit")
rem.wasAssociatedWith("Commit", "Committer")
rem.wasAssociatedWith("Commit", "Author")
rem.wasInvalidatedBy("File Version", "Commit")
rem.specializationOf("File Version", "File")
com = ProvDocument()
com.set_default_namespace("gitlab2prov:")
com.agent("Creator", other_attributes={"prov:type": "user", "prov:role": "creator", "name": ""})
com.agent("Annotator", other_attributes={"prov:type": "user", "prov:role": "initiator", "name": ""})
com.activity("Commit Creation", other_attributes={"prov:type": "creation", "prov:startedAt": "", "prov:endedAt": ""})
com.activity("Commit Annotation", other_attributes={"prov:type": "event", "prov:startedAt": "", "prov:endedAt": "", "event": ""})
com.activity("Git Commit", other_attributes={"prov:type": "commit", "title": "", "message": "", "id": "", "short_id": "", "prov:startedAt": "", "prov:endedAt": ""})
com.wasInformedBy("Commit Creation", "Git Commit")
com.entity("Commit", other_attributes={"prov:type": "commit_resource", "title": "", "message": "", "short_id": "", "id": ""})
com.entity("Commit Version", other_attributes={"prov:type": "commit_resource_version"})
com.entity("Annotated Commit Version", other_attributes={"prov:type": "commit_resource_version"},)
com.wasAssociatedWith("Commit Creation", "Creator")
com.wasAttributedTo("Commit", "Creator")
com.wasAttributedTo("Commit Version", "Creator")
com.wasGeneratedBy("Commit", "Commit Creation")
com.wasGeneratedBy("Commit Version", "Commit Creation")
com.wasAttributedTo("Annotated Commit Version", "Annotator")
com.wasAssociatedWith("Commit Annotation", "Annotator")
com.used("Commit Annotation", "Commit Version")
com.wasInformedBy("Commit Annotation", "Commit Creation")
com.wasGeneratedBy("Annotated Commit Version", "Commit Annotation")
com.specializationOf("Commit Version", "Commit")
com.specializationOf("Annotated Commit Version", "Commit")
com.wasDerivedFrom("Annotated Commit Version", "Commit Version")
mr = ProvDocument()
mr.set_default_namespace("gitlab2prov:")
mr.agent("Creator", other_attributes={"prov:type": "user", "prov:role": "creator", "name": ""},)
mr.agent("Annotator", other_attributes={"prov:type": "user", "prov:role": "initiator", "name": ""})
mr.activity("Merge Request Creation", other_attributes={"prov:type": "merge_request_creation", "prov:startedAt": "", "prov:endedAt": ""})
mr.activity("Merge Request Annotation", other_attributes={"prov:type": "event", "prov:startedAt": "", "prov:endedAt": "", "event": ""})
mr.entity("Merge Request", other_attributes={"prov:type": "merge_request_resource", "id": "", "iid": "", "title": "", "description": "", "web_url": "", "project_id": "", "source_branch": "", "target_branch": "", "source_project_url": "", "target_project_url": ""})
mr.entity("Merge Request Version", other_attributes={"prov:type": "merge_request_resource_version"},)
mr.entity("Annotated Merge Request Version", other_attributes={"prov:type": "merge_request_resource_version"},)
mr.wasInformedBy("Merge Request Annotation", "Merge Request Creation")
mr.wasGeneratedBy("Merge Request", "Merge Request Creation")
mr.wasGeneratedBy("Merge Request Version", "Merge Request Creation")
mr.wasGeneratedBy("Annotated Merge Request Version", "Merge Request Annotation")
mr.used("Merge Request Annotation", "Merge Request Version")
mr.specializationOf("Merge Request Version", "Merge Request")
mr.specializationOf("Annotated Merge Request Version", "Merge Request")
mr.wasDerivedFrom("Annotated Merge Request Version", "Merge Request Version")
mr.wasAttributedTo("Annotated Merge Request Version", "Annotator")
mr.wasAttributedTo("Merge Request Version", "Creator")
mr.wasAttributedTo("Merge Request", "Creator")
mr.wasAssociatedWith("Merge Request Creation", "Creator")
mr.wasAssociatedWith("Merge Request Annotation", "Annotator")
iss = ProvDocument()
iss.set_default_namespace("gitlab2prov:")
iss.agent("Creator", other_attributes={"prov:type": "user", "prov:role": "creator", "name": ""})
iss.agent("Annotator", other_attributes={"prov:type": "user", "prov:role": "initiator", "name": ""})
iss.activity("Issue Creation", other_attributes={"prov:type": "issue_creation", "prov:startedAt": "", "prov:endedAt": ""})
iss.activity("Issue Annotation", other_attributes={"prov:type": "event", "prov:startedAt": "", "prov:endedAt": "", "event": ""})
iss.entity("Issue", other_attributes={"prov:type": "issue_resource", "id": "", "iid": "", "title": "", "description": "", "project_id": "", "web_url": ""})
iss.entity("Issue Version", other_attributes={"prov:type": "issue_resource_version"})
iss.entity("Annotated Issue Version", other_attributes={"prov:type": "issue_resource_version"})
iss.wasInformedBy("Issue Annotation", "Issue Creation")
iss.wasGeneratedBy("Issue", "Issue Creation")
iss.wasGeneratedBy("Issue Version", "Issue Creation")
iss.wasGeneratedBy("Annotated Issue Version", "Issue Annotation")
iss.used("Issue Annotation", "Issue Version")
iss.specializationOf("Issue Version", "Issue")
iss.specializationOf("Annotated Issue Version", "Issue")
iss.wasDerivedFrom("Annotated Issue Version", "Issue Version")
iss.wasAttributedTo("Annotated Issue Version", "Annotator")
iss.wasAttributedTo("Issue Version", "Creator")
iss.wasAttributedTo("Issue", "Creator")
iss.wasAssociatedWith("Issue Creation", "Creator")
iss.wasAssociatedWith("Issue Annotation", "Annotator")
release_tag_model = ProvDocument()
release_tag_model.set_default_namespace("gitlab2prov:")
release_tag_model.agent("User", {"name": "", "email": ""})
release_tag_model.activity("Release_Event")
release_tag_model.activity("Tag_Event")
release_tag_model.activity("Commit_Event")
release_tag_model.entity("Tag", {"prov:type": "prov:Collection", "name": "", "message": "", "commit": "", "target_commit": ""})
release_tag_model.entity("Release", {"prov:type": "prov:Collection", "name": "", "tag_name": "", "description": "", "created_at": "", "released_at": "", "commit_path": "", "tag_path": ""})
release_tag_model.entity("Commit", {"id": "", "short_id": "", "title": "", "message": "", "web_url": "", "created_at": ""})
release_tag_model.entity("Release_Evidence", {"sha": "", "filepath": "", "collected_at": ""})
release_tag_model.entity("Release_Asset", {"uri": "", "format": "", "filepath": ""})
release_tag_model.hadMember("Release_Asset", "Release")
release_tag_model.hadMember("Release_Evidence", "Release")
release_tag_model.hadMember("Tag", "Release")
release_tag_model.hadMember("Commit", "Tag")
release_tag_model.wasAssociatedWith("Commit_Event", "User")
release_tag_model.wasAssociatedWith("Release_Event", "User")
release_tag_model.wasAssociatedWith("Tag_Event", "User")
release_tag_model.wasAttributedTo("Release", "User")
release_tag_model.wasAttributedTo("Tag", "User")
release_tag_model.wasAttributedTo("Commit", "User")
release_tag_model.wasGeneratedBy("Release", "Release_Event")
release_tag_model.wasGeneratedBy("Tag", "Tag_Event")
release_tag_model.wasGeneratedBy("Commit", "Commit_Event")
for title, doc in [
("git_commit_model_add", add),
("git_commit_model_mod", mod),
("git_commit_model_del", rem),
("gitlab_commit_model", com),
("gitlab_issue_model", iss),
("gitlab_merge_request_model", mr),
("gitlab_release_tag_model", release_tag_model)
]:
prov_to_dot(doc, show_nary=False, use_labels=False, direction="BT").write_pdf(
f"pdfs/{title}.pdf"
)
prov_to_dot(doc, show_nary=False, use_labels=False, direction="BT").write_svg(
f"svgs/{title}.svg"
)
| 0
| 0
| 0
|
f495910cbad974850a149f592f1022624205f1c7
| 9,479
|
py
|
Python
|
scripts/go_stats_utils.py
|
kltm/go-site
|
fe6797ed1291bd0d12df83b7c9d670c91a0fb526
|
[
"BSD-3-Clause"
] | 31
|
2016-11-01T13:11:43.000Z
|
2022-02-28T05:05:16.000Z
|
scripts/go_stats_utils.py
|
kltm/go-site
|
fe6797ed1291bd0d12df83b7c9d670c91a0fb526
|
[
"BSD-3-Clause"
] | 1,172
|
2015-01-29T23:47:53.000Z
|
2022-03-30T05:22:01.000Z
|
scripts/go_stats_utils.py
|
kltm/go-site
|
fe6797ed1291bd0d12df83b7c9d670c91a0fb526
|
[
"BSD-3-Clause"
] | 92
|
2015-02-11T03:10:55.000Z
|
2022-03-01T08:16:02.000Z
|
import json
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from enum import Enum
# This is a hard coded list of evidence, better organized for readability
ev_all = ['EXP', 'IDA', 'IMP', 'IGI', 'IPI', 'IEP', 'IGC', 'RCA', 'IBA', 'IKR', 'IC', 'NAS', 'ND', 'TAS', 'HDA', 'HEP', 'HGI', 'HMP', 'ISA', 'ISM', 'ISO', 'ISS', 'IEA']
# This is a hard coded list of reference genomes that should always be present in a GO release
REFERENCE_GENOME_IDS = [
"NCBITaxon:9606",
"NCBITaxon:10116",
"NCBITaxon:10090",
"NCBITaxon:3702",
"NCBITaxon:7955",
"NCBITaxon:6239",
"NCBITaxon:559292",
"NCBITaxon:7227",
"NCBITaxon:44689",
"NCBITaxon:4896",
"NCBITaxon:83333"
]
BP_TERM_ID = "GO:0008150"
MF_TERM_ID = "GO:0003674"
CC_TERM_ID = "GO:0005575"
# useful grouping of evidences as discussed with Pascale
EVIDENCE_GROUPS = {
"EXP": ["EXP", "IDA", "IEP", "IGI", "IMP", "IPI"],
"HTP": ["HDA", "HEP", "HGI", "HMP", "HTP"],
"PHYLO": ["IBA", "IRD", "IKR", "IMR"],
"IEA": ["IEA"],
"ND": ["ND"],
"OTHER": ["IC", "IGC", "ISA", "ISM", "ISO", "ISS", "NAS", "RCA", "TAS"]
}
EVIDENCE_MIN_GROUPS = {
"EXPERIMENTAL" : EVIDENCE_GROUPS["EXP"] + EVIDENCE_GROUPS["HTP"],
"COMPUTATIONAL" : EVIDENCE_GROUPS["PHYLO"] + EVIDENCE_GROUPS["IEA"] + EVIDENCE_GROUPS["OTHER"]
}
global_session = None
def fetch(url):
"""
Error proof method to get data from HTTP request
If an error occured, return None
"""
global global_session
# Ensure we are using the same session - creating too many sessions could crash this script
if global_session is None:
global_session = requests_retry(global_session)
try:
r = global_session.get(url)
return r
except Exception as x:
print("Query GET " , url , " failed: ", x)
return None
def golr_fetch(golr_base_url, select_query):
"""
Error proof method to get data from GOLr
If an HTTP error occurs, return None, otherwise return the json object
"""
r = fetch(golr_base_url + select_query)
if r is None:
return None
response = r.json()
return response
# utility function to build a list from a solr/golr facet array
# utility function to transform a list [A, 1, B, 2] into a map {A: 1, B: 2}
# utility function to build a reverse map: { "a": 1, "b": 1, "c": 2 } -> {1: ["a", "b"], 2: ["c"]}
# utility function to cluster elements of an input map based on another map of synonyms
# similar as above but the value of each key is also a map
# reorder map (python 3.6 keeps order in which items are inserted in map: https://stackoverflow.com/questions/613183/how-do-i-sort-a-dictionary-by-value)
def bioentity_type(str_type):
"""
In a nutshell, collapse all RNA related types into RNA
"""
if "RNA" in str_type or "ribozyme" in str_type or "transcript" in str_type:
return "RNA_cluster"
return str_type
def sum_map_values(map):
"""
Utility function to sum up the values of a map. Assume the map values are all numbers
"""
total = 0
for key, val in map.items():
total += val
return total
| 31.387417
| 169
| 0.606815
|
import json
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from enum import Enum
# This is a hard coded list of evidence, better organized for readability
ev_all = ['EXP', 'IDA', 'IMP', 'IGI', 'IPI', 'IEP', 'IGC', 'RCA', 'IBA', 'IKR', 'IC', 'NAS', 'ND', 'TAS', 'HDA', 'HEP', 'HGI', 'HMP', 'ISA', 'ISM', 'ISO', 'ISS', 'IEA']
class CLOSURE_LABELS(Enum):
ISA = "isa_closure"
ISA_PARTOF = "isa_partof_closure"
REGULATES = "regulates_closure"
# This is a hard coded list of reference genomes that should always be present in a GO release
REFERENCE_GENOME_IDS = [
"NCBITaxon:9606",
"NCBITaxon:10116",
"NCBITaxon:10090",
"NCBITaxon:3702",
"NCBITaxon:7955",
"NCBITaxon:6239",
"NCBITaxon:559292",
"NCBITaxon:7227",
"NCBITaxon:44689",
"NCBITaxon:4896",
"NCBITaxon:83333"
]
BP_TERM_ID = "GO:0008150"
MF_TERM_ID = "GO:0003674"
CC_TERM_ID = "GO:0005575"
# useful grouping of evidences as discussed with Pascale
EVIDENCE_GROUPS = {
"EXP": ["EXP", "IDA", "IEP", "IGI", "IMP", "IPI"],
"HTP": ["HDA", "HEP", "HGI", "HMP", "HTP"],
"PHYLO": ["IBA", "IRD", "IKR", "IMR"],
"IEA": ["IEA"],
"ND": ["ND"],
"OTHER": ["IC", "IGC", "ISA", "ISM", "ISO", "ISS", "NAS", "RCA", "TAS"]
}
EVIDENCE_MIN_GROUPS = {
"EXPERIMENTAL" : EVIDENCE_GROUPS["EXP"] + EVIDENCE_GROUPS["HTP"],
"COMPUTATIONAL" : EVIDENCE_GROUPS["PHYLO"] + EVIDENCE_GROUPS["IEA"] + EVIDENCE_GROUPS["OTHER"]
}
def is_experimental(evidence_type):
return evidence_type in EVIDENCE_MIN_GROUPS["EXPERIMENTAL"]
def is_computational(evidence_type):
return evidence_type in EVIDENCE_MIN_GROUPS["COMPUTATIONAL"]
def get_evidence_min_group(evidence_type):
for group, codes in EVIDENCE_MIN_GROUPS.items():
if evidence_type in codes:
return group
return "ND"
def aspect_from_source(source):
if source == "molecular_function":
return "MF"
elif source == "biological_process":
return "BP"
elif source == "cellular_component":
return "CC"
return "UNK"
global_session = None
def requests_retry(retries = 3, backoff = 0.3, session = None):
session = session or requests.Session()
retry = Retry(
total = retries,
read = retries,
connect = retries,
backoff_factor = backoff,
status_forcelist = (429, 500, 502, 503, 504)
)
adapter = HTTPAdapter(max_retries=retry)
session.mount("http://", adapter)
session.mount("https://", adapter)
return session
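# Illustrative usage (hypothetical URL): build one shared session that retries
# transient HTTP failures with exponential backoff before giving up.
#
#   session = requests_retry(retries=5, backoff=0.5)
#   response = session.get("http://golr.example.org/select?q=*:*")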
def fetch(url):
"""
    Error-proof method to get data from an HTTP request.
    If an error occurs, return None
"""
global global_session
# Ensure we are using the same session - creating too many sessions could crash this script
if global_session is None:
        global_session = requests_retry(session=global_session)
try:
r = global_session.get(url)
return r
except Exception as x:
print("Query GET " , url , " failed: ", x)
return None
def post(url, params):
global global_session
    global_session = requests_retry(session=global_session)
try:
r = global_session.post(url, data = params)
return r
except Exception as x:
print("Query POST " , url , " failed: ", x)
return None
def golr_fetch(golr_base_url, select_query):
"""
    Error-proof method to get data from GOLr.
    If an HTTP error occurs, return None; otherwise return the JSON object
"""
r = fetch(golr_base_url + select_query)
if r is None:
return None
response = r.json()
return response
def golr_fetch_by_taxon(golr_base_url, select_query, taxon):
return golr_fetch(golr_base_url, select_query + "&fq=taxon:\"" + taxon + "\"")
def golr_fetch_by_taxa(golr_base_url, select_query, taxa):
tmp = ""
if isinstance(taxa, list):
tmp = "&fq=taxon:(\"" + taxa.join("\" ") + "\")"
else:
tmp = "&fq=taxon:\"" + taxa + "\""
print("*** ", golr_base_url + select_query + tmp)
return golr_fetch(golr_base_url, select_query + tmp)
# utility function to build a list from a solr/golr facet array
def build_list(items_list, min_size = None):
ls = []
for i in range(0, len(items_list), 2):
if min_size is None or items_list[i + 1] > min_size:
ls.append(items_list[i])
return ls
# utility function to transform a list [A, 1, B, 2] into a map {A: 1, B: 2}
def build_map(items_list, min_size = None):
map = {}
for i in range(0, len(items_list), 2):
if min_size is None or items_list[i + 1] > min_size:
map[items_list[i]] = items_list[i + 1]
return map
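# Illustrative facet layout these helpers consume: solr/golr facet arrays
# alternate term and count (values hypothetical).
#
#   facets = ["GO:0008150", 120, "GO:0003674", 80]
#   build_list(facets)                # -> ["GO:0008150", "GO:0003674"]
#   build_list(facets, min_size=100)  # -> ["GO:0008150"]
#   build_map(facets)                 # -> {"GO:0008150": 120, "GO:0003674": 80}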
# utility function to build a reverse map: { "a": 1, "b": 1, "c": 2 } -> {1: ["a", "b"], 2: ["c"]}
def build_reverse_map(map):
reverse_map = { }
for key, val in map.items():
ls = []
if val in reverse_map:
ls = reverse_map[val]
else:
reverse_map[val] = ls
ls.append(key)
return reverse_map
# utility function to cluster elements of an input map based on another map of synonyms
def cluster_map(input_map, synonyms):
cluster = { }
for key, val in input_map.items():
temp = synonyms[key]
if temp in cluster:
val_cluster = cluster[temp]
cluster[temp] = val_cluster + val
else:
cluster[temp] = val
return cluster
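# Illustrative example (hypothetical counts): collapsing per-evidence counts
# into their groups through a synonym map.
#
#   cluster_map({"IDA": 5, "IMP": 2, "IEA": 9},
#               {"IDA": "EXP", "IMP": "EXP", "IEA": "IEA"})
#   # -> {"EXP": 7, "IEA": 9}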
# similar to the above, but the value of each key is also a map
def cluster_complex_map(input_map, synonyms):
cluster = { }
for key, val in input_map.items():
temp = synonyms[key]
# print("working on : " , key , val)
if temp in cluster:
temp_cluster = cluster[temp]
# print("cluster already found : ", temp , temp_cluster)
for key_cluster, val_cluster in temp_cluster.items():
temp_cluster[key_cluster] = val_cluster + val[key_cluster]
else:
cluster[temp] = val
return cluster
# reorder map (python 3.6 keeps order in which items are inserted in map: https://stackoverflow.com/questions/613183/how-do-i-sort-a-dictionary-by-value)
def ordered_map(map):
ordered_map = { }
for w in sorted(map, key=map.get, reverse=True):
ordered_map[w] = map[w]
return ordered_map
def extract_map(map, key_str):
extracted = { }
for key, val in map.items():
if key_str in key:
extracted[key] = val
return extracted
def merge_dict(dict_total, dict_diff):
new_dict = { }
for key, val in dict_total.items():
if type(val) == str:
new_dict[key] = val
elif type(val) == int or type(val) == float:
if val == 0:
diff_val = dict_diff[key] if key in dict_diff else 0
new_dict[key] = str(diff_val) + " / " + str(val) + "\t0%"
else:
diff_val = dict_diff[key] if key in dict_diff else 0
new_dict[key] = str(diff_val) + " / " + str(val) + "\t" + str(round(100 * diff_val / val, 2)) + "%"
elif type(val) == dict:
diff_val = dict_diff[key] if key in dict_diff else { }
new_dict[key] = merge_dict(val, diff_val)
else:
print("should not happened ! " , val , type(val))
return new_dict
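# Illustrative example (hypothetical counts): each numeric field is rendered
# as "diff / total<TAB>percent%".
#
#   merge_dict({"annotations": 200}, {"annotations": 50})
#   # -> {"annotations": "50 / 200\t25.0%"}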
def minus_dict(dict1, dict2):
new_dict = { }
for key, val in dict1.items():
if type(val) == str:
new_dict[key] = val
elif type(val) == int or type(val) == float:
diff_val = dict2[key] if key in dict2 else 0
new_dict[key] = val - diff_val
elif type(val) == dict:
diff_val = dict2[key] if key in dict2 else { }
new_dict[key] = merge_dict(val, diff_val)
else:
print("should not happened ! " , val , type(val))
return new_dict
def has_taxon(stats, taxon_id):
for taxon in stats["annotations"]["by_taxon"]:
if taxon_id in taxon:
return True
return False
def added_removed_species(current_stats, previous_stats):
results = {
"added" : { },
"removed" : { }
}
for taxon in current_stats["annotations"]["by_taxon"]:
taxon_id = taxon.split("|")[0]
if not has_taxon(previous_stats, taxon_id):
results["added"][taxon] = current_stats["annotations"]["by_taxon"][taxon]
for taxon in previous_stats["annotations"]["by_taxon"]:
taxon_id = taxon.split("|")[0]
if not has_taxon(current_stats, taxon_id):
results["removed"][taxon] = previous_stats["annotations"]["by_taxon"][taxon]
return results
def bioentity_type(str_type):
"""
In a nutshell, collapse all RNA related types into RNA
"""
if "RNA" in str_type or "ribozyme" in str_type or "transcript" in str_type:
return "RNA_cluster"
return str_type
def sum_map_values(map):
"""
Utility function to sum up the values of a map. Assume the map values are all numbers
"""
total = 0
for key, val in map.items():
total += val
return total
def write_json(key, content):
with open(key, 'w') as outfile:
try:
json.dump(content, outfile, indent=2)
finally:
outfile.close()
def write_text(key, content):
with open(key, 'w') as outfile:
try:
outfile.write(content)
finally:
outfile.close()
| 5,663
| 101
| 517
|
5fcb1e0039071dfe15f9923cb83138fdd37d3701
| 774
|
py
|
Python
|
tests/mixins/urls_mixin.py
|
Nyior/django-rest-paystack
|
fd74dd26703fe4ce63664736c2063ace7020f71a
|
[
"MIT"
] | 9
|
2021-12-12T17:59:15.000Z
|
2022-02-05T17:13:46.000Z
|
tests/mixins/urls_mixin.py
|
Nyior/django-rest-paystack
|
fd74dd26703fe4ce63664736c2063ace7020f71a
|
[
"MIT"
] | null | null | null |
tests/mixins/urls_mixin.py
|
Nyior/django-rest-paystack
|
fd74dd26703fe4ce63664736c2063ace7020f71a
|
[
"MIT"
] | 1
|
2021-12-21T18:57:03.000Z
|
2021-12-21T18:57:03.000Z
|
from django.urls import reverse
| 27.642857
| 78
| 0.706718
|
from django.urls import reverse
class URLsMixin(object):
def initiate_transaction_url(self):
return reverse("transaction-initiate")
def verify_transaction_url(self, trans_ref):
return reverse("transaction-verify") + f"?transaction_ref={trans_ref}"
def charge_customer_url(self):
return reverse("transaction-charge-customer")
def transaction_url(self, transaction_id):
return reverse("transaction-detail")
def all_transactions_url(self):
return reverse("transaction-list")
def webhook_handler_url(self):
return reverse("webhook-handler")
def get_customer_url(self, user_id):
return reverse("customer-detail")
def all_customers_url(self):
return reverse("customer-list")
| 500
| 3
| 238
|
841c91691e5e3f9f8e364f9c80db23924bcbaafd
| 102
|
py
|
Python
|
notebooks/exercise_solutions/n00_python_intro_data-structures.py
|
pydy/pydy-tutorial-human-standing
|
72b1d8513e339e9b10e501bd3490caa3fa997bc4
|
[
"CC-BY-4.0"
] | 134
|
2015-05-19T15:24:18.000Z
|
2022-03-12T09:39:03.000Z
|
notebooks/exercise_solutions/n00_python_intro_data-structures.py
|
pydy/pydy-tutorial-human-standing
|
72b1d8513e339e9b10e501bd3490caa3fa997bc4
|
[
"CC-BY-4.0"
] | 46
|
2015-05-05T18:08:20.000Z
|
2022-01-28T11:12:42.000Z
|
notebooks/exercise_solutions/n00_python_intro_data-structures.py
|
pydy/pydy-tutorial-pycon-2014
|
72b1d8513e339e9b10e501bd3490caa3fa997bc4
|
[
"CC-BY-4.0"
] | 62
|
2015-06-16T01:50:51.000Z
|
2022-02-26T07:39:41.000Z
|
num_list = [1,2,3,4]
months = ['Jan', 'Feb', 'Mar', 'Apr']
months_dict = dict(zip(months, num_list))
| 20.4
| 41
| 0.607843
|
num_list = [1,2,3,4]
months = ['Jan', 'Feb', 'Mar', 'Apr']
months_dict = dict(zip(months, num_list))
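# Resulting dictionary: {'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4}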
| 0
| 0
| 0
|
fe92b53f5b3777d23e5c45c05e94c9a44f57b7aa
| 345
|
py
|
Python
|
calingen/interfaces/__init__.py
|
Mischback/django-calingen
|
3354c751e29d301609ec44e64d69a8729ec36de4
|
[
"MIT"
] | null | null | null |
calingen/interfaces/__init__.py
|
Mischback/django-calingen
|
3354c751e29d301609ec44e64d69a8729ec36de4
|
[
"MIT"
] | 51
|
2021-11-15T20:44:19.000Z
|
2022-02-10T08:33:08.000Z
|
calingen/interfaces/__init__.py
|
Mischback/django-calingen
|
3354c751e29d301609ec44e64d69a8729ec36de4
|
[
"MIT"
] | null | null | null |
# SPDX-License-Identifier: MIT
"""The application's interfaces that are used to connect the different components.
Notes
-----
This package's code is not really specific to the Django framework. It is an
abstraction layer.
Primary focus is the provision of a plugin API that allows the app to be
extensible with third-party applications.
"""
| 26.538462
| 82
| 0.773913
|
# SPDX-License-Identifier: MIT
"""The application's interfaces that are used to connect the different components.
Notes
-----
This package's code is not really specific to the Django framework. It is an
abstraction layer.
Primary focus is the provision of a plugin API that allows the app to be
extensible with third-party applications.
"""
| 0
| 0
| 0
|
8b3f5bf2170e6f1f55a1f584308e631727c5174c
| 1,387
|
py
|
Python
|
Final Project/src/main.py
|
tig3r66/CMPUT275
|
dd5b94dcf0436e281f4696959db07b56f5c0b9d8
|
[
"MIT"
] | 1
|
2022-01-25T05:19:15.000Z
|
2022-01-25T05:19:15.000Z
|
Final Project/src/main.py
|
tig3r66/CMPUT275
|
dd5b94dcf0436e281f4696959db07b56f5c0b9d8
|
[
"MIT"
] | null | null | null |
Final Project/src/main.py
|
tig3r66/CMPUT275
|
dd5b94dcf0436e281f4696959db07b56f5c0b9d8
|
[
"MIT"
] | null | null | null |
# ===================================
# Name: Edward (Eddie) Guo
# ID: 1576381
# Partner: Jason Kim
# CMPUT 275, Fall 2020
#
# Final Assignment: EEG Visualizer
# ===================================
"""
Contains the QApplication which holds the PlotWindow QMainWindow object. The
controller class is here for convenient additions of extra QMainWindows.
"""
import sys
# for UI
from PyQt5 import QtCore, QtWidgets
from plot_window import PlotWindow
class Controller:
"""Controller class for slave QMainWindows. Used for expandability in case
the user wishes to create additional windows for the program (ex: home
window).
"""
def show_plot_window(self):
"""Creates the main window (EEG and FFT plots) from plot_window.py.
"""
self.plot_window = QtWidgets.QMainWindow()
self.ui = PlotWindow()
self.ui.setup_ui(self.plot_window)
self.plot_window.setWindowFlags(QtCore.Qt.Window)
self.plot_window.show()
app.aboutToQuit.connect(self.close_threads)
def close_threads(self):
"""Helper function that closes all running threads when the application
is about to quit.
"""
self.ui.close_threads()
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
controller = Controller()
controller.show_plot_window()
sys.exit(app.exec_())
| 26.673077
| 79
| 0.651045
|
# ===================================
# Name: Edward (Eddie) Guo
# ID: 1576381
# Partner: Jason Kim
# CMPUT 275, Fall 2020
#
# Final Assignment: EEG Visualizer
# ===================================
"""
Contains the QApplication which holds the PlotWindow QMainWindow object. The
controller class is here for convenient additions of extra QMainWindows.
"""
import sys
# for UI
from PyQt5 import QtCore, QtWidgets
from plot_window import PlotWindow
class Controller:
"""Controller class for slave QMainWindows. Used for expandability in case
the user wishes to create additional windows for the program (ex: home
window).
"""
def show_plot_window(self):
"""Creates the main window (EEG and FFT plots) from plot_window.py.
"""
self.plot_window = QtWidgets.QMainWindow()
self.ui = PlotWindow()
self.ui.setup_ui(self.plot_window)
self.plot_window.setWindowFlags(QtCore.Qt.Window)
self.plot_window.show()
app.aboutToQuit.connect(self.close_threads)
def close_threads(self):
"""Helper function that closes all running threads when the application
is about to quit.
"""
self.ui.close_threads()
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
controller = Controller()
controller.show_plot_window()
sys.exit(app.exec_())
| 0
| 0
| 0
|
9bc6d6d5809746ae0dfad11e6d7e815c885010be
| 929
|
py
|
Python
|
utilipy/data_utils/tests/test_init.py
|
nstarman/utilipy
|
17984942145d31126724df23500bafba18fb7516
|
[
"BSD-3-Clause"
] | 2
|
2020-11-15T01:48:45.000Z
|
2020-12-02T20:44:20.000Z
|
utilipy/data_utils/tests/test_init.py
|
nstarman/astroPHD
|
17984942145d31126724df23500bafba18fb7516
|
[
"BSD-3-Clause"
] | 22
|
2020-09-13T17:58:24.000Z
|
2022-02-04T19:05:23.000Z
|
utilipy/data_utils/tests/test_init.py
|
nstarman/utilipy
|
17984942145d31126724df23500bafba18fb7516
|
[
"BSD-3-Clause"
] | 1
|
2020-04-21T22:41:01.000Z
|
2020-04-21T22:41:01.000Z
|
# -*- coding: utf-8 -*-
"""Test Code in __init__."""
__all__ = [
"test_get_path_to_file",
]
##############################################################################
# IMPORTS
# BUILT-IN
import os.path
# PROJECT-SPECIFIC
from utilipy.data_utils.utils import get_path_to_file
##############################################################################
# PARAMETERS
##############################################################################
# CODE
##############################################################################
# /def
# -------------------------------------------------------------------
##############################################################################
# END
| 21.113636
| 78
| 0.339074
|
# -*- coding: utf-8 -*-
"""Test Code in __init__."""
__all__ = [
"test_get_path_to_file",
]
##############################################################################
# IMPORTS
# BUILT-IN
import os.path
# PROJECT-SPECIFIC
from utilipy.data_utils.utils import get_path_to_file
##############################################################################
# PARAMETERS
##############################################################################
# CODE
##############################################################################
def test_get_path_to_file():
path = get_path_to_file("__init__.py", package="utilipy.data_utils")
assert isinstance(path, str)
assert os.path.join("utilipy", "data_utils", "__init__.py") in path
# /def
# -------------------------------------------------------------------
##############################################################################
# END
| 187
| 0
| 23
|
22e65f52c1dd2e9a786884bce3811c3aa03273e2
| 2,579
|
py
|
Python
|
eStore/migrations/0005_auto_20210420_2220.py
|
masrufjaman/gas-n-go
|
435e574a1b1bbd875a8a7aeade4d4c2dc1636b07
|
[
"MIT"
] | null | null | null |
eStore/migrations/0005_auto_20210420_2220.py
|
masrufjaman/gas-n-go
|
435e574a1b1bbd875a8a7aeade4d4c2dc1636b07
|
[
"MIT"
] | 9
|
2021-03-22T18:36:25.000Z
|
2021-04-20T17:39:47.000Z
|
eStore/migrations/0005_auto_20210420_2220.py
|
masrufjaman/gas-n-go
|
435e574a1b1bbd875a8a7aeade4d4c2dc1636b07
|
[
"MIT"
] | 2
|
2021-06-30T14:39:52.000Z
|
2021-08-12T19:41:11.000Z
|
# Generated by Django 3.1.7 on 2021-04-20 16:20
from django.db import migrations, models
import django.db.models.deletion
| 40.936508
| 150
| 0.598682
|
# Generated by Django 3.1.7 on 2021-04-20 16:20
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('accounts', '0006_auto_20210406_1400'),
('eStore', '0004_item_discount_price'),
]
operations = [
migrations.RemoveField(
model_name='order',
name='items',
),
migrations.AddField(
model_name='order',
name='transaction_id',
field=models.BooleanField(max_length=200, null=True),
),
migrations.AddField(
model_name='orderitem',
name='order',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='eStore.order'),
),
migrations.AddField(
model_name='orderitem',
name='quantity',
field=models.IntegerField(blank=True, default=0, null=True),
),
migrations.AddField(
model_name='orderitem',
name='username',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='accounts.customer'),
),
migrations.AlterField(
model_name='order',
name='username',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='accounts.customer', to_field='username'),
),
migrations.AlterField(
model_name='orderitem',
name='item',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='eStore.item'),
),
migrations.CreateModel(
name='ShippingAddress',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('address', models.CharField(max_length=200, null=True)),
('city', models.CharField(max_length=200, null=True)),
('area', models.CharField(max_length=200, null=True)),
('road_no', models.CharField(max_length=200, null=True)),
('date_added', models.DateTimeField(auto_now_add=True)),
('order', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='eStore.order')),
('username', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='accounts.customer')),
],
),
]
| 0
| 2,432
| 23
|
72dfafe3d10bd2db54f014bbf5184b6be818ecf0
| 9,310
|
py
|
Python
|
sdk/python/kfp/v2/dsl/experimental/for_loop.py
|
ryansteakley/pipelines
|
98677b2190fb327be68e4bb0d00c520593707f21
|
[
"Apache-2.0"
] | 1
|
2021-10-23T00:39:47.000Z
|
2021-10-23T00:39:47.000Z
|
sdk/python/kfp/v2/dsl/experimental/for_loop.py
|
ryansteakley/pipelines
|
98677b2190fb327be68e4bb0d00c520593707f21
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/kfp/v2/dsl/experimental/for_loop.py
|
ryansteakley/pipelines
|
98677b2190fb327be68e4bb0d00c520593707f21
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes and methods that supports argument for ParallelFor."""
import re
from typing import Any, Dict, List, Optional, Tuple, Union, get_type_hints
from kfp.v2.components.experimental import pipeline_channel
ItemList = List[Union[int, float, str, Dict[str, Any]]]
def _get_loop_item_type(type_name: str) -> Optional[str]:
"""Extracts the loop item type.
    This method is used to extract the item type from a collection type.
For example:
List[str] -> str
typing.List[int] -> int
typing.Sequence[str] -> str
List -> None
str -> None
Args:
        type_name: The collection type name, like `List`, `Sequence`, etc.
Returns:
The collection item type or None if no match found.
"""
match = re.match('(typing\.)?(?:\w+)(?:\[(?P<item_type>.+)\])', type_name)
if match:
return match.group('item_type').lstrip().rstrip()
else:
return None
def _get_subvar_type(type_name: str) -> Optional[str]:
"""Extracts the subvar type.
    This method is used to extract the value type from a dictionary type.
For example:
Dict[str, int] -> int
typing.Mapping[str, float] -> float
Args:
type_name: The dictionary type.
Returns:
The dictionary value type or None if no match found.
"""
match = re.match(
'(typing\.)?(?:\w+)(?:\[\s*(?:\w+)\s*,\s*(?P<value_type>.+)\])',
type_name)
if match:
return match.group('value_type').lstrip().rstrip()
else:
return None
class LoopArgument(pipeline_channel.PipelineChannel):
"""Represents the argument that are looped over in a ParallelFor loop.
The class shouldn't be instantiated by the end user, rather it is
created automatically by a ParallelFor ops group.
To create a LoopArgument instance, use one of its factory methods::
LoopArgument.from_pipeline_channel(...)
LoopArgument.from_raw_items(...)
Attributes:
items_or_pipeline_channel: The raw items or the PipelineChannel object
            this LoopArgument is associated with.
"""
LOOP_ITEM_NAME_BASE = 'loop-item'
LOOP_ITEM_PARAM_NAME_BASE = 'loop-item-param'
def __init__(
self,
items: Union[ItemList, pipeline_channel.PipelineChannel],
name_code: Optional[str] = None,
name_override: Optional[str] = None,
**kwargs,
):
"""Initializes a LoopArguments object.
Args:
            items: List of items to loop over. If a list of dicts, then all
dicts must have the same keys and every key must be a legal
Python variable name.
name_code: A unique code used to identify these loop arguments.
Should match the code for the ParallelFor ops_group which created
these LoopArguments. This prevents parameter name collisions.
name_override: The override name for PipelineChannel.
**kwargs: Any other keyword arguments passed down to PipelineChannel.
"""
if (name_code is None) == (name_override is None):
raise ValueError(
'Expect one and only one of `name_code` and `name_override` to '
'be specified.')
if name_override is None:
super().__init__(name=self._make_name(name_code), **kwargs)
else:
super().__init__(name=name_override, **kwargs)
if not isinstance(items,
(list, tuple, pipeline_channel.PipelineChannel)):
raise TypeError(
f'Expected list, tuple, or PipelineChannel, got {items}.')
if isinstance(items, tuple):
items = list(items)
self.items_or_pipeline_channel = items
self._referenced_subvars: Dict[str, LoopArgumentVariable] = {}
if isinstance(items, list) and isinstance(items[0], dict):
subvar_names = set(items[0].keys())
# then this block creates loop_arg.variable_a and loop_arg.variable_b
for subvar_name in subvar_names:
loop_arg_var = LoopArgumentVariable(
loop_argument=self,
subvar_name=subvar_name,
)
self._referenced_subvars[subvar_name] = loop_arg_var
setattr(self, subvar_name, loop_arg_var)
def _make_name(self, code: str):
"""Makes a name for this loop argument from a unique code."""
return '{}-{}'.format(self.LOOP_ITEM_PARAM_NAME_BASE, code)
@classmethod
def from_pipeline_channel(
cls,
channel: pipeline_channel.PipelineChannel,
) -> 'LoopArgument':
"""Creates a LoopArgument object from a PipelineChannel object."""
return LoopArgument(
items=channel,
name_override=channel.name + '-' + cls.LOOP_ITEM_NAME_BASE,
task_name=channel.task_name,
channel_type=_get_loop_item_type(channel.channel_type),
)
@classmethod
def from_raw_items(
cls,
raw_items: ItemList,
name_code: str,
) -> 'LoopArgument':
"""Creates a LoopArgument object from raw item list."""
if len(raw_items) == 0:
raise ValueError('Got an empty item list for loop argument.')
return LoopArgument(
items=raw_items,
name_code=name_code,
channel_type=type(raw_items[0]).__name__,
)
@classmethod
def name_is_loop_argument(cls, name: str) -> bool:
"""Returns True if the given channel name looks like a loop argument.
Either it came from a withItems loop item or withParams loop
item.
"""
return ('-' + cls.LOOP_ITEM_NAME_BASE) in name \
or (cls.LOOP_ITEM_PARAM_NAME_BASE + '-') in name
class LoopArgumentVariable(pipeline_channel.PipelineChannel):
"""Represents a subvariable for a loop argument.
This is used for cases where we're looping over maps, each of which contains
several variables. If the user ran:
with dsl.ParallelFor([{'a': 1, 'b': 2}, {'a': 3, 'b': 4}]) as item:
...
Then there's one LoopArgumentVariable for 'a' and another for 'b'.
Attributes:
loop_argument: The original LoopArgument object this subvariable is
attached to.
subvar_name: The subvariable name.
"""
SUBVAR_NAME_DELIMITER = '-subvar-'
LEGAL_SUBVAR_NAME_REGEX = re.compile(r'^[a-zA-Z_][0-9a-zA-Z_]*$')
def __init__(
self,
loop_argument: LoopArgument,
subvar_name: str,
):
"""Initializes a LoopArgumentVariable instance.
Args:
            loop_argument: The LoopArgument object this subvariable is
                attached to.
subvar_name: The name of this subvariable, which is the name of the
dict key that spawned this subvariable.
Raises:
            ValueError: If the subvar name is illegal.
"""
if not self._subvar_name_is_legal(subvar_name):
raise ValueError(
f'Tried to create subvariable named {subvar_name}, but that is '
'not a legal Python variable name.')
self.subvar_name = subvar_name
self.loop_argument = loop_argument
super().__init__(
name=self._get_name_override(
loop_arg_name=loop_argument.name,
subvar_name=subvar_name,
),
task_name=loop_argument.task_name,
channel_type=_get_subvar_type(loop_argument.channel_type),
)
def _subvar_name_is_legal(self, proposed_variable_name: str) -> bool:
"""Returns True if the subvar name is legal."""
return re.match(self.LEGAL_SUBVAR_NAME_REGEX,
proposed_variable_name) is not None
def _get_name_override(self, loop_arg_name: str, subvar_name: str) -> str:
"""Gets the name.
Args:
loop_arg_name: the name of the loop argument parameter that this
LoopArgumentVariable is attached to.
subvar_name: The name of this subvariable.
Returns:
The name of this loop arg variable.
"""
return f'{loop_arg_name}{self.SUBVAR_NAME_DELIMITER}{subvar_name}'
| 34.868914
| 81
| 0.628249
|
# Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes and methods that supports argument for ParallelFor."""
import re
from typing import Any, Dict, List, Optional, Tuple, Union, get_type_hints
from kfp.v2.components.experimental import pipeline_channel
ItemList = List[Union[int, float, str, Dict[str, Any]]]
def _get_loop_item_type(type_name: str) -> Optional[str]:
"""Extracts the loop item type.
    This method is used to extract the item type from a collection type.
For example:
List[str] -> str
typing.List[int] -> int
typing.Sequence[str] -> str
List -> None
str -> None
Args:
        type_name: The collection type name, like `List`, `Sequence`, etc.
Returns:
The collection item type or None if no match found.
"""
match = re.match('(typing\.)?(?:\w+)(?:\[(?P<item_type>.+)\])', type_name)
if match:
return match.group('item_type').lstrip().rstrip()
else:
return None
def _get_subvar_type(type_name: str) -> Optional[str]:
"""Extracts the subvar type.
    This method is used to extract the value type from a dictionary type.
For example:
Dict[str, int] -> int
typing.Mapping[str, float] -> float
Args:
type_name: The dictionary type.
Returns:
The dictionary value type or None if no match found.
"""
match = re.match(
'(typing\.)?(?:\w+)(?:\[\s*(?:\w+)\s*,\s*(?P<value_type>.+)\])',
type_name)
if match:
return match.group('value_type').lstrip().rstrip()
else:
return None
class LoopArgument(pipeline_channel.PipelineChannel):
"""Represents the argument that are looped over in a ParallelFor loop.
The class shouldn't be instantiated by the end user, rather it is
created automatically by a ParallelFor ops group.
To create a LoopArgument instance, use one of its factory methods::
LoopArgument.from_pipeline_channel(...)
LoopArgument.from_raw_items(...)
Attributes:
items_or_pipeline_channel: The raw items or the PipelineChannel object
            this LoopArgument is associated with.
"""
LOOP_ITEM_NAME_BASE = 'loop-item'
LOOP_ITEM_PARAM_NAME_BASE = 'loop-item-param'
def __init__(
self,
items: Union[ItemList, pipeline_channel.PipelineChannel],
name_code: Optional[str] = None,
name_override: Optional[str] = None,
**kwargs,
):
"""Initializes a LoopArguments object.
Args:
            items: List of items to loop over. If a list of dicts, then all
dicts must have the same keys and every key must be a legal
Python variable name.
name_code: A unique code used to identify these loop arguments.
Should match the code for the ParallelFor ops_group which created
these LoopArguments. This prevents parameter name collisions.
name_override: The override name for PipelineChannel.
**kwargs: Any other keyword arguments passed down to PipelineChannel.
"""
if (name_code is None) == (name_override is None):
raise ValueError(
'Expect one and only one of `name_code` and `name_override` to '
'be specified.')
if name_override is None:
super().__init__(name=self._make_name(name_code), **kwargs)
else:
super().__init__(name=name_override, **kwargs)
if not isinstance(items,
(list, tuple, pipeline_channel.PipelineChannel)):
raise TypeError(
f'Expected list, tuple, or PipelineChannel, got {items}.')
if isinstance(items, tuple):
items = list(items)
self.items_or_pipeline_channel = items
self._referenced_subvars: Dict[str, LoopArgumentVariable] = {}
if isinstance(items, list) and isinstance(items[0], dict):
subvar_names = set(items[0].keys())
# then this block creates loop_arg.variable_a and loop_arg.variable_b
for subvar_name in subvar_names:
loop_arg_var = LoopArgumentVariable(
loop_argument=self,
subvar_name=subvar_name,
)
self._referenced_subvars[subvar_name] = loop_arg_var
setattr(self, subvar_name, loop_arg_var)
def __getattr__(self, name: str):
# this is being overridden so that we can access subvariables of the
# LoopArgument (i.e.: item.a) without knowing the subvariable names ahead
# of time.
return self._referenced_subvars.setdefault(
name, LoopArgumentVariable(
loop_argument=self,
subvar_name=name,
))
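    # Illustrative use of the override above (hypothetical loop): for
    # `with dsl.ParallelFor([{'a': 1, 'b': 2}]) as item:`, an attribute such as
    # `item.a` resolves here to a LoopArgumentVariable named
    # '<loop-argument-name>-subvar-a'.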
def _make_name(self, code: str):
"""Makes a name for this loop argument from a unique code."""
return '{}-{}'.format(self.LOOP_ITEM_PARAM_NAME_BASE, code)
@classmethod
def from_pipeline_channel(
cls,
channel: pipeline_channel.PipelineChannel,
) -> 'LoopArgument':
"""Creates a LoopArgument object from a PipelineChannel object."""
return LoopArgument(
items=channel,
name_override=channel.name + '-' + cls.LOOP_ITEM_NAME_BASE,
task_name=channel.task_name,
channel_type=_get_loop_item_type(channel.channel_type),
)
@classmethod
def from_raw_items(
cls,
raw_items: ItemList,
name_code: str,
) -> 'LoopArgument':
"""Creates a LoopArgument object from raw item list."""
if len(raw_items) == 0:
raise ValueError('Got an empty item list for loop argument.')
return LoopArgument(
items=raw_items,
name_code=name_code,
channel_type=type(raw_items[0]).__name__,
)
@classmethod
def name_is_loop_argument(cls, name: str) -> bool:
"""Returns True if the given channel name looks like a loop argument.
Either it came from a withItems loop item or withParams loop
item.
"""
return ('-' + cls.LOOP_ITEM_NAME_BASE) in name \
or (cls.LOOP_ITEM_PARAM_NAME_BASE + '-') in name
class LoopArgumentVariable(pipeline_channel.PipelineChannel):
"""Represents a subvariable for a loop argument.
This is used for cases where we're looping over maps, each of which contains
several variables. If the user ran:
with dsl.ParallelFor([{'a': 1, 'b': 2}, {'a': 3, 'b': 4}]) as item:
...
Then there's one LoopArgumentVariable for 'a' and another for 'b'.
Attributes:
loop_argument: The original LoopArgument object this subvariable is
attached to.
subvar_name: The subvariable name.
"""
SUBVAR_NAME_DELIMITER = '-subvar-'
LEGAL_SUBVAR_NAME_REGEX = re.compile(r'^[a-zA-Z_][0-9a-zA-Z_]*$')
def __init__(
self,
loop_argument: LoopArgument,
subvar_name: str,
):
"""Initializes a LoopArgumentVariable instance.
Args:
            loop_argument: The LoopArgument object this subvariable is
                attached to.
subvar_name: The name of this subvariable, which is the name of the
dict key that spawned this subvariable.
Raises:
            ValueError: If the subvar name is illegal.
"""
if not self._subvar_name_is_legal(subvar_name):
raise ValueError(
f'Tried to create subvariable named {subvar_name}, but that is '
'not a legal Python variable name.')
self.subvar_name = subvar_name
self.loop_argument = loop_argument
super().__init__(
name=self._get_name_override(
loop_arg_name=loop_argument.name,
subvar_name=subvar_name,
),
task_name=loop_argument.task_name,
channel_type=_get_subvar_type(loop_argument.channel_type),
)
def _subvar_name_is_legal(self, proposed_variable_name: str) -> bool:
"""Returns True if the subvar name is legal."""
return re.match(self.LEGAL_SUBVAR_NAME_REGEX,
proposed_variable_name) is not None
def _get_name_override(self, loop_arg_name: str, subvar_name: str) -> str:
"""Gets the name.
Args:
loop_arg_name: the name of the loop argument parameter that this
LoopArgumentVariable is attached to.
subvar_name: The name of this subvariable.
Returns:
The name of this loop arg variable.
"""
return f'{loop_arg_name}{self.SUBVAR_NAME_DELIMITER}{subvar_name}'
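# Minimal usage sketch (assumption: running inside the KFP v2 experimental DSL,
# where PipelineChannel accepts the keyword arguments used above):
#
#   loop_arg = LoopArgument.from_raw_items(raw_items=[1, 2, 3], name_code='1')
#   loop_arg.name          # -> 'loop-item-param-1'
#   loop_arg.channel_type  # -> 'int'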
| 368
| 0
| 27
|
e0cb3175c59da0065800bb2675b16b000572cbc4
| 9,948
|
py
|
Python
|
.github/workflows/templates/generate.py
|
s0undt3ch/salt-bootstrap
|
11e5a237a922425c0e11608eec37bb4fde8d4577
|
[
"Apache-2.0"
] | null | null | null |
.github/workflows/templates/generate.py
|
s0undt3ch/salt-bootstrap
|
11e5a237a922425c0e11608eec37bb4fde8d4577
|
[
"Apache-2.0"
] | null | null | null |
.github/workflows/templates/generate.py
|
s0undt3ch/salt-bootstrap
|
11e5a237a922425c0e11608eec37bb4fde8d4577
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import datetime
os.chdir(os.path.abspath(os.path.dirname(__file__)))
LINUX_DISTROS = [
"almalinux-8",
"amazon-2",
"arch",
"centos-7",
"centos-8",
"debian-10",
"debian-11",
"debian-9",
"fedora-33",
"fedora-34",
"fedora-35",
"gentoo",
"gentoo-systemd",
"opensuse-15",
"opensuse-tumbleweed",
"oraclelinux-7",
"oraclelinux-8",
"rockylinux-8",
"ubuntu-1804",
"ubuntu-2004",
"ubuntu-2104",
]
OSX = WINDOWS = []
STABLE_DISTROS = [
"amazon-2",
"centos-7",
"centos-8",
"debian-10",
"debian-11",
"debian-9",
"fedora-33",
"fedora-34",
"fedora-35",
"gentoo",
"gentoo-systemd",
"oraclelinux-7",
"oraclelinux-8",
"ubuntu-1804",
"ubuntu-2004",
"ubuntu-2104",
]
PY2_BLACKLIST = [
"almalinux-8",
"centos-8",
"debian-10",
"debian-11",
"fedora-33",
"fedora-34",
"fedora-35",
"gentoo",
"gentoo-systemd",
"opensuse-15",
"opensuse-tumbleweed",
"oraclelinux-8",
"rockylinux-8",
"ubuntu-2004",
"ubuntu-2104",
]
BLACKLIST_3000 = [
"almalinux-8",
"debian-11",
"fedora-33",
"fedora-34",
"fedora-35",
"opensuse-tumbleweed",
"rockylinux-8",
"ubuntu-2004",
"ubuntu-2104",
]
BLACKLIST_3001 = [
"almalinux-8",
"debian-11",
"rockylinux-8",
"ubuntu-2104",
]
BLACKLIST_3001_0 = [
"almalinux-8",
"debian-11",
"gentoo",
"gentoo-systemd",
"rockylinux-8",
"ubuntu-2104",
]
BLACKLIST_3002_0 = [
"almalinux-8",
"debian-11",
"gentoo",
"gentoo-systemd",
"rockylinux-8",
"ubuntu-2104",
]
SALT_BRANCHES = [
"3000",
"3001",
"3001-0",
"3002",
"3002-0",
"master",
"latest",
]
BRANCH_DISPLAY_NAMES = {
"3000": "v3000",
"3001": "v3001",
"3001-0": "v3001.0",
"3002": "v3002",
"3002-0": "v3002.0",
"master": "Master",
"latest": "Latest",
}
STABLE_BRANCH_BLACKLIST = []
LATEST_PKG_BLACKLIST = []
DISTRO_DISPLAY_NAMES = {
"almalinux-8": "AlmaLinux 8",
"amazon-2": "Amazon 2",
"arch": "Arch",
"centos-7": "CentOS 7",
"centos-8": "CentOS 8",
"debian-10": "Debian 10",
"debian-11": "Debian 11",
"debian-9": "Debian 9",
"fedora-33": "Fedora 33",
"fedora-34": "Fedora 34",
"fedora-35": "Fedora 35",
"gentoo": "Gentoo",
"gentoo-systemd": "Gentoo (systemd)",
"opensuse-15": "Opensuse 15",
"opensuse-tumbleweed": "Opensuse Tumbleweed",
"oraclelinux-7": "Oracle Linux 7",
"oraclelinux-8": "Oracle Linux 8",
"rockylinux-8": "Rocky Linux 8",
"ubuntu-1804": "Ubuntu 18.04",
"ubuntu-2004": "Ubuntu 20.04",
"ubuntu-2104": "Ubuntu 21.04",
}
TIMEOUT_DEFAULT = 20
TIMEOUT_OVERRIDES = {
"gentoo": 90,
"gentoo-systemd": 90,
}
BRANCH_ONLY_OVERRIDES = [
"gentoo",
"gentoo-systemd",
]
if __name__ == "__main__":
generate_test_jobs()
| 28.918605
| 157
| 0.441998
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import datetime
os.chdir(os.path.abspath(os.path.dirname(__file__)))
LINUX_DISTROS = [
"almalinux-8",
"amazon-2",
"arch",
"centos-7",
"centos-8",
"debian-10",
"debian-11",
"debian-9",
"fedora-33",
"fedora-34",
"fedora-35",
"gentoo",
"gentoo-systemd",
"opensuse-15",
"opensuse-tumbleweed",
"oraclelinux-7",
"oraclelinux-8",
"rockylinux-8",
"ubuntu-1804",
"ubuntu-2004",
"ubuntu-2104",
]
OSX = WINDOWS = []
STABLE_DISTROS = [
"amazon-2",
"centos-7",
"centos-8",
"debian-10",
"debian-11",
"debian-9",
"fedora-33",
"fedora-34",
"fedora-35",
"gentoo",
"gentoo-systemd",
"oraclelinux-7",
"oraclelinux-8",
"ubuntu-1804",
"ubuntu-2004",
"ubuntu-2104",
]
PY2_BLACKLIST = [
"almalinux-8",
"centos-8",
"debian-10",
"debian-11",
"fedora-33",
"fedora-34",
"fedora-35",
"gentoo",
"gentoo-systemd",
"opensuse-15",
"opensuse-tumbleweed",
"oraclelinux-8",
"rockylinux-8",
"ubuntu-2004",
"ubuntu-2104",
]
BLACKLIST_3000 = [
"almalinux-8",
"debian-11",
"fedora-33",
"fedora-34",
"fedora-35",
"opensuse-tumbleweed",
"rockylinux-8",
"ubuntu-2004",
"ubuntu-2104",
]
BLACKLIST_3001 = [
"almalinux-8",
"debian-11",
"rockylinux-8",
"ubuntu-2104",
]
BLACKLIST_3001_0 = [
"almalinux-8",
"debian-11",
"gentoo",
"gentoo-systemd",
"rockylinux-8",
"ubuntu-2104",
]
BLACKLIST_3002_0 = [
"almalinux-8",
"debian-11",
"gentoo",
"gentoo-systemd",
"rockylinux-8",
"ubuntu-2104",
]
SALT_BRANCHES = [
"3000",
"3001",
"3001-0",
"3002",
"3002-0",
"master",
"latest",
]
BRANCH_DISPLAY_NAMES = {
"3000": "v3000",
"3001": "v3001",
"3001-0": "v3001.0",
"3002": "v3002",
"3002-0": "v3002.0",
"master": "Master",
"latest": "Latest",
}
STABLE_BRANCH_BLACKLIST = []
LATEST_PKG_BLACKLIST = []
DISTRO_DISPLAY_NAMES = {
"almalinux-8": "AlmaLinux 8",
"amazon-2": "Amazon 2",
"arch": "Arch",
"centos-7": "CentOS 7",
"centos-8": "CentOS 8",
"debian-10": "Debian 10",
"debian-11": "Debian 11",
"debian-9": "Debian 9",
"fedora-33": "Fedora 33",
"fedora-34": "Fedora 34",
"fedora-35": "Fedora 35",
"gentoo": "Gentoo",
"gentoo-systemd": "Gentoo (systemd)",
"opensuse-15": "Opensuse 15",
"opensuse-tumbleweed": "Opensuse Tumbleweed",
"oraclelinux-7": "Oracle Linux 7",
"oraclelinux-8": "Oracle Linux 8",
"rockylinux-8": "Rocky Linux 8",
"ubuntu-1804": "Ubuntu 18.04",
"ubuntu-2004": "Ubuntu 20.04",
"ubuntu-2104": "Ubuntu 21.04",
}
TIMEOUT_DEFAULT = 20
TIMEOUT_OVERRIDES = {
"gentoo": 90,
"gentoo-systemd": 90,
}
BRANCH_ONLY_OVERRIDES = [
"gentoo",
"gentoo-systemd",
]
def generate_test_jobs():
test_jobs = ""
branch_only_test_jobs = ""
for distro in LINUX_DISTROS + OSX + WINDOWS:
timeout_minutes = (
TIMEOUT_OVERRIDES[distro]
if distro in TIMEOUT_OVERRIDES
else TIMEOUT_DEFAULT
)
needs = " needs: lint"
if distro in BRANCH_ONLY_OVERRIDES:
needs = ""
current_test_jobs = ""
for branch in SALT_BRANCHES:
if branch == "latest":
if distro in LATEST_PKG_BLACKLIST:
continue
if distro in LINUX_DISTROS:
template = "linux.yml"
elif distro in OSX:
template = "osx.yml"
elif distro in WINDOWS:
template = "windows.yml"
else:
print("Don't know how to handle {}".format(distro))
with open(template) as rfh:
current_test_jobs += "\n{}\n".format(
rfh.read()
.replace(
"{python_version}-{bootstrap_type}-{branch}-{distro}",
"{branch}-{distro}",
)
.format(
distro=distro,
branch=branch,
display_name="{} Latest packaged release".format(
DISTRO_DISPLAY_NAMES[distro],
),
timeout_minutes=timeout_minutes,
needs=needs,
)
)
continue
for python_version in ("py2", "py3"):
if branch == "master" and python_version == "py2":
# Salt's master branch no longer supports Python 2
continue
try:
if int(branch.split("-")[0]) >= 3000 and python_version == "py2":
                        # Salt's 300X versions no longer support Python 2
continue
except ValueError:
pass
for bootstrap_type in ("stable", "git"):
if bootstrap_type == "stable":
if branch == "master":
# For the master branch there's no stable build
continue
if distro not in STABLE_DISTROS:
continue
if branch in STABLE_BRANCH_BLACKLIST:
continue
if distro.startswith("fedora") and branch != "latest":
# Fedora does not keep old builds around
continue
if bootstrap_type == "git":
                        # .0 versions are virtual versions for pinning to the first point
                        # release of a major release (such as 3001); there is no git version.
if branch.endswith("-0"):
continue
if python_version == "py3":
if distro in ("arch"):
allowed_branches = ["master"]
try:
int_branch = int(branch)
if int_branch > 3000:
allowed_branches.append(branch)
except ValueError:
pass
if branch not in allowed_branches:
# Arch and Fedora default to py3.8
continue
if branch == "3000" and distro in BLACKLIST_3000:
continue
if branch == "3001" and distro in BLACKLIST_3001:
continue
if branch == "3001-0" and distro in BLACKLIST_3001_0:
continue
if branch == "3002-0" and distro in BLACKLIST_3002_0:
continue
if python_version == "py2" and distro in PY2_BLACKLIST:
continue
if distro in LINUX_DISTROS:
template = "linux.yml"
elif distro in OSX:
template = "osx.yml"
elif distro in WINDOWS:
template = "windows.yml"
else:
print("Don't know how to handle {}".format(distro))
with open(template) as rfh:
current_test_jobs += "\n{}\n".format(
rfh.read().format(
distro=distro,
branch=branch,
python_version=python_version,
bootstrap_type=bootstrap_type,
display_name="{} {} {} {}".format(
DISTRO_DISPLAY_NAMES[distro],
BRANCH_DISPLAY_NAMES[branch],
python_version.capitalize(),
bootstrap_type.capitalize(),
),
timeout_minutes=timeout_minutes,
needs=needs,
)
)
if distro in BRANCH_ONLY_OVERRIDES:
branch_only_test_jobs += current_test_jobs
else:
test_jobs += current_test_jobs
with open("lint.yml") as rfh:
lint_job = "\n{}\n".format(rfh.read())
with open("pre-commit.yml") as rfh:
pre_commit_job = "\n{}\n".format(rfh.read())
with open("../main.yml", "w") as wfh:
with open("main.yml") as rfh:
wfh.write(
"{}\n".format(
rfh.read()
.format(
jobs="{pre_commit}{lint}{test}".format(
lint=lint_job, test=test_jobs, pre_commit=pre_commit_job,
),
on="push, pull_request",
name="Testing",
)
.strip()
)
)
with open("../main-branch-only.yml", "w") as wfh:
with open("main.yml") as rfh:
wfh.write(
"{}\n".format(
rfh.read()
.format(
jobs="{test}".format(test=branch_only_test_jobs,),
on="push",
name="Branch Testing",
)
.strip()
)
)
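# Illustrative expansion of one matrix cell: distro "centos-7", branch "3002",
# python_version "py3", bootstrap_type "stable" renders linux.yml with
# display_name "CentOS 7 v3002 Py3 Stable" and the default timeout of 20 minutes.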
if __name__ == "__main__":
generate_test_jobs()
| 6,927
| 0
| 23
|
4c2b178af364b6b782db82646942cb0a6c95a702
| 17,831
|
py
|
Python
|
qiskit/aqua/algorithms/single_sample/shor/shor.py
|
Nick-Singstock/qiskit-aqua
|
8c2bc57b78dec447faec3adbc966471a3206c2ef
|
[
"Apache-2.0"
] | 1
|
2020-11-06T01:09:28.000Z
|
2020-11-06T01:09:28.000Z
|
qiskit/aqua/algorithms/single_sample/shor/shor.py
|
Nick-Singstock/qiskit-aqua
|
8c2bc57b78dec447faec3adbc966471a3206c2ef
|
[
"Apache-2.0"
] | null | null | null |
qiskit/aqua/algorithms/single_sample/shor/shor.py
|
Nick-Singstock/qiskit-aqua
|
8c2bc57b78dec447faec3adbc966471a3206c2ef
|
[
"Apache-2.0"
] | 1
|
2020-11-06T01:09:43.000Z
|
2020-11-06T01:09:43.000Z
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM Corp. 2017 and later.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Shor's factoring algorithm.
"""
import math
import array
import fractions
import logging
import numpy as np
from qiskit import ClassicalRegister, QuantumCircuit, QuantumRegister
from qiskit.aqua.utils.arithmetic import is_power
from qiskit.aqua import AquaError, Pluggable
from qiskit.aqua.utils import get_subsystem_density_matrix
from qiskit.aqua.algorithms import QuantumAlgorithm
from qiskit.aqua.circuits import FourierTransformCircuits as ftc
from qiskit.aqua.circuits.gates import mcu1
from qiskit.aqua.utils import summarize_circuits
logger = logging.getLogger(__name__)
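def modinv(a, m):
    """Return the modular multiplicative inverse of a modulo m.

    Minimal helper sketch: _controlled_multiple_mod_N below calls modinv(),
    but no such function is defined or imported in this file, so a standard
    extended-Euclidean implementation is assumed here.
    """
    old_r, r = a, m
    old_s, s = 1, 0
    while r != 0:
        quotient = old_r // r
        old_r, r = r, old_r - quotient * r
        old_s, s = s, old_s - quotient * s
    if old_r != 1:
        raise AquaError('{} has no inverse modulo {}.'.format(a, m))
    return old_s % m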
class Shor(QuantumAlgorithm):
"""
    Shor's factoring algorithm.
Adapted from https://github.com/ttlion/ShorAlgQiskit
"""
PROP_N = 'N'
PROP_A = 'a'
CONFIGURATION = {
'name': 'Shor',
'description': "The Shor's Factoring Algorithm",
'input_schema': {
'$schema': 'http://json-schema.org/schema#',
'id': 'shor_schema',
'type': 'object',
'properties': {
PROP_N: {
'type': 'integer',
'default': 15,
'minimum': 3
},
PROP_A: {
'type': 'integer',
'default': 2,
'minimum': 2
},
},
'additionalProperties': False
},
'problems': ['factoring'],
}
def __init__(self, N=15, a=2):
"""
Constructor.
Args:
N (int): The integer to be factored.
a (int): A random integer a that satisfies a < N and gcd(a, N) = 1
"""
self.validate(locals())
super().__init__()
# check the input integer
if N < 1 or N % 2 == 0:
raise AquaError('The input needs to be an odd integer greater than 1.')
self._N = N
if a >= N or math.gcd(a, self._N) != 1:
raise AquaError('The integer a needs to satisfy a < N and gcd(a, N) = 1.')
self._a = a
self._ret = {'factors': []}
# check if the input integer is a power
tf, b, p = is_power(N, return_decomposition=True)
if tf:
logger.info('The input integer is a power: {}={}^{}.'.format(N, b, p))
self._ret['factors'].append(b)
@classmethod
def init_params(cls, params, algo_input):
"""
Initialize via parameters dictionary and algorithm input instance.
Args:
params: parameters dictionary
algo_input: input instance
"""
if algo_input is not None:
raise AquaError("Input instance not supported.")
shor_params = params.get(Pluggable.SECTION_KEY_ALGORITHM)
N = shor_params.get(Shor.PROP_N)
return cls(N)
def _get_angles(self, a):
"""
Calculate the array of angles to be used in the addition in Fourier Space
"""
s = bin(int(a))[2:].zfill(self._n + 1)
angles = np.zeros([self._n + 1])
for i in range(0, self._n + 1):
for j in range(i, self._n + 1):
if s[j] == '1':
angles[self._n - i] += math.pow(2, -(j - i))
angles[self._n - i] *= np.pi
return angles
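    # Worked example (n = 2, a = 3): s = '011', giving
    # angles = [pi, 1.5*pi, 0.75*pi] -- the phase rotations that add 3 to a
    # register held in Fourier space.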
def _phi_add(self, circuit, q, inverse=False):
"""
        Creation of the circuit that performs addition by N (the integer being factored) in Fourier Space
Can also be used for subtraction by setting the parameter inverse=True
"""
angle = self._get_angles(self._N)
for i in range(0, self._n + 1):
circuit.u1(-angle[i] if inverse else angle[i], q[i])
def _controlled_phi_add(self, circuit, q, ctl, inverse=False):
"""
Single controlled version of the _phi_add circuit
"""
angles = self._get_angles(self._N)
for i in range(0, self._n + 1):
angle = (-angles[i] if inverse else angles[i]) / 2
circuit.u1(angle, ctl)
circuit.cx(ctl, q[i])
circuit.u1(-angle, q[i])
circuit.cx(ctl, q[i])
circuit.u1(angle, q[i])
def _controlled_controlled_phi_add(self, circuit, q, ctl1, ctl2, a, inverse=False):
"""
Doubly controlled version of the _phi_add circuit
"""
angle = self._get_angles(a)
for i in range(self._n + 1):
# ccphase(circuit, -angle[i] if inverse else angle[i], ctl1, ctl2, q[i])
circuit.mcu1(-angle[i] if inverse else angle[i], [ctl1, ctl2], q[i])
def _controlled_controlled_phi_add_mod_N(self, circuit, q, ctl1, ctl2, aux, a):
"""
Circuit that implements doubly controlled modular addition by a
"""
self._controlled_controlled_phi_add(circuit, q, ctl1, ctl2, a)
self._phi_add(circuit, q, inverse=True)
ftc.construct_circuit(
circuit=circuit,
qubits=[q[i] for i in reversed(range(self._n + 1))],
do_swaps=False,
inverse=True
)
circuit.cx(q[self._n], aux)
ftc.construct_circuit(
circuit=circuit,
qubits=[q[i] for i in reversed(range(self._n + 1))],
do_swaps=False
)
self._controlled_phi_add(circuit, q, aux)
self._controlled_controlled_phi_add(circuit, q, ctl1, ctl2, a, inverse=True)
ftc.construct_circuit(
circuit=circuit,
qubits=[q[i] for i in reversed(range(self._n + 1))],
do_swaps=False,
inverse=True
)
circuit.u3(np.pi, 0, np.pi, q[self._n])
circuit.cx(q[self._n], aux)
circuit.u3(np.pi, 0, np.pi, q[self._n])
ftc.construct_circuit(
circuit=circuit,
qubits=[q[i] for i in reversed(range(self._n + 1))],
do_swaps=False
)
self._controlled_controlled_phi_add(circuit, q, ctl1, ctl2, a)
def _controlled_controlled_phi_add_mod_N_inv(self, circuit, q, ctl1, ctl2, aux, a):
"""
Circuit that implements the inverse of doubly controlled modular addition by a
"""
self._controlled_controlled_phi_add(circuit, q, ctl1, ctl2, a, inverse=True)
ftc.construct_circuit(
circuit=circuit,
qubits=[q[i] for i in reversed(range(self._n + 1))],
do_swaps=False,
inverse=True
)
circuit.u3(np.pi, 0, np.pi, q[self._n])
circuit.cx(q[self._n], aux)
circuit.u3(np.pi, 0, np.pi, q[self._n])
ftc.construct_circuit(
circuit=circuit,
qubits=[q[i] for i in reversed(range(self._n + 1))],
do_swaps=False
)
self._controlled_controlled_phi_add(circuit, q, ctl1, ctl2, a)
self._controlled_phi_add(circuit, q, aux, inverse=True)
ftc.construct_circuit(
circuit=circuit,
qubits=[q[i] for i in reversed(range(self._n + 1))],
do_swaps=False,
inverse=True
)
circuit.cx(q[self._n], aux)
ftc.construct_circuit(
circuit=circuit,
qubits=[q[i] for i in reversed(range(self._n + 1))],
do_swaps=False
)
self._phi_add(circuit, q)
self._controlled_controlled_phi_add(circuit, q, ctl1, ctl2, a, inverse=True)
def _controlled_multiple_mod_N(self, circuit, ctl, q, aux, a):
"""
Circuit that implements single controlled modular multiplication by a
"""
ftc.construct_circuit(
circuit=circuit,
qubits=[aux[i] for i in reversed(range(self._n + 1))],
do_swaps=False
)
for i in range(0, self._n):
self._controlled_controlled_phi_add_mod_N(
circuit,
aux,
q[i],
ctl,
aux[self._n + 1],
(2 ** i) * a % self._N
)
ftc.construct_circuit(
circuit=circuit,
qubits=[aux[i] for i in reversed(range(self._n + 1))],
do_swaps=False,
inverse=True
)
for i in range(0, self._n):
circuit.cswap(ctl, q[i], aux[i])
a_inv = modinv(a, self._N)
ftc.construct_circuit(
circuit=circuit,
qubits=[aux[i] for i in reversed(range(self._n + 1))],
do_swaps=False
)
for i in reversed(range(self._n)):
self._controlled_controlled_phi_add_mod_N_inv(
circuit,
aux,
q[i],
ctl,
aux[self._n + 1],
math.pow(2, i) * a_inv % self._N
)
ftc.construct_circuit(
circuit=circuit,
qubits=[aux[i] for i in reversed(range(self._n + 1))],
do_swaps=False,
inverse=True
)
def construct_circuit(self):
"""Construct circuit.
Returns:
QuantumCircuit: quantum circuit.
"""
# Get n value used in Shor's algorithm, to know how many qubits are used
self._n = math.ceil(math.log(self._N, 2))
# quantum register where the sequential QFT is performed
self._up_qreg = QuantumRegister(2 * self._n, name='up')
# quantum register where the multiplications are made
self._down_qreg = QuantumRegister(self._n, name='down')
        # auxiliary quantum register used in addition and multiplication
self._aux_qreg = QuantumRegister(self._n + 2, name='aux')
# Create Quantum Circuit
circuit = QuantumCircuit(self._up_qreg, self._down_qreg, self._aux_qreg)
# Initialize down register to 1 and create maximal superposition in top register
circuit.u2(0, np.pi, self._up_qreg)
circuit.u3(np.pi, 0, np.pi, self._down_qreg[0])
        # Apply the multiplication gates as shown in the report in order to create the exponentiation
for i in range(0, 2 * self._n):
self._controlled_multiple_mod_N(
circuit,
self._up_qreg[i],
self._down_qreg,
self._aux_qreg,
int(pow(self._a, pow(2, i)))
)
# Apply inverse QFT
ftc.construct_circuit(circuit=circuit, qubits=self._up_qreg, do_swaps=True, inverse=True)
logger.info(summarize_circuits(circuit))
return circuit
def _get_factors(self, output_desired, t_upper):
"""
Apply the continued fractions to find r and the gcd to find the desired factors.
"""
x_value = int(output_desired, 2)
logger.info('In decimal, x_final value for this result is: {0}.'.format(x_value))
if x_value <= 0:
self._ret['results'][output_desired] = 'x_value is <= 0, there are no continued fractions.'
return False
logger.debug('Running continued fractions for this case.')
# Calculate T and x/T
T = pow(2, t_upper)
x_over_T = x_value / T
# Cycle in which each iteration corresponds to putting one more term in the
# calculation of the Continued Fraction (CF) of x/T
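        # Illustrative case (N = 15, a = 2, t_upper = 4): measuring x = 12
        # gives 12/16; its continued fraction yields the even denominator
        # r = 4, so 2**(4/2) = 4 and gcd(4 + 1, 15) = 5, gcd(4 - 1, 15) = 3
        # recover the factors.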
# Initialize the first values according to CF rule
i = 0
b = array.array('i')
t = array.array('f')
b.append(math.floor(x_over_T))
t.append(x_over_T - b[i])
while i >= 0:
# From the 2nd iteration onwards, calculate the new terms of the CF based
# on the previous terms as the rule suggests
if i > 0:
b.append(math.floor(1 / t[i - 1]))
t.append((1 / t[i - 1]) - b[i])
# Calculate the CF using the known terms
aux = 0
j = i
while j > 0:
aux = 1 / (b[j] + aux)
j = j - 1
aux = aux + b[0]
# Get the denominator from the value obtained
frac = fractions.Fraction(aux).limit_denominator()
denominator = frac.denominator
logger.debug('Approximation number {0} of continued fractions:'.format(i + 1))
logger.debug("Numerator:{0} \t\t Denominator: {1}.".format(frac.numerator, frac.denominator))
# Increment i for next iteration
i = i + 1
if denominator % 2 == 1:
if i >= self._N:
self._ret['results'][output_desired] = 'unable to find factors after too many attempts.'
return False
logger.debug('Odd denominator, will try next iteration of continued fractions.')
continue
# If denominator even, try to get factors of N
# Get the exponential a^(r/2)
exponential = 0
if denominator < 1000:
exponential = pow(self._a, denominator / 2)
# Check if the value is too big or not
if math.isinf(exponential) or exponential > 1000000000:
self._ret['results'][output_desired] = 'denominator of continued fraction is too big.'
return False
            # If the value is not too big (infinity), then get the right values and do the proper gcd()
putting_plus = int(exponential + 1)
putting_minus = int(exponential - 1)
one_factor = math.gcd(putting_plus, self._N)
other_factor = math.gcd(putting_minus, self._N)
# Check if the factors found are trivial factors or are the desired factors
if one_factor == 1 or one_factor == self._N or other_factor == 1 or other_factor == self._N:
logger.debug('Found just trivial factors, not good enough.')
# Check if the number has already been found, use i-1 because i was already incremented
if t[i - 1] == 0:
self._ret['results'][output_desired] = 'the continued fractions found exactly x_final/(2^(2n)).'
return False
if i >= self._N:
self._ret['results'][output_desired] = 'unable to find factors after too many attempts.'
return False
else:
logger.debug('The factors of {0} are {1} and {2}.'.format(self._N, one_factor, other_factor))
logger.debug('Found the desired factors.')
self._ret['results'][output_desired] = (one_factor, other_factor)
factors = sorted((one_factor, other_factor))
if factors not in self._ret['factors']:
self._ret['factors'].append(factors)
return True
| 36.464213
| 119
| 0.555213
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM Corp. 2017 and later.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Shor's factoring algorithm.
"""
import math
import array
import fractions
import logging
import numpy as np
from qiskit import ClassicalRegister, QuantumCircuit, QuantumRegister
from qiskit.aqua.utils.arithmetic import is_power
from qiskit.aqua import AquaError, Pluggable
from qiskit.aqua.utils import get_subsystem_density_matrix
from qiskit.aqua.algorithms import QuantumAlgorithm
from qiskit.aqua.circuits import FourierTransformCircuits as ftc
from qiskit.aqua.circuits.gates import mcu1
from qiskit.aqua.utils import summarize_circuits
logger = logging.getLogger(__name__)
class Shor(QuantumAlgorithm):
"""
    Shor's factoring algorithm.
Adapted from https://github.com/ttlion/ShorAlgQiskit
"""
PROP_N = 'N'
PROP_A = 'a'
CONFIGURATION = {
'name': 'Shor',
'description': "The Shor's Factoring Algorithm",
'input_schema': {
'$schema': 'http://json-schema.org/schema#',
'id': 'shor_schema',
'type': 'object',
'properties': {
PROP_N: {
'type': 'integer',
'default': 15,
'minimum': 3
},
PROP_A: {
'type': 'integer',
'default': 2,
'minimum': 2
},
},
'additionalProperties': False
},
'problems': ['factoring'],
}
def __init__(self, N=15, a=2):
"""
Constructor.
Args:
N (int): The integer to be factored.
a (int): A random integer a that satisfies a < N and gcd(a, N) = 1
"""
self.validate(locals())
super().__init__()
# check the input integer
if N < 1 or N % 2 == 0:
raise AquaError('The input needs to be an odd integer greater than 1.')
self._N = N
if a >= N or math.gcd(a, self._N) != 1:
raise AquaError('The integer a needs to satisfy a < N and gcd(a, N) = 1.')
self._a = a
self._ret = {'factors': []}
# check if the input integer is a power
tf, b, p = is_power(N, return_decomposition=True)
if tf:
logger.info('The input integer is a power: {}={}^{}.'.format(N, b, p))
self._ret['factors'].append(b)
@classmethod
def init_params(cls, params, algo_input):
"""
Initialize via parameters dictionary and algorithm input instance.
Args:
params: parameters dictionary
algo_input: input instance
"""
if algo_input is not None:
raise AquaError("Input instance not supported.")
shor_params = params.get(Pluggable.SECTION_KEY_ALGORITHM)
N = shor_params.get(Shor.PROP_N)
return cls(N)
def _get_angles(self, a):
"""
Calculate the array of angles to be used in the addition in Fourier Space
"""
s = bin(int(a))[2:].zfill(self._n + 1)
angles = np.zeros([self._n + 1])
for i in range(0, self._n + 1):
for j in range(i, self._n + 1):
if s[j] == '1':
angles[self._n - i] += math.pow(2, -(j - i))
angles[self._n - i] *= np.pi
return angles
def _phi_add(self, circuit, q, inverse=False):
"""
        Creation of the circuit that performs addition by N in Fourier Space
Can also be used for subtraction by setting the parameter inverse=True
"""
angle = self._get_angles(self._N)
for i in range(0, self._n + 1):
circuit.u1(-angle[i] if inverse else angle[i], q[i])
def _controlled_phi_add(self, circuit, q, ctl, inverse=False):
"""
Single controlled version of the _phi_add circuit
"""
angles = self._get_angles(self._N)
for i in range(0, self._n + 1):
angle = (-angles[i] if inverse else angles[i]) / 2
circuit.u1(angle, ctl)
circuit.cx(ctl, q[i])
circuit.u1(-angle, q[i])
circuit.cx(ctl, q[i])
circuit.u1(angle, q[i])
def _controlled_controlled_phi_add(self, circuit, q, ctl1, ctl2, a, inverse=False):
"""
Doubly controlled version of the _phi_add circuit
"""
angle = self._get_angles(a)
for i in range(self._n + 1):
# ccphase(circuit, -angle[i] if inverse else angle[i], ctl1, ctl2, q[i])
circuit.mcu1(-angle[i] if inverse else angle[i], [ctl1, ctl2], q[i])
def _controlled_controlled_phi_add_mod_N(self, circuit, q, ctl1, ctl2, aux, a):
"""
Circuit that implements doubly controlled modular addition by a
"""
self._controlled_controlled_phi_add(circuit, q, ctl1, ctl2, a)
self._phi_add(circuit, q, inverse=True)
ftc.construct_circuit(
circuit=circuit,
qubits=[q[i] for i in reversed(range(self._n + 1))],
do_swaps=False,
inverse=True
)
circuit.cx(q[self._n], aux)
ftc.construct_circuit(
circuit=circuit,
qubits=[q[i] for i in reversed(range(self._n + 1))],
do_swaps=False
)
self._controlled_phi_add(circuit, q, aux)
self._controlled_controlled_phi_add(circuit, q, ctl1, ctl2, a, inverse=True)
ftc.construct_circuit(
circuit=circuit,
qubits=[q[i] for i in reversed(range(self._n + 1))],
do_swaps=False,
inverse=True
)
circuit.u3(np.pi, 0, np.pi, q[self._n])
circuit.cx(q[self._n], aux)
circuit.u3(np.pi, 0, np.pi, q[self._n])
ftc.construct_circuit(
circuit=circuit,
qubits=[q[i] for i in reversed(range(self._n + 1))],
do_swaps=False
)
self._controlled_controlled_phi_add(circuit, q, ctl1, ctl2, a)
def _controlled_controlled_phi_add_mod_N_inv(self, circuit, q, ctl1, ctl2, aux, a):
"""
Circuit that implements the inverse of doubly controlled modular addition by a
"""
self._controlled_controlled_phi_add(circuit, q, ctl1, ctl2, a, inverse=True)
ftc.construct_circuit(
circuit=circuit,
qubits=[q[i] for i in reversed(range(self._n + 1))],
do_swaps=False,
inverse=True
)
circuit.u3(np.pi, 0, np.pi, q[self._n])
circuit.cx(q[self._n], aux)
circuit.u3(np.pi, 0, np.pi, q[self._n])
ftc.construct_circuit(
circuit=circuit,
qubits=[q[i] for i in reversed(range(self._n + 1))],
do_swaps=False
)
self._controlled_controlled_phi_add(circuit, q, ctl1, ctl2, a)
self._controlled_phi_add(circuit, q, aux, inverse=True)
ftc.construct_circuit(
circuit=circuit,
qubits=[q[i] for i in reversed(range(self._n + 1))],
do_swaps=False,
inverse=True
)
circuit.cx(q[self._n], aux)
ftc.construct_circuit(
circuit=circuit,
qubits=[q[i] for i in reversed(range(self._n + 1))],
do_swaps=False
)
self._phi_add(circuit, q)
self._controlled_controlled_phi_add(circuit, q, ctl1, ctl2, a, inverse=True)
def _controlled_multiple_mod_N(self, circuit, ctl, q, aux, a):
"""
Circuit that implements single controlled modular multiplication by a
"""
ftc.construct_circuit(
circuit=circuit,
qubits=[aux[i] for i in reversed(range(self._n + 1))],
do_swaps=False
)
for i in range(0, self._n):
self._controlled_controlled_phi_add_mod_N(
circuit,
aux,
q[i],
ctl,
aux[self._n + 1],
(2 ** i) * a % self._N
)
ftc.construct_circuit(
circuit=circuit,
qubits=[aux[i] for i in reversed(range(self._n + 1))],
do_swaps=False,
inverse=True
)
for i in range(0, self._n):
circuit.cswap(ctl, q[i], aux[i])
def modinv(a, m):
def egcd(a, b):
if a == 0:
return (b, 0, 1)
else:
g, y, x = egcd(b % a, a)
return (g, x - (b // a) * y, y)
g, x, y = egcd(a, m)
if g != 1:
raise Exception('modular inverse does not exist')
else:
return x % m
a_inv = modinv(a, self._N)
ftc.construct_circuit(
circuit=circuit,
qubits=[aux[i] for i in reversed(range(self._n + 1))],
do_swaps=False
)
for i in reversed(range(self._n)):
self._controlled_controlled_phi_add_mod_N_inv(
circuit,
aux,
q[i],
ctl,
aux[self._n + 1],
math.pow(2, i) * a_inv % self._N
)
ftc.construct_circuit(
circuit=circuit,
qubits=[aux[i] for i in reversed(range(self._n + 1))],
do_swaps=False,
inverse=True
)
def construct_circuit(self):
"""Construct circuit.
Returns:
QuantumCircuit: quantum circuit.
"""
# Get n value used in Shor's algorithm, to know how many qubits are used
self._n = math.ceil(math.log(self._N, 2))
# quantum register where the sequential QFT is performed
self._up_qreg = QuantumRegister(2 * self._n, name='up')
# quantum register where the multiplications are made
self._down_qreg = QuantumRegister(self._n, name='down')
        # auxiliary quantum register used in addition and multiplication
self._aux_qreg = QuantumRegister(self._n + 2, name='aux')
# Create Quantum Circuit
circuit = QuantumCircuit(self._up_qreg, self._down_qreg, self._aux_qreg)
# Initialize down register to 1 and create maximal superposition in top register
circuit.u2(0, np.pi, self._up_qreg)
circuit.u3(np.pi, 0, np.pi, self._down_qreg[0])
        # Apply the multiplication gates as shown in the report to create the exponentiation
for i in range(0, 2 * self._n):
self._controlled_multiple_mod_N(
circuit,
self._up_qreg[i],
self._down_qreg,
self._aux_qreg,
int(pow(self._a, pow(2, i)))
)
# Apply inverse QFT
ftc.construct_circuit(circuit=circuit, qubits=self._up_qreg, do_swaps=True, inverse=True)
logger.info(summarize_circuits(circuit))
return circuit
def _get_factors(self, output_desired, t_upper):
"""
Apply the continued fractions to find r and the gcd to find the desired factors.
"""
x_value = int(output_desired, 2)
logger.info('In decimal, x_final value for this result is: {0}.'.format(x_value))
if x_value <= 0:
self._ret['results'][output_desired] = 'x_value is <= 0, there are no continued fractions.'
return False
logger.debug('Running continued fractions for this case.')
# Calculate T and x/T
T = pow(2, t_upper)
x_over_T = x_value / T
# Cycle in which each iteration corresponds to putting one more term in the
# calculation of the Continued Fraction (CF) of x/T
# Initialize the first values according to CF rule
i = 0
b = array.array('i')
t = array.array('f')
b.append(math.floor(x_over_T))
t.append(x_over_T - b[i])
while i >= 0:
# From the 2nd iteration onwards, calculate the new terms of the CF based
# on the previous terms as the rule suggests
if i > 0:
b.append(math.floor(1 / t[i - 1]))
t.append((1 / t[i - 1]) - b[i])
# Calculate the CF using the known terms
aux = 0
j = i
while j > 0:
aux = 1 / (b[j] + aux)
j = j - 1
aux = aux + b[0]
# Get the denominator from the value obtained
frac = fractions.Fraction(aux).limit_denominator()
denominator = frac.denominator
logger.debug('Approximation number {0} of continued fractions:'.format(i + 1))
logger.debug("Numerator:{0} \t\t Denominator: {1}.".format(frac.numerator, frac.denominator))
# Increment i for next iteration
i = i + 1
if denominator % 2 == 1:
if i >= self._N:
self._ret['results'][output_desired] = 'unable to find factors after too many attempts.'
return False
logger.debug('Odd denominator, will try next iteration of continued fractions.')
continue
# If denominator even, try to get factors of N
# Get the exponential a^(r/2)
exponential = 0
if denominator < 1000:
exponential = pow(self._a, denominator / 2)
# Check if the value is too big or not
if math.isinf(exponential) or exponential > 1000000000:
self._ret['results'][output_desired] = 'denominator of continued fraction is too big.'
return False
            # If the value is not too big (not infinity), get the right values and do the proper gcd()
putting_plus = int(exponential + 1)
putting_minus = int(exponential - 1)
one_factor = math.gcd(putting_plus, self._N)
other_factor = math.gcd(putting_minus, self._N)
# Check if the factors found are trivial factors or are the desired factors
if one_factor == 1 or one_factor == self._N or other_factor == 1 or other_factor == self._N:
logger.debug('Found just trivial factors, not good enough.')
# Check if the number has already been found, use i-1 because i was already incremented
if t[i - 1] == 0:
self._ret['results'][output_desired] = 'the continued fractions found exactly x_final/(2^(2n)).'
return False
if i >= self._N:
self._ret['results'][output_desired] = 'unable to find factors after too many attempts.'
return False
else:
logger.debug('The factors of {0} are {1} and {2}.'.format(self._N, one_factor, other_factor))
logger.debug('Found the desired factors.')
self._ret['results'][output_desired] = (one_factor, other_factor)
factors = sorted((one_factor, other_factor))
if factors not in self._ret['factors']:
self._ret['factors'].append(factors)
return True
def _run(self):
if not self._ret['factors']:
logger.debug('Running with N={} and a={}.'.format(self._N, self._a))
circuit = self.construct_circuit()
if self._quantum_instance.is_statevector:
logger.warning('The statevector_simulator might lead to subsequent computation using too much memory.')
result = self._quantum_instance.execute(circuit)
complete_state_vec = result.get_statevector(circuit)
# TODO: this uses too much memory
up_qreg_density_mat = get_subsystem_density_matrix(
complete_state_vec,
range(2 * self._n, 4 * self._n + 2)
)
up_qreg_density_mat_diag = np.diag(up_qreg_density_mat)
counts = dict()
for i, v in enumerate(up_qreg_density_mat_diag):
if not v == 0:
counts[bin(int(i))[2:].zfill(2 * self._n)] = v ** 2
else:
up_cqreg = ClassicalRegister(2 * self._n, name='m')
circuit.add_register(up_cqreg)
circuit.measure(self._up_qreg, up_cqreg)
counts = self._quantum_instance.execute(circuit).get_counts(circuit)
self._ret['results'] = dict()
# For each simulation result, print proper info to user and try to calculate the factors of N
for output_desired in list(counts.keys()):
# Get the x_value from the final state qubits
logger.info("------> Analyzing result {0}.".format(output_desired))
self._ret['results'][output_desired] = None
success = self._get_factors(output_desired, int(2 * self._n))
if success:
logger.info('Found factors {} from measurement {}.'.format(
self._ret['results'][output_desired], output_desired
))
else:
logger.info('Cannot find factors from measurement {} because {}'.format(
output_desired, self._ret['results'][output_desired]
))
return self._ret
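A sketch of the classical post-processing step in isolation, under the assumptions N=15, a=2, and an illustrative measured value x=4 over t=4 counting bits (none of which come from an actual run):
from fractions import Fraction
import math

N, a = 15, 2
x, t = 4, 4                                               # hypothetical measurement
r = Fraction(x, 2 ** t).limit_denominator(N).denominator  # candidate period: 4
if r % 2 == 0:
    half = pow(a, r // 2)                                 # a^(r/2) = 4
    print(math.gcd(half + 1, N), math.gcd(half - 1, N))   # 5 3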
| 2,572
| 0
| 58
|
14d1776a23dbeff91b7b113a7ec6193886a74ae5
| 3,802
|
py
|
Python
|
src/game.py
|
Ale-XYX/Contrast
|
6daf08e14826fbe382a6a8bbaa53f6c5a0494383
|
[
"Apache-2.0"
] | null | null | null |
src/game.py
|
Ale-XYX/Contrast
|
6daf08e14826fbe382a6a8bbaa53f6c5a0494383
|
[
"Apache-2.0"
] | null | null | null |
src/game.py
|
Ale-XYX/Contrast
|
6daf08e14826fbe382a6a8bbaa53f6c5a0494383
|
[
"Apache-2.0"
] | 2
|
2020-02-03T14:04:11.000Z
|
2020-05-15T16:44:33.000Z
|
import re
import bz2
import pygame
import public
import sprites
import functions
import dictionaries
import random
# :^)
| 25.689189
| 80
| 0.583377
|
import re
import bz2
import pygame
import public
import sprites
import functions
import dictionaries
import random
def title(debug):
pygame.display.set_caption('Contrast')
pygame.display.set_icon(pygame.image.fromstring(bz2.decompress(
dictionaries.MEDIA['icon']), (32, 32), 'RGBA'))
info_text = public.FONT_LG.render(
'ENTER TO BEGIN', False, [public.WHITE] * 3)
play_button = sprites.Button((343, 290), 'Play')
music_button = sprites.Button((407, 290), 'Music')
button_cover = pygame.Surface((public.SWIDTH, 10))
button_cover.fill([public.BLACK] * 3)
if len(debug) != 1:
public.music = False
m = re.search('map_(.+?).tmx', debug[1])
if m:
public.level = int(m.group(1))
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
return 0
public.all_sprites.update()
if public.end_title:
game()
return 0
public.screen.fill([public.BLACK] * 3)
public.screen.blit(dictionaries.IMAGES['Logo'], functions.center(
dictionaries.IMAGES['Logo']))
for sprite in public.all_sprites:
sprite.draw()
public.screen.blit(button_cover, (0, 345))
pygame.display.flip()
public.clock.tick(public.FPS)
def game():
if public.music:
dictionaries.MEDIA['greetings'].play(-1)
functions.generate_clouds()
functions.generate_level(True)
dt = public.clock.tick(public.FPS) / 1000
cover_alpha = 0
cover_surf = pygame.Surface((public.SWIDTH, public.SHEIGHT))
cover_surf.set_alpha(cover_alpha)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
return 0
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_w:
public.player.jump()
elif event.key == pygame.K_SPACE:
public.player.flip()
keys = pygame.key.get_pressed()
if keys[pygame.K_d] and not (public.player.died or public.player.won):
public.player.move('right')
elif keys[pygame.K_a] and not (public.player.died or public.player.won):
public.player.move('left')
else:
public.player.accelerating = False
if public.player.won and cover_alpha != 255:
cover_alpha += 1
cover_surf.set_alpha(cover_alpha)
if cover_alpha == 255:
end('A GAME BY TEAM-ABSTRACTANDROID')
return 0
if public.level == public.level_max:
end('More levels to come soon!')
return 0
public.all_sprites.update()
sorted_sprites = sorted(
public.all_sprites.sprites(), key=lambda x: x.layer)
public.screen.fill([public.bg_type] * 3)
for sprite in sorted_sprites:
sprite.draw()
public.screen.blit(cover_surf, (0, 0))
pygame.display.flip()
public.clock.tick(public.FPS)
def end(msg):
text_alpha = 0
credits_text = public.FONT_LG.render(
msg, False, [public.WHITE] * 3)
credits_text.set_alpha(text_alpha)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
return 0
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
return 0
if text_alpha != 255:
text_alpha += 5
credits_text.set_alpha(text_alpha)
public.screen.fill([public.BLACK] * 3)
public.screen.blit(credits_text, functions.center(credits_text))
pygame.display.flip()
public.clock.tick(public.FPS)
# :^)
| 3,608
| 0
| 69
|
2e0aa5f7b3230ca90001a4c7c190460a296a87de
| 6,243
|
py
|
Python
|
tabledataextractor/input/from_html.py
|
ELchem/tabledataextractor
|
9eb38faf57611c26cdcaa8df13fd4e1cf36a4c21
|
[
"MIT"
] | 4
|
2021-09-01T18:28:10.000Z
|
2022-03-29T09:43:34.000Z
|
tabledataextractor/input/from_html.py
|
ELchem/tabledataextractor
|
9eb38faf57611c26cdcaa8df13fd4e1cf36a4c21
|
[
"MIT"
] | 3
|
2021-11-13T21:17:27.000Z
|
2021-11-15T18:29:14.000Z
|
tabledataextractor/input/from_html.py
|
ELchem/tabledataextractor
|
9eb38faf57611c26cdcaa8df13fd4e1cf36a4c21
|
[
"MIT"
] | 2
|
2021-10-07T01:20:39.000Z
|
2021-11-02T17:56:06.000Z
|
# -*- coding: utf-8 -*-
"""
Reads an `html` formatted table.
"""
import numpy as np
from bs4 import BeautifulSoup
import requests
from selenium import webdriver
from selenium.webdriver.firefox.options import Options as FirefoxOptions
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.edge.options import Options as EdgeOptions
from selenium.webdriver.ie.options import Options as IeOptions
import copy
import logging
from tabledataextractor.exceptions import InputError
log = logging.getLogger(__name__)
def makearray(html_table):
"""
    Creates a numpy array from an `.html` table element, taking `rowspan` and `colspan` into account.
Modified from:
John Ricco, https://johnricco.github.io/2017/04/04/python-html/, *Using Python to scrape HTML tables with merged cells*
Added functionality for duplicating cell content for cells with `rowspan`/`colspan`.
The table has to be :math:`n*m`, rectangular, with the same number of columns in every row.
"""
n_cols = 0
n_rows = 0
for row in html_table.findAll("tr"):
col_tags = row.find_all(["td", "th"])
if len(col_tags) > 0:
n_rows += 1
if len(col_tags) > n_cols:
n_cols = len(col_tags)
# according to numpy documentation fill_value should be of type Union[int, float, complex]
# however, 'str' works just fine
array = np.full((n_rows, n_cols), fill_value="", dtype='<U60')
# list to store rowspan values
skip_index = [0 for i in range(0, n_cols)]
# iterating over each row in the table
row_counter = 0
for row in html_table.findAll("tr"):
# skip row if it's empty
if len(row.find_all(["td", "th"])) == 0:
continue
else:
# get all the cells containing data in this row
columns = row.find_all(["td", "th"])
col_dim = []
row_dim = []
col_dim_counter = -1
row_dim_counter = -1
col_counter = -1
this_skip_index = copy.deepcopy(skip_index)
for col in columns:
# determine all cell dimensions
colspan = col.get("colspan")
if not colspan:
col_dim.append(1)
else:
col_dim.append(int(colspan))
col_dim_counter += 1
rowspan = col.get("rowspan")
if not rowspan:
row_dim.append(1)
else:
row_dim.append(int(rowspan))
row_dim_counter += 1
# adjust column counter
if col_counter == -1:
col_counter = 0
else:
col_counter = col_counter + col_dim[col_dim_counter - 1]
while skip_index[col_counter] > 0:
col_counter += 1
# get cell contents
cell_data = col.get_text()
# insert data into cell
array[row_counter, col_counter] = cell_data
# Insert data into neighbouring rowspan/colspan cells
if colspan:
for spanned_col in range(col_counter+1, col_counter + int(colspan)):
array[row_counter, spanned_col] = cell_data
if rowspan:
for spanned_row in range(row_counter+1, row_counter + int(rowspan)):
array[spanned_row, col_counter] = cell_data
                # record the column-skipping index
if row_dim[row_dim_counter] > 1:
this_skip_index[col_counter] = row_dim[row_dim_counter]
# adjust row counter
row_counter += 1
# adjust column skipping index
skip_index = [i - 1 if i > 0 else i for i in this_skip_index]
return array
def read_file(file_path, table_number=1):
"""Reads an .html file and returns a numpy array."""
file = open(file_path, encoding='UTF-8')
html_soup = BeautifulSoup(file, features='lxml')
file.close()
html_table = html_soup.find_all("table")[table_number-1]
array = makearray(html_table)
return array
def configure_selenium(browser='Firefox'):
"""
Configuration for `Selenium <https://selenium-python.readthedocs.io/>`_. Sets the path to ``geckodriver.exe``
:param browser: Which browser to use
:type browser: str
:return: Selenium driver
"""
if browser == 'Firefox':
options = FirefoxOptions()
options.headless = True
driver = webdriver.Firefox(options=options, executable_path=r'C:\Users\juras\System\geckodriver\geckodriver.exe')
return driver
else:
return None
def read_url(url, table_number=1):
"""
    Reads in a table from a URL and returns a numpy array. Will try `Requests <http://docs.python-requests.org/en/master/>`_ first. If it doesn't succeed, `Selenium <https://selenium-python.readthedocs.io/>`_ will be used.
:param url: Url of the page where the table is located
:type url: str
:param table_number: Number of Table on the web page.
:type table_number: int
"""
if not isinstance(table_number, int):
msg = 'Table number is not valid.'
log.critical(msg)
raise TypeError(msg)
# first try the requests package, if it fails do the selenium, which is much slower
try:
html_file = requests.get(url)
html_soup = BeautifulSoup(html_file.text, features='lxml')
html_table = html_soup.find_all("table")[table_number - 1]
array = makearray(html_table)
log.info("Package 'requests' was used.")
return array
except Exception:
driver = configure_selenium()
driver.get(url)
html_file = driver.page_source
html_soup = BeautifulSoup(html_file, features='lxml')
try:
html_table = html_soup.find_all("table")[table_number-1]
except IndexError:
raise InputError("table_number={} is out of range".format(table_number))
else:
array = makearray(html_table)
log.info("Package 'selenium' was used.")
return array
| 33.745946
| 223
| 0.606279
|
# -*- coding: utf-8 -*-
"""
Reads an `html` formatted table.
"""
import numpy as np
from bs4 import BeautifulSoup
import requests
from selenium import webdriver
from selenium.webdriver.firefox.options import Options as FirefoxOptions
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.edge.options import Options as EdgeOptions
from selenium.webdriver.ie.options import Options as IeOptions
import copy
import logging
from tabledataextractor.exceptions import InputError
log = logging.getLogger(__name__)
def makearray(html_table):
"""
    Creates a numpy array from an `.html` table element, taking `rowspan` and `colspan` into account.
Modified from:
John Ricco, https://johnricco.github.io/2017/04/04/python-html/, *Using Python to scrape HTML tables with merged cells*
Added functionality for duplicating cell content for cells with `rowspan`/`colspan`.
The table has to be :math:`n*m`, rectangular, with the same number of columns in every row.
"""
n_cols = 0
n_rows = 0
for row in html_table.findAll("tr"):
col_tags = row.find_all(["td", "th"])
if len(col_tags) > 0:
n_rows += 1
if len(col_tags) > n_cols:
n_cols = len(col_tags)
# according to numpy documentation fill_value should be of type Union[int, float, complex]
# however, 'str' works just fine
array = np.full((n_rows, n_cols), fill_value="", dtype='<U60')
# list to store rowspan values
skip_index = [0 for i in range(0, n_cols)]
# iterating over each row in the table
row_counter = 0
for row in html_table.findAll("tr"):
# skip row if it's empty
if len(row.find_all(["td", "th"])) == 0:
continue
else:
# get all the cells containing data in this row
columns = row.find_all(["td", "th"])
col_dim = []
row_dim = []
col_dim_counter = -1
row_dim_counter = -1
col_counter = -1
this_skip_index = copy.deepcopy(skip_index)
for col in columns:
# determine all cell dimensions
colspan = col.get("colspan")
if not colspan:
col_dim.append(1)
else:
col_dim.append(int(colspan))
col_dim_counter += 1
rowspan = col.get("rowspan")
if not rowspan:
row_dim.append(1)
else:
row_dim.append(int(rowspan))
row_dim_counter += 1
# adjust column counter
if col_counter == -1:
col_counter = 0
else:
col_counter = col_counter + col_dim[col_dim_counter - 1]
while skip_index[col_counter] > 0:
col_counter += 1
# get cell contents
cell_data = col.get_text()
# insert data into cell
array[row_counter, col_counter] = cell_data
# Insert data into neighbouring rowspan/colspan cells
if colspan:
for spanned_col in range(col_counter+1, col_counter + int(colspan)):
array[row_counter, spanned_col] = cell_data
if rowspan:
for spanned_row in range(row_counter+1, row_counter + int(rowspan)):
array[spanned_row, col_counter] = cell_data
                # record the column-skipping index
if row_dim[row_dim_counter] > 1:
this_skip_index[col_counter] = row_dim[row_dim_counter]
# adjust row counter
row_counter += 1
# adjust column skipping index
skip_index = [i - 1 if i > 0 else i for i in this_skip_index]
return array
def read_file(file_path, table_number=1):
"""Reads an .html file and returns a numpy array."""
file = open(file_path, encoding='UTF-8')
html_soup = BeautifulSoup(file, features='lxml')
file.close()
html_table = html_soup.find_all("table")[table_number-1]
array = makearray(html_table)
return array
def configure_selenium(browser='Firefox'):
"""
Configuration for `Selenium <https://selenium-python.readthedocs.io/>`_. Sets the path to ``geckodriver.exe``
:param browser: Which browser to use
:type browser: str
:return: Selenium driver
"""
if browser == 'Firefox':
options = FirefoxOptions()
options.headless = True
driver = webdriver.Firefox(options=options, executable_path=r'C:\Users\juras\System\geckodriver\geckodriver.exe')
return driver
else:
return None
def read_url(url, table_number=1):
"""
    Reads in a table from a URL and returns a numpy array. Will try `Requests <http://docs.python-requests.org/en/master/>`_ first. If it doesn't succeed, `Selenium <https://selenium-python.readthedocs.io/>`_ will be used.
:param url: Url of the page where the table is located
:type url: str
:param table_number: Number of Table on the web page.
:type table_number: int
"""
if not isinstance(table_number, int):
msg = 'Table number is not valid.'
log.critical(msg)
raise TypeError(msg)
# first try the requests package, if it fails do the selenium, which is much slower
try:
html_file = requests.get(url)
html_soup = BeautifulSoup(html_file.text, features='lxml')
html_table = html_soup.find_all("table")[table_number - 1]
array = makearray(html_table)
log.info("Package 'requests' was used.")
return array
except Exception:
driver = configure_selenium()
driver.get(url)
html_file = driver.page_source
html_soup = BeautifulSoup(html_file, features='lxml')
try:
html_table = html_soup.find_all("table")[table_number-1]
except IndexError:
raise InputError("table_number={} is out of range".format(table_number))
else:
array = makearray(html_table)
log.info("Package 'selenium' was used.")
return array
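A minimal sketch exercising makearray on an inline snippet; the two-row table with a rowspan is illustrative, not taken from the package's tests:
from bs4 import BeautifulSoup

html = '<table><tr><th rowspan="2">a</th><th>b</th></tr><tr><td>c</td></tr></table>'
table = BeautifulSoup(html, features='lxml').find('table')
print(makearray(table))
# [['a' 'b']
#  ['a' 'c']]   <- the rowspan cell 'a' is duplicated into the row below it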
| 0
| 0
| 0
|
8a2527c8ebf711cd89d50a2c1b007f80d07a457b
| 924
|
py
|
Python
|
07/script.py
|
has-ctrl/advent-of-code-2021
|
09d309feb5082f108ab690f9e37abf6150b7283d
|
[
"MIT"
] | null | null | null |
07/script.py
|
has-ctrl/advent-of-code-2021
|
09d309feb5082f108ab690f9e37abf6150b7283d
|
[
"MIT"
] | null | null | null |
07/script.py
|
has-ctrl/advent-of-code-2021
|
09d309feb5082f108ab690f9e37abf6150b7283d
|
[
"MIT"
] | null | null | null |
import numpy as np
test_data = np.array([16, 1, 2, 0, 4, 2, 7, 1, 2, 14])
np_data = np.loadtxt("data.txt", delimiter=",", dtype=int)
def one(data: np.ndarray) -> int:
"""
Determine the horizontal position that the crabs can align to using the least fuel possible. How much fuel must they
spend to align to that position?
"""
median = np.median(data).astype(int)
return np.absolute(data - median).sum()
def two(data: np.ndarray) -> int:
"""
Determine the horizontal position that the crabs can align to using the least fuel possible so they can make you an
escape route! How much fuel must they spend to align to that position?
"""
mean = np.mean(data).astype(int)
diff = np.absolute(data - mean)
    # The 'factorial for addition' (1 + 2 + ... + X) equals (X^2 + X) / 2
return ((diff * diff + diff) / 2).astype(int).sum()
print(f"1. {one(np_data)}")
print(f"2. {two(np_data)}")
| 30.8
| 120
| 0.650433
|
import numpy as np
test_data = np.array([16, 1, 2, 0, 4, 2, 7, 1, 2, 14])
np_data = np.loadtxt("data.txt", delimiter=",", dtype=int)
def one(data: np.ndarray) -> int:
"""
Determine the horizontal position that the crabs can align to using the least fuel possible. How much fuel must they
spend to align to that position?
"""
median = np.median(data).astype(int)
return np.absolute(data - median).sum()
def two(data: np.ndarray) -> int:
"""
Determine the horizontal position that the crabs can align to using the least fuel possible so they can make you an
escape route! How much fuel must they spend to align to that position?
"""
mean = np.mean(data).astype(int)
diff = np.absolute(data - mean)
    # The 'factorial for addition' (1 + 2 + ... + X) equals (X^2 + X) / 2
return ((diff * diff + diff) / 2).astype(int).sum()
print(f"1. {one(np_data)}")
print(f"2. {two(np_data)}")
| 0
| 0
| 0
|
46445e6276cdd339ed1cb28a14605af7c00ee8a9
| 787
|
py
|
Python
|
docs/src/callbackgen.py
|
aristanetworks/ctypegen
|
379f8e5c712c8deb0ed27cbf005d7706fa11e6e8
|
[
"Apache-2.0"
] | 17
|
2018-06-12T10:07:42.000Z
|
2022-03-23T14:03:33.000Z
|
docs/src/callbackgen.py
|
aristanetworks/ctypegen
|
379f8e5c712c8deb0ed27cbf005d7706fa11e6e8
|
[
"Apache-2.0"
] | 4
|
2018-10-29T17:55:34.000Z
|
2021-10-08T07:19:12.000Z
|
docs/src/callbackgen.py
|
aristanetworks/ctypegen
|
379f8e5c712c8deb0ed27cbf005d7706fa11e6e8
|
[
"Apache-2.0"
] | 7
|
2018-12-20T19:35:45.000Z
|
2021-05-18T03:42:17.000Z
|
# Copyright (c) 2018 Arista Networks, Inc. All rights reserved.
# Arista Networks, Inc. Confidential and Proprietary.
#
# DON'T EDIT THIS FILE. It was generated by
# /usr/local/lib/python2.7/dist-packages/CTypeGen.py
# Please see AID/3558 for details on the contents of this file
#
from ctypes import * # pylint: disable=wildcard-import
from CTypeGenRun import * # pylint: disable=wildcard-import
# pylint: disable=unnecessary-pass,protected-access
Callback = CFUNCTYPE( c_int, c_int
, c_int
)
functionTypes = {
'callme': CFUNCTYPE( c_int, c_int
, c_int
, Callback
),
}
if __name__ == "__main__":
test_classes()
| 21.861111
| 64
| 0.684879
|
# Copyright (c) 2018 Arista Networks, Inc. All rights reserved.
# Arista Networks, Inc. Confidential and Proprietary.
#
# DON'T EDIT THIS FILE. It was generated by
# /usr/local/lib/python2.7/dist-packages/CTypeGen.py
# Please see AID/3558 for details on the contents of this file
#
from ctypes import * # pylint: disable=wildcard-import
from CTypeGenRun import * # pylint: disable=wildcard-import
# pylint: disable=unnecessary-pass,protected-access
Callback = CFUNCTYPE( c_int, c_int
, c_int
)
def decorateFunctions( lib ):
lib.callme.restype = c_int
lib.callme.argtypes = [
c_int,
c_int,
Callback ]
functionTypes = {
'callme': CFUNCTYPE( c_int, c_int
, c_int
, Callback
),
}
if __name__ == "__main__":
test_classes()
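A hypothetical usage sketch; the shared-library name and its behaviour are assumptions, not part of the generated file:
# lib = CDLL('./libcallme.so')          # hypothetical library exporting callme
# decorateFunctions(lib)                # attach restype/argtypes from above
# add = Callback(lambda a, b: a + b)    # keep a reference alive: ctypes
#                                       # callbacks must outlive the native call
# print(lib.callme(1, 2, add))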
| 108
| 0
| 23
|
f5ccf91f07f564599f0a2cf7b1cc3268aa005d97
| 1,296
|
py
|
Python
|
generated-libraries/python/ports.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | 2
|
2017-03-28T15:31:26.000Z
|
2018-08-16T22:15:18.000Z
|
generated-libraries/python/ports.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | null | null | null |
generated-libraries/python/ports.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | null | null | null |
from netapp.connection import NaErrorResponse, NaPagedResponse
from netapp.net import NetConnection
from netapp.net.net_port_info import NetPortInfo
conn = NetConnection("192.168.135.100", "admin", "mehmeh123")
print "LISTING ALL PORTS:"
print "-----------------------------------------------"
query = NetPortInfo(node="radontap-02")
response = conn.net_port_get_iter( desired_attributes="node,port".split(","), query=query )
if isinstance(response, NaPagedResponse):
for npi in response.output:
print "{}: {}".format( npi.port, npi )
while response.has_more():
next = response.next()
if isinstance(next.result, NaErrorResponse):
print "There was an error: {} : {}".format( next.result.error_code, next.result.reason )
else:
for npi in next.output:
print "{}: {}".format( npi.port, npi )
elif isinstance(response, NaErrorResponse):
print "There was an error: {} : {}".format( response.error_code, response.reason )
else:
for npi in response:
print "{}: {}".format( npi.port, npi )
print "GET A SINGLE PORT:"
print "-----------------------------------------------"
port_info = conn.net_port_get( node="radontap-02", port="e0c", desired_attributes="node,port".split(",") )
print port_info
| 35.027027
| 106
| 0.622685
|
from netapp.connection import NaErrorResponse, NaPagedResponse
from netapp.net import NetConnection
from netapp.net.net_port_info import NetPortInfo
conn = NetConnection("192.168.135.100", "admin", "mehmeh123")
print "LISTING ALL PORTS:"
print "-----------------------------------------------"
query = NetPortInfo(node="radontap-02")
response = conn.net_port_get_iter( desired_attributes="node,port".split(","), query=query )
if isinstance(response, NaPagedResponse):
for npi in response.output:
print "{}: {}".format( npi.port, npi )
while response.has_more():
next = response.next()
if isinstance(next.result, NaErrorResponse):
print "There was an error: {} : {}".format( next.result.error_code, next.result.reason )
else:
for npi in next.output:
print "{}: {}".format( npi.port, npi )
elif isinstance(response, NaErrorResponse):
print "There was an error: {} : {}".format( response.error_code, response.reason )
else:
for npi in response:
print "{}: {}".format( npi.port, npi )
print "GET A SINGLE PORT:"
print "-----------------------------------------------"
port_info = conn.net_port_get( node="radontap-02", port="e0c", desired_attributes="node,port".split(",") )
print port_info
| 0
| 0
| 0
|
4041a20fc51def3b3801556656d9b21062ae0f2d
| 185
|
py
|
Python
|
torch/fx/experimental/unification/__init__.py
|
Hacky-DH/pytorch
|
80dc4be615854570aa39a7e36495897d8a040ecc
|
[
"Intel"
] | 60,067
|
2017-01-18T17:21:31.000Z
|
2022-03-31T21:37:45.000Z
|
torch/fx/experimental/unification/__init__.py
|
Hacky-DH/pytorch
|
80dc4be615854570aa39a7e36495897d8a040ecc
|
[
"Intel"
] | 66,955
|
2017-01-18T17:21:38.000Z
|
2022-03-31T23:56:11.000Z
|
torch/fx/experimental/unification/__init__.py
|
Hacky-DH/pytorch
|
80dc4be615854570aa39a7e36495897d8a040ecc
|
[
"Intel"
] | 19,210
|
2017-01-18T17:45:04.000Z
|
2022-03-31T23:51:56.000Z
|
# type: ignore[attr-defined]
from .core import unify, reify # noqa: F403
from .more import unifiable # noqa: F403
from .variable import var, isvar, vars, variables, Var # noqa: F403
| 37
| 68
| 0.724324
|
# type: ignore[attr-defined]
from .core import unify, reify # noqa: F403
from .more import unifiable # noqa: F403
from .variable import var, isvar, vars, variables, Var # noqa: F403
| 0
| 0
| 0
|
3be09ddb058024d53f0d37a425c547e2ad46cc57
| 2,147
|
py
|
Python
|
psinsights/rules.py
|
paulcronk/psinsights
|
cd465f20254fbdb30032ce40b6fe30d32de0d524
|
[
"Apache-2.0"
] | null | null | null |
psinsights/rules.py
|
paulcronk/psinsights
|
cd465f20254fbdb30032ce40b6fe30d32de0d524
|
[
"Apache-2.0"
] | null | null | null |
psinsights/rules.py
|
paulcronk/psinsights
|
cd465f20254fbdb30032ce40b6fe30d32de0d524
|
[
"Apache-2.0"
] | null | null | null |
###############################################################################
# Copyright 2012 FastSoft Inc.
# Copyright 2012 Devin Anderson <danderson (at) fastsoft (dot) com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
###############################################################################
from psinsights.rule import Rule as _Rule
| 28.626667
| 79
| 0.583605
|
###############################################################################
# Copyright 2012 FastSoft Inc.
# Copyright 2012 Devin Anderson <danderson (at) fastsoft (dot) com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
###############################################################################
from psinsights.rule import Rule as _Rule
class Rules(object):
def __contains__(self, name):
return name in self.__data
def __del__(self):
self.__data = None
self.__rule_map = None
def __getitem__(self, name):
rule = self.get(name)
if rule is None:
raise KeyError(name)
return rule
def __init__(self, data):
self.__data = data
self.__rule_map = {}
def __iter__(self):
return self.__data.iterkeys()
def __len__(self):
return len(self.__data)
def get(self, name, default=None):
rule_map = self.__rule_map
rule = rule_map.get(name)
if rule is None:
data = self.__data
rule_data = data.get(name)
if rule_data is None:
return default
rule = _Rule(rule_data)
rule_map[name] = rule
return rule
def items(self):
return list(self.iteritems())
def iteritems(self):
get = self.get
return ((k, get(k)) for k in self.__data.iterkeys())
iterkeys = __iter__
def itervalues(self):
get = self.get
return (get(k) for k in self.__data.iterkeys())
def keys(self):
return list(iter(self))
def values(self):
return list(self.itervalues())
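A minimal usage sketch; the payload shape is hypothetical, since Rules simply forwards each raw entry to psinsights.rule.Rule on first access and memoizes the wrapper:
# data = {'MinifyCss': {...}}           # raw rule dicts, shape assumed
# rules = Rules(data)
# 'MinifyCss' in rules                  # membership checks the raw dict
# rule = rules.get('MinifyCss')         # lazily wrapped in a Rule, then cached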
| 926
| 348
| 23
|
662efd1261fb763f2ca5bdab861633e763419ddb
| 552
|
py
|
Python
|
exercicio-condicional/questao-1.py
|
maumneto/exercicio-python
|
bd57cd9f3b48c76ea3f8195544d347bc1b0c943e
|
[
"MIT"
] | null | null | null |
exercicio-condicional/questao-1.py
|
maumneto/exercicio-python
|
bd57cd9f3b48c76ea3f8195544d347bc1b0c943e
|
[
"MIT"
] | null | null | null |
exercicio-condicional/questao-1.py
|
maumneto/exercicio-python
|
bd57cd9f3b48c76ea3f8195544d347bc1b0c943e
|
[
"MIT"
] | 1
|
2020-04-27T15:01:10.000Z
|
2020-04-27T15:01:10.000Z
|
'''
Write a program that reads a worker's salary and the value of a loan
installment. If the installment is greater than 20% of the salary,
print "Loan not granted"; otherwise print "Loan granted".
'''
# read input data
salarao = float(input('Enter the salary: '))
prestacao = float(input('Enter the installment: '))
# conditional
if (prestacao > 0.2*salarao):
    print('Loan not granted!')
else:
    print('Loan granted!')
# end-of-program message
print('End of program!')
| 30.666667
| 95
| 0.733696
|
'''
Write a program that reads a worker's salary and the value of a loan
installment. If the installment is greater than 20% of the salary,
print "Loan not granted"; otherwise print "Loan granted".
'''
# read input data
salarao = float(input('Enter the salary: '))
prestacao = float(input('Enter the installment: '))
# conditional
if (prestacao > 0.2*salarao):
    print('Loan not granted!')
else:
    print('Loan granted!')
# end-of-program message
print('End of program!')
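Worked example for the condition above (values illustrative):
# salary 1000.00 and installment 250.00 -> 250 > 0.2 * 1000 (= 200),
# so the program prints 'Loan not granted!'.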
| 0
| 0
| 0
|
4b998a8b759bc5a4cf2d3b91ee6979cd04cfc889
| 12,997
|
py
|
Python
|
Firefly/services/firefly_security_and_monitoring/firefly_monitoring.py
|
Firefly-Automation/Firefly
|
fccf40b8f6e015ef34c292264184090eb8d860b7
|
[
"Apache-2.0"
] | 20
|
2017-03-24T08:25:50.000Z
|
2020-07-07T16:09:34.000Z
|
Firefly/services/firefly_security_and_monitoring/firefly_monitoring.py
|
Firefly-Automation/Firefly
|
fccf40b8f6e015ef34c292264184090eb8d860b7
|
[
"Apache-2.0"
] | 1
|
2017-11-02T17:46:48.000Z
|
2017-11-02T17:46:48.000Z
|
Firefly/services/firefly_security_and_monitoring/firefly_monitoring.py
|
Firefly-Automation/Firefly
|
fccf40b8f6e015ef34c292264184090eb8d860b7
|
[
"Apache-2.0"
] | 5
|
2017-04-11T02:27:38.000Z
|
2020-12-11T07:44:00.000Z
|
"""
Firefly Security and Monitoring
This is the core Firefly Security and Monitoring Service. It requires almost zero configuration from the user; Firefly monitors the entire house.
- Alarm System (Away)
- Alarm System (Night)
- Vacation Lighting
- Battery Monitor
- Smoke Alerts
- Flooding Alerts
"""
from Firefly import logging, scheduler, aliases
from Firefly.const import COMMAND_NOTIFY, EVENT_TYPE_BROADCAST, FIREFLY_SECURITY_MONITORING, SERVICE_NOTIFICATION, SOURCE_LOCATION, TYPE_DEVICE, WATER, SENSOR_DRY, SENSOR_WET
from Firefly.helpers.device import BATTERY, CONTACT, CONTACT_CLOSE, CONTACT_OPEN, MOTION, MOTION_ACTIVE, MOTION_INACTIVE
from Firefly.helpers.events import Command, Event
from Firefly.services.firefly_security_and_monitoring.battery_monitor import check_battery_from_event, generate_battery_notification_message
from Firefly.services.firefly_security_and_monitoring.secueity_settings import FireflySecuritySettings
from Firefly.services.firefly_security_and_monitoring.security_monitor import (check_all_security_contact_sensors, check_all_security_motion_sensors, generate_contact_warning_message,
process_contact_change, process_motion_change)
from Firefly.util.firefly_util import command_from_dict
from .const import ALARM_ARMED_MESSAGE_MOTION, ALARM_ARMED_MESSAGE_NO_MOTION, BATTERY_LOW, BATTERY_NO_NOTIFY_STATES, STATUS_TEMPLATE
ALARM_DISARMED = 'disarmed'
ALARM_ARMED = 'armed'
ALARM_ARMED_MOTION = 'armed_motion'
ALARM_ARMED_NO_MOTION = 'armed_no_motion'
ALARM_TRIGGERED = 'triggered'
SYSTEM_DISABLED = 'system_disabled'
| 38.11437
| 183
| 0.701239
|
"""
Firefly Security and Monitoring
This is the core Firefly Security and Monitoring Service. It requires almost zero configuration from the user; Firefly monitors the entire house.
- Alarm System (Away)
- Alarm System (Night)
- Vacation Lighting
- Battery Monitor
- Smoke Alerts
- Flooding Alerts
"""
from Firefly import logging, scheduler, aliases
from Firefly.const import COMMAND_NOTIFY, EVENT_TYPE_BROADCAST, FIREFLY_SECURITY_MONITORING, SERVICE_NOTIFICATION, SOURCE_LOCATION, TYPE_DEVICE, WATER, SENSOR_DRY, SENSOR_WET
from Firefly.helpers.device import BATTERY, CONTACT, CONTACT_CLOSE, CONTACT_OPEN, MOTION, MOTION_ACTIVE, MOTION_INACTIVE
from Firefly.helpers.events import Command, Event
from Firefly.services.firefly_security_and_monitoring.battery_monitor import check_battery_from_event, generate_battery_notification_message
from Firefly.services.firefly_security_and_monitoring.secueity_settings import FireflySecuritySettings
from Firefly.services.firefly_security_and_monitoring.security_monitor import (check_all_security_contact_sensors, check_all_security_motion_sensors, generate_contact_warning_message,
process_contact_change, process_motion_change)
from Firefly.util.firefly_util import command_from_dict
from .const import ALARM_ARMED_MESSAGE_MOTION, ALARM_ARMED_MESSAGE_NO_MOTION, BATTERY_LOW, BATTERY_NO_NOTIFY_STATES, STATUS_TEMPLATE
ALARM_DISARMED = 'disarmed'
ALARM_ARMED = 'armed'
ALARM_ARMED_MOTION = 'armed_motion'
ALARM_ARMED_NO_MOTION = 'armed_no_motion'
ALARM_TRIGGERED = 'triggered'
SYSTEM_DISABLED = 'system_disabled'
class FireflySecurityAndMonitoring(object):
def __init__(self, firefly, enabled=True):
self.firefly = firefly
self.enabled = enabled
self.status = STATUS_TEMPLATE
self.alarm_status = ALARM_DISARMED
self.settings = FireflySecuritySettings()
def shutdown(self, **kwargs):
self.settings.save_config()
def get_alarm_status(self, **kwargs):
if not self.enabled:
return SYSTEM_DISABLED
return self.alarm_status
def event(self, event: Event, **kwargs):
logging.info('[FIREFLY SECURITY] event received: %s' % str(event))
if not self.enabled:
logging.info('[FIREFLY SECURITY] security and monitoring not enabled')
return
# Process Battery Notifications
if BATTERY in event.event_action:
self.process_battery_event(event)
# Process water event only if monitoring is enabled for the device.
if WATER in event.event_action:
if self.check_security_enabled(event.source):
self.process_water_event(event)
# Enter Secure Mode
if event.source == SOURCE_LOCATION and 'mode' in event.event_action:
mode = event.event_action['mode']
if self.check_secure_mode(mode):
self.enter_secure_mode()
# Exit secure mode
last_mode = self.firefly.location.lastMode
if not self.check_secure_mode(mode) and self.check_secure_mode(last_mode):
self.alarm_status = ALARM_DISARMED
self.status['status']['alarm'] = self.alarm_status
self.firefly.update_security_firebase(self.status)
self.send_notification('Security alarm disabled.')
self.broadcast_status()
return
if event.source not in self.firefly.components:
logging.info('[FIREFLY SECURITY] event source not in components: %s' % event.source)
return
# Process Events while in secure mode
if self.check_secure_mode():
if not self.check_security_enabled(event.source):
        logging.info('[FIREFLY SECURITY] event source is not a security-enabled device')
return
self.process_event_secure_mode(event)
self.update_status(event)
def startup(self, **kwargs):
if self.check_secure_mode():
self.enter_secure_mode()
def check_secure_mode(self, mode=None, no_motion=True, motion=True):
"""
Args:
mode: The mode to check.
no_motion: Check for modes with no motion active.
motion: Check for modes with motion active.
Returns: (bool) is in secure mode
"""
if mode is None:
mode = self.firefly.location.mode
mode_secure_no_motion = mode in self.settings.secure_modes_no_motion
mode_secure_motion = mode in self.settings.secure_modes_motion
if no_motion and motion:
return mode_secure_motion or mode_secure_no_motion
elif no_motion:
return mode_secure_no_motion
elif motion:
return mode_secure_motion
return False
# TODO: Move this into security monitor
def generate_status(self, **kwargs):
if not self.enabled:
return
contact_states = check_all_security_contact_sensors(self.firefly.components, self.firefly.current_state)
motion_states = check_all_security_motion_sensors(self.firefly.components, self.firefly.current_state)
status_data = {
'status': {
'message': 'Message Placeholder',
'alarm': self.alarm_status
},
CONTACT: {
'message': '',
CONTACT_OPEN: {
'count': len(contact_states[CONTACT_OPEN]),
'devices': contact_states[CONTACT_OPEN]
},
CONTACT_CLOSE: {
'count': len(contact_states[CONTACT_CLOSE]),
'devices': contact_states[CONTACT_CLOSE]
}
},
MOTION: {
'message': '',
MOTION_ACTIVE: {
'count': len(motion_states[MOTION_ACTIVE]),
'devices': motion_states[MOTION_ACTIVE]
},
MOTION_INACTIVE: {
'count': len(motion_states[MOTION_INACTIVE]),
'devices': motion_states[MOTION_INACTIVE]
}
}
}
self.status = status_data
self.firefly.update_security_firebase(self.status)
def check_security_enabled(self, ff_id: str, filter_type=TYPE_DEVICE) -> bool:
if ff_id not in self.firefly.components:
logging.info('[FIREFLY SECURITY] component not found: %s' % ff_id)
return False
try:
component = self.firefly.components[ff_id]
return component.security and component.type == filter_type
except:
return False
# TODO: Move this into security monitor
def update_status(self, event: Event):
ff_id = event.source
if not self.check_security_enabled(ff_id):
return
# Update Contact Status
if CONTACT in event.event_action:
if event.event_action[CONTACT] == CONTACT_OPEN:
self.status[CONTACT][CONTACT_OPEN]['devices'].append(ff_id)
self.status[CONTACT][CONTACT_OPEN]['count'] = len(self.status[CONTACT][CONTACT_OPEN]['devices'])
try:
self.status[CONTACT][CONTACT_CLOSE]['devices'].remove(ff_id)
self.status[CONTACT][CONTACT_CLOSE]['count'] = len(self.status[CONTACT][CONTACT_CLOSE]['devices'])
except Exception as e:
logging.error('[FIREFLY SECURITY] error updating status: %s' % e)
if event.event_action[CONTACT] == CONTACT_CLOSE:
self.status[CONTACT][CONTACT_CLOSE]['devices'].append(ff_id)
self.status[CONTACT][CONTACT_CLOSE]['count'] = len(self.status[CONTACT][CONTACT_CLOSE]['devices'])
try:
self.status[CONTACT][CONTACT_OPEN]['devices'].remove(ff_id)
self.status[CONTACT][CONTACT_OPEN]['count'] = len(self.status[CONTACT][CONTACT_OPEN]['devices'])
except Exception as e:
logging.error('[FIREFLY SECURITY] error updating status: %s' % e)
# Update Motion Status
if MOTION in event.event_action:
if event.event_action[MOTION] == MOTION_ACTIVE:
self.status[MOTION][MOTION_ACTIVE]['devices'].append(ff_id)
self.status[MOTION][MOTION_ACTIVE]['count'] = len(self.status[MOTION][MOTION_ACTIVE]['devices'])
try:
self.status[MOTION][MOTION_INACTIVE]['devices'].remove(ff_id)
self.status[MOTION][MOTION_INACTIVE]['count'] = len(self.status[MOTION][MOTION_INACTIVE]['devices'])
except Exception as e:
logging.error('[FIREFLY SECURITY] error updating status: %s' % e)
if event.event_action[MOTION] == MOTION_INACTIVE:
self.status[MOTION][MOTION_INACTIVE]['devices'].append(ff_id)
self.status[MOTION][MOTION_INACTIVE]['count'] = len(self.status[MOTION][MOTION_INACTIVE]['devices'])
try:
self.status[MOTION][MOTION_ACTIVE]['devices'].remove(ff_id)
self.status[MOTION][MOTION_ACTIVE]['count'] = len(self.status[MOTION][MOTION_ACTIVE]['devices'])
except Exception as e:
logging.error('[FIREFLY SECURITY] error updating status: %s' % e)
self.firefly.update_security_firebase(self.status)
def process_event_secure_mode(self, event: Event):
alarm_triggered = False
contact_data = process_contact_change(event)
if contact_data['contact_event']:
self.send_notification(contact_data['message'])
if contact_data['alarm']:
alarm_triggered = True
logging.info('[FIREFLY SECURITY] ALARM TRIGGERED')
# TODO: Turn on listed lights, if no lights listed then turn on all lights
if self.check_secure_mode(no_motion=False):
motion_data = process_motion_change(event)
if motion_data['alarm']:
alarm_triggered = True
self.send_notification(motion_data['message'])
logging.info('[FIREFLY SECURITY] ALARM TRIGGERED')
if alarm_triggered:
self.trigger_alarm()
def trigger_alarm(self, **kwargs):
logging.info('TRIGGERING ALARM')
self.alarm_status = ALARM_TRIGGERED
lights = self.settings.lights
if not lights:
lights = self.get_devices_by_tag()
for ff_id in lights:
command = command_from_dict(ff_id, self.id, self.settings.light_command)
      logging.info('[FIREFLY SECURITY] sending command %s' % str(command))
self.firefly.send_command(command)
alarms = self.settings.alarms
if not alarms:
alarms = self.get_devices_by_tag(tags=['alarm'])
for ff_id in alarms:
command = Command(ff_id, self.id, self.settings.alarm_command)
self.firefly.send_command(command)
self.broadcast_status()
self.status['status']['alarm'] = self.alarm_status.replace('_', ' ')
self.firefly.update_security_firebase(self.status)
def enter_secure_mode(self, **kwargs):
logging.info('[FIREFLY SECURITY] Entering Secure Mode.')
# Grab snapshot of current state
current_state = self.firefly.current_state.copy()
components = self.firefly.components
contact_states = check_all_security_contact_sensors(components, current_state)
if contact_states[CONTACT_OPEN]:
message = generate_contact_warning_message(contact_states)
self.send_notification(message)
# If no contacts open then send notification that alarm is now armed.
if self.check_secure_mode(no_motion=False):
self.send_notification(ALARM_ARMED_MESSAGE_MOTION)
self.alarm_status = ALARM_ARMED_MOTION
else:
self.send_notification(ALARM_ARMED_MESSAGE_NO_MOTION)
self.alarm_status = ALARM_ARMED_NO_MOTION
self.status['status']['alarm'] = self.alarm_status.replace('_', ' ')
self.firefly.update_security_firebase(self.status)
self.broadcast_status()
def broadcast_status(self, **kwargs):
event = Event(self.id, EVENT_TYPE_BROADCAST, {
'status': self.alarm_status,
})
self.firefly.send_event(event)
def get_devices_by_tag(self, tags=['light'], **kwargs):
devices = []
for ff_id, component in self.firefly.components.items():
if component.type != TYPE_DEVICE:
continue
try:
for tag in component.tags:
if tag in tags:
devices.append(ff_id)
continue
except:
pass
return devices
def process_water_event(self, event: Event, **kwargs):
alias = aliases.get_alias(event.source)
if event.event_action.get(WATER) == SENSOR_WET:
self.send_notification('ALERT!!! Water detected by: %s' % alias)
self.trigger_alarm()
return
if event.event_action.get(WATER) == SENSOR_DRY:
self.send_notification('ALERT!!! Water no longer detected by: %s' % alias)
return
def process_battery_event(self, event: Event, **kwargs):
(battery_state, battery_level) = check_battery_from_event(event)
if battery_state in BATTERY_NO_NOTIFY_STATES:
if scheduler.cancel('%s_battery_notify' % event.source):
        self.send_notification('Battery in %s has been replaced.' % event.source)
return
message = generate_battery_notification_message(event.source, battery_state, battery_level)
self.send_notification(message)
if battery_state == BATTERY_LOW:
return
scheduler.runEveryH(4, self.send_notification, job_id='%s_battery_notify' % event.source, message=message)
return
def send_notification(self, message):
notify = Command(SERVICE_NOTIFICATION, self.id, COMMAND_NOTIFY, message=message)
self.firefly.send_command(notify)
@property
def id(self):
return FIREFLY_SECURITY_MONITORING
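The mode gate in check_secure_mode reduces to two membership tests; a standalone sketch with assumed mode lists:
# secure_no_motion = ['night']                    # assumed settings values
# secure_motion = ['away', 'vacation']
# def is_secure(mode, no_motion=True, motion=True):
#     return (no_motion and mode in secure_no_motion) or \
#            (motion and mode in secure_motion)
# is_secure('home') -> False; is_secure('night', motion=False) -> True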
| 10,104
| 1,232
| 23
|
0c76885b70fe7b575d9278df97a40daf190c7e04
| 324
|
py
|
Python
|
optirocket/library/constants.py
|
Keith-Maxwell/OptiRocket
|
d99ac8d2b868b60a2bbf32f5a8a31ecdcaeea5b0
|
[
"MIT"
] | null | null | null |
optirocket/library/constants.py
|
Keith-Maxwell/OptiRocket
|
d99ac8d2b868b60a2bbf32f5a8a31ecdcaeea5b0
|
[
"MIT"
] | 3
|
2021-01-14T15:09:51.000Z
|
2021-02-12T17:05:18.000Z
|
optirocket/library/constants.py
|
Keith-Maxwell/OptiRocket
|
d99ac8d2b868b60a2bbf32f5a8a31ecdcaeea5b0
|
[
"MIT"
] | 1
|
2021-01-11T02:34:29.000Z
|
2021-01-11T02:34:29.000Z
|
# standard gravitational parameter for Earth = G*M
EARTH_GRAV_CONST = 3.986005e5 # (km^3/s^2)
# Earth Radius
EARTH_RADIUS = 6378.137 # (km)
# Earth rotation speed (calculated from the sidereal period)
EARTH_ROT_RATE = 6.300387486749 / 86164  # (rad/s)
# Earth gravitation at sea level
EARTH_GRAV_SEA_LVL = 9.80665  # (m/s^2)
| 27
| 55
| 0.725309
|
# standard gravitational parameter for Earth = G*M
EARTH_GRAV_CONST = 3.986005e5 # (km^3/s^2)
# Earth Radius
EARTH_RADIUS = 6378.137 # (km)
# Earth rotation speed (calculated from the sidereal period)
EARTH_ROT_RATE = 6.300387486749 / 86164  # (rad/s)
# Earth gravitation at sea level
EARTH_GRAV_SEA_LVL = 9.80665  # (m/s^2)
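As a quick unit check, the circular orbital speed at an illustrative 400 km altitude follows directly from these constants:
# v = sqrt(EARTH_GRAV_CONST / r) with r = EARTH_RADIUS + 400 km:
#   (3.986005e5 / 6778.137) ** 0.5 ~= 7.67 km/s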
| 0
| 0
| 0
|
42488845e1b00797f2c42f02abc38006597e292a
| 4,539
|
py
|
Python
|
my_utils/misc.py
|
Jennifercheukyin/High-Speed-Pedestrian-Crossing-Prediction
|
09cceb0efaf4d074ee16d11d8f91292ce9dec854
|
[
"MIT"
] | 4
|
2021-10-22T01:33:16.000Z
|
2022-03-09T06:39:54.000Z
|
my_utils/misc.py
|
Jennifercheukyin/High-Speed-Pedestrian-Crossing-Prediction
|
09cceb0efaf4d074ee16d11d8f91292ce9dec854
|
[
"MIT"
] | null | null | null |
my_utils/misc.py
|
Jennifercheukyin/High-Speed-Pedestrian-Crossing-Prediction
|
09cceb0efaf4d074ee16d11d8f91292ce9dec854
|
[
"MIT"
] | null | null | null |
'''Some helper functions for PyTorch, including:
- get_mean_and_std: calculate the mean and std value of dataset.
- msr_init: net parameter initialization.
    - progress_bar: progress bar mimicking xlua.progress.
'''
import errno
import os
import sys
import time
import math
import torch.nn as nn
import torch.nn.init as init
from torch.autograd import Variable
import numpy as np
import pdb
import torch
__all__ = ['get_mean_and_std', 'init_params', 'mkdir_p', 'AverageMeter', 'MovingAverage', 'AverageMeter_Mat', 'Timer']
def get_mean_and_std(dataset):
'''Compute the mean and std value of dataset.'''
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=2)
mean = torch.zeros(3)
std = torch.zeros(3)
print('==> Computing mean and std..')
for inputs, targets in dataloader:
for i in range(3):
mean[i] += inputs[:,i,:,:].mean()
std[i] += inputs[:,i,:,:].std()
mean.div_(len(dataset))
std.div_(len(dataset))
return mean, std
def init_params(net):
'''Init layer parameters.'''
for m in net.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal(m.weight, mode='fan_out')
if m.bias:
init.constant(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant(m.weight, 1)
init.constant(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal(m.weight, std=1e-3)
if m.bias:
init.constant(m.bias, 0)
def mkdir_p(path):
'''make dir if not exist'''
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
class AverageMeter(object):
"""Computes and stores the average and current value
Imported from https://github.com/pytorch/examples/blob/master/imagenet/main.py#L247-L262
"""
| 30.463087
| 118
| 0.592642
|
'''Some helper functions for PyTorch, including:
- get_mean_and_std: calculate the mean and std value of dataset.
- msr_init: net parameter initialization.
- progress_bar: progress bar mimic xlua.progress.
'''
import errno
import os
import sys
import time
import math
import torch.nn as nn
import torch.nn.init as init
from torch.autograd import Variable
import numpy as np
import pdb
import torch
__all__ = ['get_mean_and_std', 'init_params', 'mkdir_p', 'AverageMeter', 'MovingAverage', 'AverageMeter_Mat', 'Timer']
def get_mean_and_std(dataset):
'''Compute the mean and std value of dataset.'''
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=2)
mean = torch.zeros(3)
std = torch.zeros(3)
print('==> Computing mean and std..')
for inputs, targets in dataloader:
for i in range(3):
mean[i] += inputs[:,i,:,:].mean()
std[i] += inputs[:,i,:,:].std()
mean.div_(len(dataset))
std.div_(len(dataset))
return mean, std
def init_params(net):
    '''Init layer parameters.'''
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal_(m.weight, mode='fan_out')
            # `if m.bias:` would test the truth value of a tensor; check for None
            if m.bias is not None:
                init.constant_(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant_(m.weight, 1)
            init.constant_(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal_(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant_(m.bias, 0)
def mkdir_p(path):
'''make dir if not exist'''
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
class MovingAverage(object):
def __init__(self, length):
self.length = length
self.count = 0
self.pointer = 0
self.values = np.zeros(length)
# self.avg = 0
def update(self, val):
self.values[self.pointer] = val
self.pointer += 1
if self.pointer == self.length:
self.pointer = 0
self.count += 1
self.count = np.minimum(self.count, self.length)
def avg(self):
return self.values.sum() / float(self.count)
def reset(self):
self.count = 0
self.pointer = 0
# self.avg = 0
self.values.fill(0)
class AverageMeter(object):
"""Computes and stores the average and current value
Imported from https://github.com/pytorch/examples/blob/master/imagenet/main.py#L247-L262
"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
# pdb.set_trace()
self.count += n
self.avg = self.sum / self.count
class AverageMeter_Mat(object):
def __init__(self,number_ID):
self.number_ID = number_ID
self.reset()
def reset(self):
# self.sum = Variable(torch.Tensor(self.number_ID,64).fill_(0).cuda())
# self.num = Variable(torch.Tensor(self.number_ID,64).fill_(0).cuda())
self.center = Variable(torch.Tensor(self.number_ID,64).fill_(0).cuda(), requires_grad=False)
# self.dif = Variable(torch.Tensor(self.number_ID,64).fill_(0).cuda())
self.sum = torch.Tensor(self.number_ID,64).fill_(0).cuda()
self.num = torch.Tensor(self.number_ID,64).fill_(0).cuda()
# self.center = torch.Tensor(self.number_ID,64).fill_(0).cuda()
# self.dif = torch.Tensor(self.number_ID,64).fill_(0).cuda()
# self.sum = torch.Tensor(self.number_ID,64).fill_(0)
# self.num = torch.Tensor(self.number_ID,64).fill_(0)
# self.center = torch.Tensor(self.number_ID,64).fill_(0)
# self.dif = torch.Tensor(self.number_ID,64).fill_(0)
def update(self, SIR, ID, n):
# pdb.set_trace()
self.sum[ID,:] += SIR.data
# pdb.set_trace()
self.num[ID,:] += 1*n
self.center[ID,:] = self.sum[ID] / self.num[ID]
# self.dif[ID,:] = SIR - Variable(self.center[ID])
# self.avg = 0.5*torch.mean(self.dif**2)
class Timer(object):
def __init__(self):
pass
def reset(self):
self.T = time.time()
def time(self, reset=False):
ti = time.time() - self.T
if reset:
self.reset()
return ti
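A minimal usage sketch for the meters above (the loss values are made up; a real training loop would feed per-batch statistics):
meter = AverageMeter()
for loss in [0.9, 0.7, 0.6]:
    meter.update(loss, n=32)   # n = number of samples in the batch
print(meter.avg)               # sample-weighted running mean (~0.733)

window = MovingAverage(length=2)
for loss in [0.9, 0.7, 0.6]:
    window.update(loss)
print(window.avg())            # mean over the last 2 updates: 0.65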
| 2,101
| 16
| 428
|
ca91f55ea74fe8da53eabdf2dc43a829dbcf7253
| 1,697
|
py
|
Python
|
gradient_decent_simple_linear_regression.py
|
eshanmherath/linear-regression
|
5b473586679a4b4594706faeb2bb7e4922c7ab38
|
[
"MIT"
] | 1
|
2020-12-09T04:19:46.000Z
|
2020-12-09T04:19:46.000Z
|
gradient_decent_simple_linear_regression.py
|
eshanmherath/linear-regression
|
5b473586679a4b4594706faeb2bb7e4922c7ab38
|
[
"MIT"
] | null | null | null |
gradient_decent_simple_linear_regression.py
|
eshanmherath/linear-regression
|
5b473586679a4b4594706faeb2bb7e4922c7ab38
|
[
"MIT"
] | null | null | null |
import numpy as np
np.random.seed(111)
'''
The data is generated by adding noise to values from the equation y = 0.8x + 2.
The optimizer is therefore expected to drive W and b close to 0.8 and 2 respectively.
'''
'''generate random x values'''
X_train = np.random.random((1, 50))[0]
'''get the reference y value'''
y_reference = 0.8*X_train + 2
'''add noise to the reference y value'''
y_train = y_reference + np.sqrt(0.01)*np.random.random((1, 50))[0]
W = np.random.random()
b = np.random.random()
'''number of training examples'''
m = len(X_train)
'''parameters'''
learning_rate = 0.01
epochs = 5000
'''send data to the gradient optimizer to optimize values for W and b'''
gradient_descent(X_train, y_train)
print('\nGradient optimization completed')
print('W Expected : 0.8' + ' Learned : ' + str(W))
print('b Expected : 2.0' + ' Learned : ' + str(b))
| 28.762712
| 107
| 0.625221
|
import numpy as np
np.random.seed(111)
'''
The data is generated by adding noise to values from the equation y = 0.8x + 2.
The optimizer is therefore expected to drive W and b close to 0.8 and 2 respectively.
'''
'''generate random x values'''
X_train = np.random.random((1, 50))[0]
'''get the reference y value'''
y_reference = 0.8*X_train + 2
'''add noise to the reference y value'''
y_train = y_reference + np.sqrt(0.01)*np.random.random((1, 50))[0]
W = np.random.random()
b = np.random.random()
'''number of training examples'''
m = len(X_train)
'''parameters'''
learning_rate = 0.01
epochs = 5000
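# For reference, the routine below is batch gradient descent on the
# mean-squared-error cost
#   J(W, b) = (1 / (2m)) * sum_i (W*x_i + b - y_i)^2
# with partial derivatives
#   dJ/dW = (1/m) * sum_i (W*x_i + b - y_i) * x_i
#   dJ/db = (1/m) * sum_i (W*x_i + b - y_i)
# and simultaneous updates W <- W - lr*dJ/dW, b <- b - lr*dJ/db.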
def gradient_descent(X, y):
global W, b, learning_rate, epochs
for _epoch in range(epochs):
hypothesis = W*X + b
'''cost function'''
cost = np.divide(1, 2*m) * np.sum((hypothesis-y) ** 2)
'''partial derivatives of the cost function with respect to W and b'''
gradient_w = np.divide(1, m) * np.sum((hypothesis-y)*X)
gradient_b = np.divide(1, m) * np.sum(hypothesis-y)
'''calculating new W and b values simultaneously'''
temp_w = W - learning_rate*gradient_w
temp_b = b - learning_rate*gradient_b
'''updating W and b simultaneously'''
W = temp_w
b = temp_b
print('\nepoch ' + str(_epoch) + ' W : ' + str(W) + ' b : ' + str(b) + ' Cost : ' + str(cost))
'''send data to the gradient optimizer to optimize values for W and b'''
gradient_descent(X_train, y_train)
print('\nGradient optimization completed')
print('W Expected : 0.8' + ' Learned : ' + str(W))
print('b Expected : 2.0' + ' Learned : ' + str(b))
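As a cross-check, simple linear regression also has a closed-form least-squares solution; a sketch (reusing the X_train/y_train arrays above) to compare the learned W and b against:
x_mean, y_mean = X_train.mean(), y_train.mean()
W_exact = ((X_train - x_mean) * (y_train - y_mean)).sum() / ((X_train - x_mean) ** 2).sum()
b_exact = y_mean - W_exact * x_mean
print('Closed-form W:', W_exact, ' b:', b_exact)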
| 788
| 0
| 23
|
b2107b59ecdecdb0d53f298a0ed4ee2762c4cc8c
| 458
|
py
|
Python
|
1_mundo_exercicios/ex018.py
|
GuilhermeLima182/CursoDePython
|
7e72b117142794c38cbb14284d0fa6e1dbee5bf6
|
[
"MIT"
] | null | null | null |
1_mundo_exercicios/ex018.py
|
GuilhermeLima182/CursoDePython
|
7e72b117142794c38cbb14284d0fa6e1dbee5bf6
|
[
"MIT"
] | null | null | null |
1_mundo_exercicios/ex018.py
|
GuilhermeLima182/CursoDePython
|
7e72b117142794c38cbb14284d0fa6e1dbee5bf6
|
[
"MIT"
] | null | null | null |
# Write a program that reads any angle and shows on screen
# the sine, cosine and tangent of that angle.
from math import radians, sin, cos, tan
angulo = int(input('Enter an angle: '))
sen = sin(radians(angulo))
cosseno = cos(radians(angulo))    # renamed: `cos = cos(...)` shadowed the imported names
tangente = tan(radians(angulo))
print('The sine of angle {} is {:.2f}'.format(angulo, sen))
print('The cosine of angle {} is {:.2f}'.format(angulo, cosseno))
print('The tangent of angle {} is {:.2f}'.format(angulo, tangente))
| 38.166667
| 62
| 0.696507
|
# Write a program that reads any angle and shows on screen
# the sine, cosine and tangent of that angle.
from math import radians, sin, cos, tan
angulo = int(input('Enter an angle: '))
sen = sin(radians(angulo))
cosseno = cos(radians(angulo))    # renamed: `cos = cos(...)` shadowed the imported names
tangente = tan(radians(angulo))
print('The sine of angle {} is {:.2f}'.format(angulo, sen))
print('The cosine of angle {} is {:.2f}'.format(angulo, cosseno))
print('The tangent of angle {} is {:.2f}'.format(angulo, tangente))
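A sample run (the input 30 is arbitrary; sin 30° = 0.50, cos 30° ≈ 0.87, tan 30° ≈ 0.58):
Enter an angle: 30
The sine of angle 30 is 0.50
The cosine of angle 30 is 0.87
The tangent of angle 30 is 0.58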
| 0
| 0
| 0
|
624916c3d5ec04f32ee59e6547283d5f7ef4f28e
| 1,313
|
py
|
Python
|
source/_sample/sympy/stereograph.py
|
showa-yojyo/notebook
|
82c15074c24d64a1dfcb70a526bc1deb2ecffe68
|
[
"MIT"
] | 14
|
2016-04-13T08:10:02.000Z
|
2021-04-19T09:42:51.000Z
|
source/_sample/sympy/stereograph.py
|
showa-yojyo/note
|
5f262ecda3df132cb66206c465d16e174061d6b9
|
[
"MIT"
] | 88
|
2017-09-27T15:07:05.000Z
|
2019-10-02T04:05:03.000Z
|
source/_sample/sympy/stereograph.py
|
showa-yojyo/note
|
5f262ecda3df132cb66206c465d16e174061d6b9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""stereograph.py: Compute length of a geodesic in the unit sphere.
"""
from sympy import (symbols, Function, Matrix, factor, simplify, latex, sqrt)
from sympy.abc import (t, xi, eta)
from sympy.printing import print_latex
if __name__ == '__main__':
main()
| 32.825
| 76
| 0.581112
|
#!/usr/bin/env python
"""stereograph.py: Compute length of a geodesic in the unit sphere.
"""
from sympy import (symbols, Function, Matrix, factor, simplify, latex, sqrt)
from sympy.abc import (t, xi, eta)
from sympy.printing import print_latex
def main():
u, v, R = symbols('u v R', real=True)
xi, eta = symbols(r'\xi \eta', cls=Function)
numer = 4*R**2
denom = u**2 + v**2 + numer
# inverse of a stereographic projection from the south pole
# onto the XY plane:
pinv = Matrix([numer * u / denom,
numer * v / denom,
-(2 * R * (u**2 + v**2)) / denom]) # OK
if False:
# textbook style
Dpinv = simplify(pinv.jacobian([u, v]))
print_latex(Dpinv, mat_str='pmatrix', mat_delim=None) # OK?
tDpinvDpinv = factor(Dpinv.transpose() @ Dpinv)
print_latex(tDpinvDpinv, mat_str='pmatrix', mat_delim=None) # OK
tDpinvDpinv = tDpinvDpinv.subs([(u, xi(t)), (v, eta(t))])
dcdt = Matrix([xi(t).diff(), eta(t).diff()])
print_latex(simplify(
sqrt((dcdt.transpose() @ tDpinvDpinv).dot(dcdt))))
else:
# directly
dpinvc = pinv.subs([(u, xi(t)), (v, eta(t))]).diff(t, 1)
print_latex(sqrt(factor(dpinvc.dot(dpinvc))))
if __name__ == '__main__':
main()
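In the script's notation, the inverse projection maps the plane onto a sphere of radius R tangent to it at the origin (centered at (0, 0, -R)), and the printed integrand is the speed of the lifted curve c(t) = (xi(t), eta(t)) under the pullback metric; a sketch of the quantities the script prints:

    p^{-1}(u, v) = \frac{1}{u^2 + v^2 + 4R^2}
        \begin{pmatrix} 4R^2 u \\ 4R^2 v \\ -2R\,(u^2 + v^2) \end{pmatrix},
    \qquad
    L(c) = \int \sqrt{\dot c(t)^{\mathsf{T}} \,(Dp^{-1})^{\mathsf{T}} (Dp^{-1})\, \dot c(t)}\; dt .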
| 1,005
| 0
| 23
|
9ee6af17b80095ba1ce3ce97e7b719c8cc0ba35d
| 357
|
py
|
Python
|
visdialch/decoders/__init__.py
|
mohitsudhakar/visual-dialog-experiments
|
77cc65938b0ce99fc52b839b7821f29c7a6b32a0
|
[
"BSD-3-Clause"
] | 1
|
2020-11-15T07:40:18.000Z
|
2020-11-15T07:40:18.000Z
|
visdialch/decoders/__init__.py
|
mohitsudhakar/visual-dialog-experiments
|
77cc65938b0ce99fc52b839b7821f29c7a6b32a0
|
[
"BSD-3-Clause"
] | 3
|
2020-11-13T19:53:06.000Z
|
2020-11-16T01:23:10.000Z
|
visdialch/decoders/__init__.py
|
mohitsudhakar/visual-dialog-experiments
|
77cc65938b0ce99fc52b839b7821f29c7a6b32a0
|
[
"BSD-3-Clause"
] | null | null | null |
# from visdialch.decoders.gen import GenerativeDecoder
#from visdialch.decoders.disc import DiscriminativeDecoder
from visdialch.decoders.decoder import DiscriminativeDecoder
| 44.625
| 76
| 0.812325
|
# from visdialch.decoders.gen import GenerativeDecoder
#from visdialch.decoders.disc import DiscriminativeDecoder
from visdialch.decoders.decoder import DiscriminativeDecoder
def Decoder(model_config, *args):
    # GenerativeDecoder's import is commented out above; referencing it here
    # would raise NameError, so only the discriminative decoder is mapped.
    name_dec_map = {"disc": DiscriminativeDecoder}
    return name_dec_map[model_config["decoder"]](model_config, *args)
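A hypothetical call site for this factory (the config key and forwarded arguments are assumptions, not part of the snippet above):
model_config = {"decoder": "disc"}   # selects DiscriminativeDecoder
decoder = Decoder(model_config)      # any extra *args are passed to the constructor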
| 159
| 0
| 23
|
8389323ee21ddfba844127da575ffe9542fde2b5
| 1,761
|
py
|
Python
|
test/unit/graph/test_node.py
|
uSpike/ansible-discover
|
74ed24d01bf305f45d0bb3485846291d8b3ca473
|
[
"MIT"
] | 4
|
2018-08-22T19:56:47.000Z
|
2021-11-15T16:11:21.000Z
|
test/unit/graph/test_node.py
|
uSpike/ansible-discover
|
74ed24d01bf305f45d0bb3485846291d8b3ca473
|
[
"MIT"
] | 11
|
2018-03-09T08:35:47.000Z
|
2018-08-17T20:05:58.000Z
|
test/unit/graph/test_node.py
|
uSpike/ansible-discover
|
74ed24d01bf305f45d0bb3485846291d8b3ca473
|
[
"MIT"
] | 3
|
2018-08-14T15:35:31.000Z
|
2021-11-15T16:11:24.000Z
|
import pytest
from ansiblediscover.graph.node import Node
@pytest.mark.parametrize('this, other, equal', [
(('myname', 'mytype', 'mypath'), ('myname', 'mytype', 'mypath'), True),
(('myname', 'mytype', 'mypath'), ('othername', 'mytype', 'mypath'), False),
(('myname', 'mytype', 'mypath'), ('myname', 'othertype', 'mypath'), False),
(('myname', 'mytype', 'mypath'), ('myname', 'othertype', 'otherpath'), False),
])
@pytest.mark.parametrize('other', [
None,
[],
('myname', 'mytype', 'mypath'),
])
| 27.092308
| 93
| 0.651902
|
import pytest
from ansiblediscover.graph.node import Node
def test_build_identifier():
assert 'role:server_base' == Node.build_identifier('server_base', 'role')
def test_identifier():
node = Node('server_base', 'role', 'irrelevant')
assert 'role:server_base' == node.identifier()
def test_add_successor():
parent = Node('appserver', 'playbook', 'appserver.yml')
child = Node('server_base', 'role', 'roles/server_base')
parent.add_successor(child)
assert child in parent.successors
assert parent in child.predecessors
def test_add_predecessor():
parent = Node('appserver', 'playbook', 'appserver.yml')
child = Node('server_base', 'role', 'roles/server_base')
child.add_predecessor(parent)
assert child in parent.successors
assert parent in child.predecessors
def test_str():
name = 'myname'
typestring = 'mytype'
path = 'mypath'
node = Node(name, typestring, path)
assert str((typestring, name, path)) == str(node)
@pytest.mark.parametrize('this, other, equal', [
(('myname', 'mytype', 'mypath'), ('myname', 'mytype', 'mypath'), True),
(('myname', 'mytype', 'mypath'), ('othername', 'mytype', 'mypath'), False),
(('myname', 'mytype', 'mypath'), ('myname', 'othertype', 'mypath'), False),
(('myname', 'mytype', 'mypath'), ('myname', 'othertype', 'otherpath'), False),
])
def test_eq(this, other, equal):
this_node = Node(*this)
other_node = Node(*other)
assert (equal and (this_node == other_node)) or (not equal and (this_node != other_node))
@pytest.mark.parametrize('other', [
None,
[],
('myname', 'mytype', 'mypath'),
])
def test_eq_unequal_types(other):
this = Node('myname', 'mytype', 'mypath')
assert this != other
| 1,070
| 0
| 159
|
92a5c4abb10045ba60521150fcb257f838c2d9c5
| 3,288
|
py
|
Python
|
Core/Grader.py
|
brnomendes/grader-edx
|
d5a168bf82100f6b1196d927d1dff81ca9ad7070
|
[
"MIT"
] | null | null | null |
Core/Grader.py
|
brnomendes/grader-edx
|
d5a168bf82100f6b1196d927d1dff81ca9ad7070
|
[
"MIT"
] | 1
|
2017-04-11T23:58:00.000Z
|
2017-04-11T23:58:00.000Z
|
Core/Grader.py
|
brnomendes/grader-edx
|
d5a168bf82100f6b1196d927d1dff81ca9ad7070
|
[
"MIT"
] | null | null | null |
import datetime
from Models.Submission import Submission
from Core.Database import Database
from Core.Scorer import Scorer
from Core.Executer import Executer
from Core.Parser import Parser
| 43.263158
| 129
| 0.667579
|
import datetime
from Models.Submission import Submission
from Core.Database import Database
from Core.Scorer import Scorer
from Core.Executer import Executer
from Core.Parser import Parser
class Grader():
def __init__(self):
self._session = Database.session()
def run(self, anonymous_student_id, student_response, problem_id):
submission = self._save_submission(anonymous_student_id, student_response, problem_id)
if submission.error:
return Grader._response(False)
fail_messages = {}
submissions = Submission.get_last_submissions_each_user(submission.problem_id)
for s in submissions:
messages = self._grader_execute(submission, s)
if messages:
fail_messages[s.student_id] = messages
if not s.id == submission.id:
self._grader_execute(s, submission)
return Grader._response(fail_messages=fail_messages)
def _grader_execute(self, submission_program, submission_test):
test_result, fail_messages = Executer.run_test(submission_program, submission_test)
self._session.add(test_result)
self._session.commit()
Scorer(submission_program.student_id, submission_test.student_id, test_result).start()
return fail_messages
def _save_submission(self, anonymous_student_id, student_response, problem_id):
program, test = Parser.parse(student_response)
new_submission = Submission(datetime.datetime.now(), anonymous_student_id, problem_id, program, test, False)
test_result, fail_messages = Executer.run_test(new_submission, new_submission)
        new_submission.error = test_result.errors > 0
submission_exists = Submission.get_submission_user(new_submission.student_id, problem_id)
if submission_exists:
Scorer.resubmission_score(new_submission.student_id, -100)
Scorer(None, None, None).get_score(new_submission.student_id)
self._session.add(new_submission)
self._session.commit()
self._session.expunge(new_submission)
self._session.close()
return new_submission
@staticmethod
def _response(correct=True, fail_messages=None):
if not correct:
            title = "<h3 style='color:red'><strong>Error found in the code.</strong></h3>"
            msg = "<p>Run your program's tests locally on your machine before submitting it.</p>"
        else:
            title = "<h3><strong>Submission accepted and scored.</strong></h3>"
            if fail_messages:
                if len(fail_messages) > 1:
                    msg = "<p>The test cases of {} students found failures in your program.</p>".format(len(fail_messages))
                else:
                    msg = "<p>The test cases of 1 student found failures in your program.</p>"
                fail_msg = "<pre style='color:red;'>{}</pre>".format(list(fail_messages.values())[0][0])
                msg = "{}<p><strong>Failure message:</strong></p>{}".format(msg, fail_msg)
            else:
                msg = "<p>No failures were found in your program by other students.</p>"
return {"correct": correct, "score": 1, "msg": "{}\n{}".format(title, msg)}
| 2,933
| 147
| 23
|
3687c80748ad58f744cedde41cab9e69281efc9e
| 44,472
|
py
|
Python
|
nitorch/io/volumes/mapping.py
|
balbasty/nitorch
|
d30c3125a8a66ea1434f2b39ed03338afd9724b4
|
[
"MIT"
] | 46
|
2020-07-31T10:14:05.000Z
|
2022-03-24T12:51:46.000Z
|
nitorch/io/volumes/mapping.py
|
balbasty/nitorch
|
d30c3125a8a66ea1434f2b39ed03338afd9724b4
|
[
"MIT"
] | 36
|
2020-10-06T19:01:38.000Z
|
2022-02-03T18:07:35.000Z
|
nitorch/io/volumes/mapping.py
|
balbasty/nitorch
|
d30c3125a8a66ea1434f2b39ed03338afd9724b4
|
[
"MIT"
] | 6
|
2021-01-05T14:59:05.000Z
|
2021-11-18T18:26:45.000Z
|
from copy import copy
import torch
from nitorch.core.py import make_list
from nitorch.core import dtypes
from nitorch.spatial import affine_sub, affine_permute, voxel_size as affvx
from nitorch.io.utils.indexing import (expand_index, guess_shape, compose_index, neg2pos,
is_droppedaxis, is_newaxis, is_sliceaxis,
invert_permutation, invert_slice, slice_navigator)
from ..utils import volutils
from ..mapping import MappedFile
class MappedArray(MappedFile):
"""Base class for mapped arrays.
Mapped arrays are usually stored on-disk, along with (diverse) metadata.
They can be symbolically sliced, allowing for partial reading and
(sometimes) writing of data from/to disk.
Chaining of symbolic slicing operations is implemented in this base
class. The actual io must be implemented by the child class.
Abstract Methods
----------------
Child classes MUST implement:
* self.data(...)
Child classes SHOULD implement:
* self.metadata(...) default -> returns empty dict
Child classes MAY implement:
* self.set_data(...) default -> raises cls.FailedWriteError
* self.set_metadata(...) default -> raises cls.FailedWriteError
* cls.save_new(...) default -> raises cls.FailedWriteError
* cls.savef_new(...) default -> raises cls.FailedWriteError
Child classes SHOULD register themselves in `readers.reader_classes`.
If they implement `save_new`, child classes SHOULD register
themselves in `writers.writer_classes`.
Properties
----------
dtype : np.dtype On-disk data type
slope : float Intensity slope from on-disk to unit
inter : float Intensity shift from on-disk to unit
affine : tensor Orientation matrix: maps spatial axes to 'world'
spatial : tuple[bool] Mask of 'spatial' axes (x, y, z, ...)
slicer : tuple[index_like] Indexing into the full on-disk array
permutation : tuple[int] Permutation of the original in-disk axes.
dim : int Number of axes
voxel_size : tuple[float] World size of the spatial dimensions
readable : AccessType See `AccessType`
writable : AccessType See `AccessType`
Types
-----
FailedReadError Error raised when failing to load
FailedWriteError Error raised when failing to save
Methods
-------
slice(tuple[index_like]) Subslice the array
permute(tuple[int]) Permute axes
transpose(int, int) Permute two axes
unsqueeze(int) Insert singleton dimension
squeeze(int) Remove singleton dimension
unbind -> tuple Unstack arrays along a dimension
chunk -> tuple Unstack arrays along a dimension by chunks
split -> tuple Unstack arrays along a dimension by chunks
data(...) -> tensor Load raw data to memory
fdata(...) -> tensor Load scaled floating-point data to memory
metadata(...) -> dict Load metadata to memory
set_data(dat, ...) Write raw data to disk
set_fdata(dat, ...) Write scaled floating-point data to disk
set_metadata(**meta) Write metadata to disk
Class methods
-------------
save_new(dat, file_like) Write new file populated with `dat`
savef_new(dat, file_like) Write new file populated with (scaled) `dat`
External functions
------------------
map(file_like) -> MappedArray Build a MappedArray
load(file_like) -> tensor Load raw data to memory from a file
loadf(file_like) -> tensor Load scaled data to memory from a file
save(dat, file_like) -> Save raw data into a new file
savef(dat, file_like) -> Save scaled data into a new file
cat(tuple[MappedArray]) Concatenate arrays along a dimension
    Syntactic sugar
    ---------------
__call__ -> fdata Load scaled floating-point data to memory
__array__ -> fdata Load scaled floating-point data to memory
__getitem__ -> slice Subslice the array
__setitem__ -> set_fdata Write scaled floating-point data to disk
__len__ Size of the first dimension (or 0 if scalar)
"""
fname: str = None # filename (can be None if in-memory proxy)
fileobj = None # file-like object (`write`, `seek`, etc)
is_compressed: bool = None # is compressed
dtype: torch.dtype = None # on-disk data type
slope: float = 1 # intensity slope
inter: float = 0 # intensity shift
affine = None # sliced voxel-to-world
_affine = None # original voxel-to-world
spatial: tuple = None # sliced spatial mask (len -> dim)
_spatial: tuple = None # original spatial mask (len -> _dim)
shape: tuple = None # sliced shape (len -> dim)
_shape: tuple = None # original shape (len -> _dim)
slicer: tuple = None # indexing into the parent
permutation: tuple = None # permutation of original dim (len -> _dim)
dim = property(lambda self: len(self.shape)) # Nb of sliced dimensions
_dim = property(lambda self: len(self._shape)) # Nb of original dimensions
voxel_size = property(lambda self: affvx(self.affine))
__repr__ = __str__
@classmethod
def possible_extensions(cls):
"""List all possible extensions"""
return tuple()
def __getitem__(self, index):
"""Extract a sub-part of the array.
Indices can only be slices, ellipses, integers or None.
Parameters
----------
index : tuple[slice or ellipsis or int or None]
Returns
-------
subarray : type(self)
MappedArray object, with the indexing operations and affine
matrix relating to the new sub-array.
"""
return self.slice(index)
def slice(self, index, new_shape=None, _pre_expanded=False):
"""Extract a sub-part of the array.
Indices can only be slices, ellipses, integers or None.
Parameters
----------
index : tuple[slice or ellipsis or int or None]
Other Parameters
----------------
new_shape : sequence[int], optional
Output shape of the sliced object
        _pre_expanded : bool, default=False
            Set to True if `expand_index` has already been called on `index`
Returns
-------
subarray : type(self)
MappedArray object, with the indexing operations and affine
matrix relating to the new sub-array.
"""
        if not _pre_expanded:
            index = expand_index(index, self.shape)
        if new_shape is None:
            new_shape = guess_shape(index, self.shape)
        # note: `any(...) > 1` was always False; count the list indices instead
        if sum(isinstance(idx, list) for idx in index) > 1:
raise ValueError('List indices not currently supported '
'(otherwise we enter advanced indexing '
'territory and it becomes too complicated).')
new = copy(self)
new.shape = new_shape
# compute new affine
if self.affine is not None:
spatial_shape = [sz for sz, msk in zip(self.shape, self.spatial)
if msk]
spatial_index = [idx for idx in index if not is_newaxis(idx)]
spatial_index = [idx for idx, msk in zip(spatial_index, self.spatial)
if msk]
affine, _ = affine_sub(self.affine, spatial_shape, tuple(spatial_index))
else:
affine = None
new.affine = affine
# compute new slicer
perm_shape = [self._shape[d] for d in self.permutation]
new.slicer = compose_index(self.slicer, index, perm_shape)
# compute new spatial mask
spatial = []
i = 0
for idx in new.slicer:
if is_newaxis(idx):
spatial.append(False)
else:
# original axis
if not is_droppedaxis(idx):
spatial.append(self._spatial[self.permutation[i]])
i += 1
new.spatial = tuple(spatial)
return new
def __setitem__(self, index, value):
"""Write scaled data to disk.
Parameters
----------
index : tuple
Tuple of indices (see `__getitem__`)
value : array or tensor
Array-like with shape `self[index].shape`
Returns
-------
self : type(self)
"""
if isinstance(value, MappedArray):
raise NotImplementedError
else:
self.__getitem__(index).set_fdata(value)
return self
def __call__(self, *args, **kwargs):
"""Get floating point data. See `fdata()`"""
return self.fdata(*args, **kwargs)
def __array__(self, dtype=None):
"""Convert to numpy array"""
return self.fdata(dtype=dtype, numpy=True)
def permute(self, dims):
"""Permute dimensions
Parameters
----------
dims : sequence[int]
A permutation of `range(self.dim)`
Returns
-------
permarray : type(self)
MappedArray object, with the indexing operations and affine
matrix reflecting the permutation.
"""
dims = list(dims)
if len(dims) != self.dim or len(dims) != len(set(dims)):
raise ValueError('there should be as many (unique) dimensions '
'as the array\'s dimension. Got {} and {}.'
.format(len(set(dims)), self.dim))
# permute tuples that relate to the current spatial dimensions
# (that part is easy)
shape = tuple(self.shape[d] for d in dims)
spatial = tuple(self.spatial[d] for d in dims)
# permute slicer
# 1) permute non-dropped dimensions
slicer_nodrop = list(filter(lambda x: not is_droppedaxis(x), self.slicer))
slicer_nodrop = [slicer_nodrop[d] for d in dims]
# 2) insert dropped dimensions
slicer = []
for idx in self.slicer:
if is_droppedaxis(idx):
slicer.append(idx)
else:
new_idx, *slicer_nodrop = slicer_nodrop
slicer.append(new_idx)
# permute permutation
# 1) insert None where new axes and remove dropped axes
old_perm = self.permutation
new_perm = []
drop_perm = []
for idx in self.slicer:
if is_newaxis(idx):
new_perm.append(None)
continue
p, *old_perm = old_perm
if not is_droppedaxis(idx):
new_perm.append(p)
else:
drop_perm.append(p)
# 2) permute
new_perm = [new_perm[d] for d in dims]
# 3) insert back dropped axes and remove new axes
perm = []
for idx in self.slicer:
if is_droppedaxis(idx):
p, *drop_perm = drop_perm
perm.append(p)
continue
p, *new_perm = new_perm
if not is_newaxis(p):
perm.append(p)
# permute affine
# (it's a bit more complicated: we need to find the
# permutation of the *current* *spatial* dimensions)
perm_spatial = [p for p in dims if self.spatial[p]]
perm_spatial = sorted(range(len(perm_spatial)),
key=lambda k: perm_spatial[k])
affine, _ = affine_permute(self.affine, perm_spatial, self.shape)
# create new object
new = copy(self)
new.shape = shape
new.spatial = spatial
new.permutation = tuple(perm)
new.slicer = tuple(slicer)
new.affine = affine
return new
def transpose(self, dim0, dim1):
"""Transpose two dimensions
Parameters
----------
dim0 : int
First dimension
dim1 : int
Second dimension
Returns
-------
permarray : type(self)
MappedArray object, with the indexing operations and affine
matrix reflecting the transposition.
"""
permutation = list(range(self.dim))
permutation[dim0] = dim1
permutation[dim1] = dim0
return self.permute(permutation)
def data(self, dtype=None, device=None, casting='unsafe', rand=True,
cutoff=None, dim=None, numpy=False):
"""Load the array in memory
Parameters
----------
dtype : type or torch.dtype or np.dtype, optional
Output data type. By default, keep the on-disk data type.
device : torch.device, default='cpu'
Output device.
        rand : bool, default=True
If the on-disk dtype is not floating point, sample noise
in the uncertainty interval.
cutoff : float or (float, float), default=(0, 1)
Percentile cutoff. If only one value is provided, it is
assumed to relate to the upper percentile.
dim : int or list[int], optional
Dimensions along which to compute percentiles.
By default, they are computed on the flattened array.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe', 'rescale'}, default='unsafe'
Controls what kind of data casting may occur:
* 'no': the data types should not be cast at all.
* 'equiv': only byte-order changes are allowed.
* 'safe': only casts which can preserve values are allowed.
* 'same_kind': only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe': any data conversions may be done.
* 'rescale': the input data is rescaled to match the dynamic
range of the output type. The minimum value in the data
is mapped to the minimum value of the data type and the
maximum value in the data is mapped to the maximum value
of the data type.
* 'rescale_zero': the input data is rescaled to match the
dynamic range of the output type, but ensuring that
zero maps to zero.
> If the data is signed and cast to a signed datatype,
zero maps to zero, and the scaling is chosen so that
both the maximum and minimum value in the data fit
in the output dynamic range.
> If the data is signed and cast to an unsigned datatype,
negative values "wrap around" (as with an unsafe cast).
> If the data is unsigned and cast to a signed datatype,
values are kept positive (the negative range is unused).
numpy : bool, default=False
Return a numpy array rather than a torch tensor.
Returns
-------
dat : tensor[dtype]
"""
pass
def fdata(self, dtype=None, device=None, rand=False, cutoff=None,
dim=None, numpy=False):
"""Load the scaled array in memory
This function differs from `data` in several ways:
* The output data type should be a floating point type.
* If an affine scaling (slope, intercept) is defined in the
file, it is applied to the data.
* the default output data type is `torch.get_default_dtype()`.
Parameters
----------
dtype : dtype_like, optional
Output data type. By default, use `torch.get_default_dtype()`.
Should be a floating point type.
device : torch.device, default='cpu'
Output device.
rand : bool, default=False
If the on-disk dtype is not floating point, sample noise
in the uncertainty interval.
cutoff : float or (float, float), default=(0, 1)
Percentile cutoff. If only one value is provided, it is
assumed to relate to the upper percentile.
dim : int or list[int], optional
Dimensions along which to compute percentiles.
By default, they are computed on the flattened array.
numpy : bool, default=False
Return a numpy array rather than a torch tensor.
Returns
-------
dat : tensor[dtype]
"""
# --- sanity check ---
dtype = torch.get_default_dtype() if dtype is None else dtype
info = dtypes.dtype(dtype)
if not info.is_floating_point:
raise TypeError('Output data type should be a floating point '
'type but got {}.'.format(dtype))
# --- get unscaled data ---
dat = self.data(dtype=dtype, device=device, rand=rand,
cutoff=cutoff, dim=dim, numpy=numpy)
# --- scale ---
if self.slope != 1:
dat *= float(self.slope)
if self.inter != 0:
dat += float(self.inter)
return dat
def set_data(self, dat, casting='unsafe'):
"""Write (partial) data to disk.
Parameters
----------
dat : tensor
Tensor to write on disk. It should have shape `self.shape`.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe', 'rescale'}, default='unsafe'
Controls what kind of data casting may occur:
* 'no': the data types should not be cast at all.
* 'equiv': only byte-order changes are allowed.
* 'safe': only casts which can preserve values are allowed.
* 'same_kind': only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe': any data conversions may be done.
* 'rescale': the input data is rescaled to match the dynamic
range of the output type. The minimum value in the data
is mapped to the minimum value of the data type and the
maximum value in the data is mapped to the maximum value
of the data type.
* 'rescale_zero': the input data is rescaled to match the
dynamic range of the output type, but ensuring that
zero maps to zero.
> If the data is signed and cast to a signed datatype,
zero maps to zero, and the scaling is chosen so that
both the maximum and minimum value in the data fit
in the output dynamic range.
> If the data is signed and cast to an unsigned datatype,
negative values "wrap around" (as with an unsafe cast).
> If the data is unsigned and cast to a signed datatype,
values are kept positive (the negative range is unused).
Returns
-------
self : type(self)
"""
raise self.FailedWriteError("Method not implemented in class {}."
.format(type(self).__name__))
def set_fdata(self, dat):
"""Write (partial) scaled data to disk.
Parameters
----------
dat : tensor
Tensor to write on disk. It should have shape `self.shape`
and a floating point data type.
Returns
-------
self : type(self)
"""
# --- sanity check ---
info = dtypes.dtype(dat.dtype)
if not info.is_floating_point:
raise TypeError('Input data type should be a floating point '
'type but got {}.'.format(dat.dtype))
if dat.shape != self.shape:
raise TypeError('Expected input shape {} but got {}.'
.format(self.shape, dat.shape))
# --- detach ---
if torch.is_tensor(dat):
dat = dat.detach()
# --- unscale ---
if self.inter != 0 or self.slope != 1:
dat = dat.clone() if torch.is_tensor(dat) else dat.copy()
if self.inter != 0:
dat -= float(self.inter)
if self.slope != 1:
dat /= float(self.slope)
# --- set unscaled data ---
self.set_data(dat)
return self
def metadata(self, keys=None):
"""Read metadata
.. note:: The values returned by this function always relate to
the full volume, even if we're inside a view. That is,
we always return the affine of the original volume.
To get an affine matrix that relates to the view,
use `self.affine`.
Parameters
----------
keys : sequence[str], optional
List of metadata to load. They can either be one of the
generic metadata keys define in `io.metadata`, or a
format-specific metadata key.
By default, all generic keys that are found in the file
are returned.
Returns
-------
metadata : dict
A dictionary of metadata
"""
return dict()
def set_metadata(self, **meta):
"""Write metadata
Parameters
----------
meta : dict, optional
Dictionary of metadata.
Fields that are absent from the dictionary or that have
value `None` are kept untouched.
Returns
-------
self : type(self)
"""
raise NotImplementedError("Method not implemented in class {}."
.format(type(self).__name__))
@classmethod
def save_new(cls, dat, file_like, like=None, casting='unsafe', **metadata):
"""Write an array to disk.
This function makes educated choices for the file format and
its metadata based on the file extension, the data type and the
other options provided.
Parameters
----------
dat : tensor or array or MappedArray
Data to write
file_like : str or file object
Path to file or file object (with methods `seek`, `read`).
If the extension is known, it gets priority over `like` when
choosing the output format.
like : file or MappedArray
An array on-disk that should be used as a template for the new
file. Its metadata/layout/etc will be mimicked as much as possible.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe', 'rescale'}, default='unsafe'
Controls what kind of data casting may occur.
See `MappedArray.set_data`
metadata : dict
Metadata to store on disk. Values provided there will have
priority over `like`.
Returns
-------
dat : array or tensor
The array loaded in memory
attributes : dict, if attributes is not None
Dictionary of attributes loaded as well
"""
raise cls.FailedWriteError("Method not implemented in class {}."
.format(cls.__name__))
@classmethod
def savef_new(cls, dat, file_like, like=None, **metadata):
"""Write a scaled array to disk.
This function makes educated choices for the file format and
its metadata based on the file extension, the data type and the
other options provided.
The input data type must be a floating point type.
Parameters
----------
dat : tensor or array or MappedArray
Data to write
file_like : str or file object
Path to file or file object (with methods `seek`, `read`).
If the extension is known, it gets priority over `like` when
choosing the output format.
like : file or MappedArray
An array on-disk that should be used as a template for the new
file. Its metadata/layout/etc will be mimicked as much as possible.
metadata : dict
Metadata to store on disk. Values provided there will have
priority over `like`.
Returns
-------
dat : array or tensor
The array loaded in memory
attributes : dict, if attributes is not None
Dictionary of attributes loaded as well
"""
raise cls.FailedWriteError("Method not implemented in class {}."
.format(cls.__name__))
def unsqueeze(self, dim, ndim=1):
"""Add a dimension of size 1 in position `dim`.
Parameters
----------
        dim : int
            The dimension is added to the right of `dim` if `dim < 0`
            else it is added to the left of `dim`.
        ndim : int, default=1
            Number of singleton dimensions to insert.
Returns
-------
MappedArray
"""
index = [slice(None)] * self.dim
if dim < 0:
dim = self.dim + dim + 1
index = index[:dim] + ([None] * ndim) + index[dim:]
return self[tuple(index)]
    def squeeze(self, dim=None):
"""Remove all dimensions of size 1.
Parameters
----------
dim : int or sequence[int], optional
If provided, only this dimension is squeezed. It *must* be a
dimension of size 1.
Returns
-------
MappedArray
"""
if dim is None:
dim = [d for d in range(self.dim) if self.shape[d] == 1]
dim = make_list(dim)
ndim = len(self.shape)
dim = [ndim + d if d < 0 else d for d in dim]
if any(self.shape[d] != 1 for d in dim):
raise ValueError('Impossible to squeeze non-singleton dimensions.')
index = [slice(None) if d not in dim else 0 for d in range(self.dim)]
return self[tuple(index)]
def unbind(self, dim=0, keepdim=False):
"""Extract all arrays along dimension `dim` and drop that dimension.
Parameters
----------
dim : int, default=0
Dimension along which to unstack.
keepdim : bool, default=False
Do not drop the unstacked dimension.
Returns
-------
list[MappedArray]
"""
index = [slice(None)] * self.dim
if keepdim:
index = index[:dim+1] + [None] + index[dim+1:]
out = []
for i in range(self.shape[dim]):
index[dim] = i
out.append(self[tuple(index)])
return out
def chunk(self, chunks, dim=0):
"""Split the array into smaller arrays of size `chunk` along `dim`.
Parameters
----------
chunks : int
Number of chunks.
dim : int, default=0
Dimensions along which to split.
Returns
-------
list[MappedArray]
"""
        # `chunks` is a number of chunks (cf. torch.chunk): derive the chunk size
        chunk_size = -(-self.shape[dim] // chunks)  # ceil division
        index = [slice(None)] * self.dim
        out = []
        for i in range(chunks):
            index[dim] = slice(i*chunk_size, (i+1)*chunk_size)
            out.append(self[tuple(index)])
        return out
def split(self, chunks, dim=0):
"""Split the array into smaller arrays along `dim`.
Parameters
----------
chunks : int or list[int]
If `int`: Number of chunks (see `self.chunk`)
Else: Size of each chunk. Must sum to `self.shape[dim]`.
dim : int, default=0
Dimensions along which to split.
Returns
-------
list[MappedArray]
"""
if isinstance(chunks, int):
return self.chunk(chunks, dim)
chunks = make_list(chunks)
if sum(chunks) != self.shape[dim]:
raise ValueError('Chunks must cover the full dimension. '
'Got {} and {}.'
.format(sum(chunks), self.shape[dim]))
index = [slice(None)] * self.dim
previous_chunks = 0
out = []
for chunk in chunks:
index[dim] = slice(previous_chunks, previous_chunks+chunk)
out.append(self[tuple(index)])
previous_chunks += chunk
return out
def channel_first(self, atleast=0):
"""Permute the dimensions such that all spatial axes are on the right.
Parameters
----------
atleast : int, default=0
Make sure that at least this number of non-spatial dimensions
exist (new axes are inserted accordingly).
Returns
-------
MappedArray
"""
# 1) move spatial dimensions to the right
perm = []
spatial = []
for d, is_spatial in enumerate(self.spatial):
if is_spatial:
spatial.append(d)
else:
perm.append(d)
nb_channels = len(perm)
perm = perm + spatial
new = self.permute(perm)
# 2) add channel axes
add_channels = max(0, atleast - nb_channels)
if add_channels:
index = [slice(None)] * nb_channels \
+ [None] * add_channels \
+ [Ellipsis]
new = new.slice(tuple(index))
return new
def channel_last(self, atleast=0):
"""Permute the dimensions such that all spatial axes are on the left.
Parameters
----------
atleast : int, default=0
Make sure that at least this number of non-spatial dimensions
exist (new axes are inserted accordingly).
Returns
-------
MappedArray
"""
# 1) move spatial dimensions to the right
perm = []
spatial = []
for d, is_spatial in enumerate(self.spatial):
if is_spatial:
spatial.append(d)
else:
perm.append(d)
nb_channels = len(perm)
perm = spatial + perm
new = self.permute(perm)
# 2) add channel axes
add_channels = max(0, atleast - nb_channels)
if add_channels:
index = [Ellipsis] + [None] * add_channels
new = new.slice(tuple(index))
return new
class CatArray(MappedArray):
"""A concatenation of mapped arrays.
This is largely inspired by virtual concatenation of file_array in
SPM: https://github.com/spm/spm12/blob/master/@file_array/cat.m
"""
_arrays: tuple = []
_dim_cat: int = None
# defer attributes
fname = property(lambda self: tuple(a.fname for a in self._arrays))
fileobj = property(lambda self: tuple(a.fileobj for a in self._arrays))
is_compressed = property(lambda self: tuple(a.is_compressed for a in self._arrays))
dtype = property(lambda self: tuple(a.dtype for a in self._arrays))
slope = property(lambda self: tuple(a.slope for a in self._arrays))
inter = property(lambda self: tuple(a.inter for a in self._arrays))
_shape = property(lambda self: tuple(a._shape for a in self._arrays))
_dim = property(lambda self: tuple(a._dim for a in self._arrays))
affine = property(lambda self: tuple(a.affine for a in self._arrays))
_affine = property(lambda self: tuple(a._affine for a in self._arrays))
spatial = property(lambda self: tuple(a.spatial for a in self._arrays))
_spatial = property(lambda self: tuple(a._spatial for a in self._arrays))
slicer = property(lambda self: tuple(a.slicer for a in self._arrays))
permutation = property(lambda self: tuple(a.permutation for a in self._arrays))
voxel_size = property(lambda self: tuple(a.voxel_size for a in self._arrays))
def __init__(self, arrays, dim=0):
"""
Parameters
----------
arrays : sequence[MappedArray]
Arrays to concatenate. Their shapes should be identical
except along dimension `dim`.
dim : int, default=0
            Dimension along which to concatenate the arrays
"""
super().__init__()
arrays = list(arrays)
dim = dim or 0
self._dim_cat = dim
# sanity checks
shapes = []
for i, array in enumerate(arrays):
if not isinstance(array, MappedArray):
                raise TypeError('Input arrays should be `MappedArray` '
                                'instances. Got {}.'.format(type(array)))
shape = list(array.shape)
del shape[dim]
shapes.append(shape)
shape0, *shapes = shapes
if not all(shape == shape0 for shape in shapes):
raise ValueError('Shapes of all concatenated arrays should '
'be equal except in the concatenation dimension.')
# compute output shape
shape = list(arrays[0].shape)
dims = [array.shape[dim] for array in arrays]
shape[dim] = sum(dims)
self.shape = tuple(shape)
# concatenate
self._arrays = tuple(arrays)
__repr__ = __str__
def cat(arrays, dim=0):
"""Concatenate mapped arrays along a dimension.
Parameters
----------
arrays : sequence[MappedArray]
Arrays to concatenate. Their shapes should be identical
except along dimension `dim`.
dim : int, default=0
        Dimension along which to concatenate the arrays
Returns
-------
CatArray
A symbolic concatenation of all input arrays.
Its shape along dimension `dim` is the sum of all input shapes
along dimension `dim`.
"""
return CatArray(arrays, dim)
def stack(arrays, dim=0):
"""Stack mapped arrays along a dimension.
Parameters
----------
arrays : sequence[MappedArray]
Arrays to concatenate. Their shapes should be identical
except along dimension `dim`.
dim : int, default=0
        Dimension along which to concatenate the arrays
Returns
-------
CatArray
A symbolic stack of all input arrays.
"""
arrays = [array.unsqueeze(dim=dim) for array in arrays]
return cat(arrays, dim=dim)
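A minimal usage sketch of the symbolic-slicing API (`MyVolume` is a hypothetical concrete subclass; only `data`/`fdata` actually read from disk):
vol = MyVolume('scan.nii')             # hypothetical concrete MappedArray
view = vol[..., 0].permute([2, 0, 1])  # pure bookkeeping: nothing is read yet
sub = view.unsqueeze(0)                # insert a leading singleton dimension
dat = sub.fdata(dtype=torch.float32)   # only now is the sub-volume loaded and scaled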
| 37.277452
| 93
| 0.565727
|
from copy import copy
import torch
from nitorch.core.py import make_list
from nitorch.core import dtypes
from nitorch.spatial import affine_sub, affine_permute, voxel_size as affvx
from nitorch.io.utils.indexing import (expand_index, guess_shape, compose_index, neg2pos,
is_droppedaxis, is_newaxis, is_sliceaxis,
invert_permutation, invert_slice, slice_navigator)
from ..utils import volutils
from ..mapping import MappedFile
class MappedArray(MappedFile):
"""Base class for mapped arrays.
Mapped arrays are usually stored on-disk, along with (diverse) metadata.
They can be symbolically sliced, allowing for partial reading and
(sometimes) writing of data from/to disk.
Chaining of symbolic slicing operations is implemented in this base
class. The actual io must be implemented by the child class.
Abstract Methods
----------------
Child classes MUST implement:
* self.data(...)
Child classes SHOULD implement:
* self.metadata(...) default -> returns empty dict
Child classes MAY implement:
* self.set_data(...) default -> raises cls.FailedWriteError
* self.set_metadata(...) default -> raises cls.FailedWriteError
* cls.save_new(...) default -> raises cls.FailedWriteError
* cls.savef_new(...) default -> raises cls.FailedWriteError
Child classes SHOULD register themselves in `readers.reader_classes`.
If they implement `save_new`, child classes SHOULD register
themselves in `writers.writer_classes`.
Properties
----------
dtype : np.dtype On-disk data type
slope : float Intensity slope from on-disk to unit
inter : float Intensity shift from on-disk to unit
affine : tensor Orientation matrix: maps spatial axes to 'world'
spatial : tuple[bool] Mask of 'spatial' axes (x, y, z, ...)
slicer : tuple[index_like] Indexing into the full on-disk array
permutation : tuple[int] Permutation of the original in-disk axes.
dim : int Number of axes
voxel_size : tuple[float] World size of the spatial dimensions
readable : AccessType See `AccessType`
writable : AccessType See `AccessType`
Types
-----
FailedReadError Error raised when failing to load
FailedWriteError Error raised when failing to save
Methods
-------
slice(tuple[index_like]) Subslice the array
permute(tuple[int]) Permute axes
transpose(int, int) Permute two axes
unsqueeze(int) Insert singleton dimension
squeeze(int) Remove singleton dimension
unbind -> tuple Unstack arrays along a dimension
chunk -> tuple Unstack arrays along a dimension by chunks
split -> tuple Unstack arrays along a dimension by chunks
data(...) -> tensor Load raw data to memory
fdata(...) -> tensor Load scaled floating-point data to memory
metadata(...) -> dict Load metadata to memory
set_data(dat, ...) Write raw data to disk
set_fdata(dat, ...) Write scaled floating-point data to disk
set_metadata(**meta) Write metadata to disk
Class methods
-------------
save_new(dat, file_like) Write new file populated with `dat`
savef_new(dat, file_like) Write new file populated with (scaled) `dat`
External functions
------------------
map(file_like) -> MappedArray Build a MappedArray
load(file_like) -> tensor Load raw data to memory from a file
loadf(file_like) -> tensor Load scaled data to memory from a file
save(dat, file_like) -> Save raw data into a new file
savef(dat, file_like) -> Save scaled data into a new file
cat(tuple[MappedArray]) Concatenate arrays along a dimension
    Syntactic sugar
    ---------------
__call__ -> fdata Load scaled floating-point data to memory
__array__ -> fdata Load scaled floating-point data to memory
__getitem__ -> slice Subslice the array
__setitem__ -> set_fdata Write scaled floating-point data to disk
__len__ Size of the first dimension (or 0 if scalar)
"""
fname: str = None # filename (can be None if in-memory proxy)
fileobj = None # file-like object (`write`, `seek`, etc)
is_compressed: bool = None # is compressed
dtype: torch.dtype = None # on-disk data type
slope: float = 1 # intensity slope
inter: float = 0 # intensity shift
affine = None # sliced voxel-to-world
_affine = None # original voxel-to-world
spatial: tuple = None # sliced spatial mask (len -> dim)
_spatial: tuple = None # original spatial mask (len -> _dim)
shape: tuple = None # sliced shape (len -> dim)
_shape: tuple = None # original shape (len -> _dim)
slicer: tuple = None # indexing into the parent
permutation: tuple = None # permutation of original dim (len -> _dim)
dim = property(lambda self: len(self.shape)) # Nb of sliced dimensions
_dim = property(lambda self: len(self._shape)) # Nb of original dimensions
voxel_size = property(lambda self: affvx(self.affine))
def __init__(self, **kwargs):
self._init(**kwargs)
def _init(self, **kwargs):
        for key, val in kwargs.items():
setattr(self, key, val)
if self.permutation is None:
self.permutation = tuple(range(self._dim))
if self.slicer is None:
# same layout as on-disk
self.spatial = self._spatial
self.affine = self._affine
self.shape = self._shape
self.slicer = expand_index([Ellipsis], self._shape)
return self
def __str__(self):
return '{}(shape={}, dtype={})'.format(
type(self).__name__, self.shape, self.dtype)
__repr__ = __str__
def __len__(self):
if len(self.shape) > 0:
return self.shape[0]
else:
return 0
@classmethod
def possible_extensions(cls):
"""List all possible extensions"""
return tuple()
def __getitem__(self, index):
"""Extract a sub-part of the array.
Indices can only be slices, ellipses, integers or None.
Parameters
----------
index : tuple[slice or ellipsis or int or None]
Returns
-------
subarray : type(self)
MappedArray object, with the indexing operations and affine
matrix relating to the new sub-array.
"""
return self.slice(index)
def slice(self, index, new_shape=None, _pre_expanded=False):
"""Extract a sub-part of the array.
Indices can only be slices, ellipses, integers or None.
Parameters
----------
index : tuple[slice or ellipsis or int or None]
Other Parameters
----------------
new_shape : sequence[int], optional
Output shape of the sliced object
_pre_expanded : bool, default=False
            Set to True if `expand_index` has already been called on `index`
Returns
-------
subarray : type(self)
MappedArray object, with the indexing operations and affine
matrix relating to the new sub-array.
"""
        if not _pre_expanded:
            index = expand_index(index, self.shape)
        if new_shape is None:
            new_shape = guess_shape(index, self.shape)
        # note: `any(...) > 1` was always False; count the list indices instead
        if sum(isinstance(idx, list) for idx in index) > 1:
raise ValueError('List indices not currently supported '
'(otherwise we enter advanced indexing '
'territory and it becomes too complicated).')
new = copy(self)
new.shape = new_shape
# compute new affine
if self.affine is not None:
spatial_shape = [sz for sz, msk in zip(self.shape, self.spatial)
if msk]
spatial_index = [idx for idx in index if not is_newaxis(idx)]
spatial_index = [idx for idx, msk in zip(spatial_index, self.spatial)
if msk]
affine, _ = affine_sub(self.affine, spatial_shape, tuple(spatial_index))
else:
affine = None
new.affine = affine
# compute new slicer
perm_shape = [self._shape[d] for d in self.permutation]
new.slicer = compose_index(self.slicer, index, perm_shape)
# compute new spatial mask
spatial = []
i = 0
for idx in new.slicer:
if is_newaxis(idx):
spatial.append(False)
else:
# original axis
if not is_droppedaxis(idx):
spatial.append(self._spatial[self.permutation[i]])
i += 1
new.spatial = tuple(spatial)
return new
def __setitem__(self, index, value):
"""Write scaled data to disk.
Parameters
----------
index : tuple
Tuple of indices (see `__getitem__`)
value : array or tensor
Array-like with shape `self[index].shape`
Returns
-------
self : type(self)
"""
if isinstance(value, MappedArray):
raise NotImplementedError
else:
self.__getitem__(index).set_fdata(value)
return self
def __call__(self, *args, **kwargs):
"""Get floating point data. See `fdata()`"""
return self.fdata(*args, **kwargs)
def __array__(self, dtype=None):
"""Convert to numpy array"""
return self.fdata(dtype=dtype, numpy=True)
def permute(self, dims):
"""Permute dimensions
Parameters
----------
dims : sequence[int]
A permutation of `range(self.dim)`
Returns
-------
permarray : type(self)
MappedArray object, with the indexing operations and affine
matrix reflecting the permutation.
"""
dims = list(dims)
if len(dims) != self.dim or len(dims) != len(set(dims)):
raise ValueError('there should be as many (unique) dimensions '
'as the array\'s dimension. Got {} and {}.'
.format(len(set(dims)), self.dim))
# permute tuples that relate to the current spatial dimensions
# (that part is easy)
shape = tuple(self.shape[d] for d in dims)
spatial = tuple(self.spatial[d] for d in dims)
# permute slicer
# 1) permute non-dropped dimensions
slicer_nodrop = list(filter(lambda x: not is_droppedaxis(x), self.slicer))
slicer_nodrop = [slicer_nodrop[d] for d in dims]
# 2) insert dropped dimensions
slicer = []
for idx in self.slicer:
if is_droppedaxis(idx):
slicer.append(idx)
else:
new_idx, *slicer_nodrop = slicer_nodrop
slicer.append(new_idx)
# permute permutation
# 1) insert None where new axes and remove dropped axes
old_perm = self.permutation
new_perm = []
drop_perm = []
for idx in self.slicer:
if is_newaxis(idx):
new_perm.append(None)
continue
p, *old_perm = old_perm
if not is_droppedaxis(idx):
new_perm.append(p)
else:
drop_perm.append(p)
# 2) permute
new_perm = [new_perm[d] for d in dims]
# 3) insert back dropped axes and remove new axes
perm = []
for idx in self.slicer:
if is_droppedaxis(idx):
p, *drop_perm = drop_perm
perm.append(p)
continue
p, *new_perm = new_perm
if not is_newaxis(p):
perm.append(p)
# permute affine
# (it's a bit more complicated: we need to find the
# permutation of the *current* *spatial* dimensions)
perm_spatial = [p for p in dims if self.spatial[p]]
perm_spatial = sorted(range(len(perm_spatial)),
key=lambda k: perm_spatial[k])
affine, _ = affine_permute(self.affine, perm_spatial, self.shape)
# create new object
new = copy(self)
new.shape = shape
new.spatial = spatial
new.permutation = tuple(perm)
new.slicer = tuple(slicer)
new.affine = affine
return new
def movedim(self, source, destination):
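        """Move dimensions from `source` to `destination` positions.

        Parameters
        ----------
        source : int or sequence[int]
            Initial positions of the dimensions to move.
        destination : int or sequence[int]
            Destination positions. A single destination with several
            sources is expanded so the moved dimensions keep the order
            in which they were provided.

        Returns
        -------
        permarray : type(self)
        """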
dim = self.dim
source = make_list(source)
destination = make_list(destination)
if len(destination) == 1:
# we assume that the user wishes to keep moved dimensions
# in the order they were provided
destination = destination[0]
if destination >= 0:
destination = list(range(destination, destination + len(source)))
else:
destination = list(range(destination + 1 - len(source), destination + 1))
if len(source) != len(destination):
raise ValueError('Expected as many source as destination positions.')
source = [dim + src if src < 0 else src for src in source]
destination = [dim + dst if dst < 0 else dst for dst in destination]
if len(set(source)) != len(source):
raise ValueError(f'Expected source positions to be unique but got '
f'{source}')
if len(set(destination)) != len(destination):
raise ValueError(f'Expected destination positions to be unique but got '
f'{destination}')
# compute permutation
positions_in = list(range(dim))
positions_out = [None] * dim
for src, dst in zip(source, destination):
positions_out[dst] = src
positions_in[src] = None
positions_in = filter(lambda x: x is not None, positions_in)
for i, pos in enumerate(positions_out):
if pos is None:
positions_out[i], *positions_in = positions_in
return self.permute(positions_out)
def transpose(self, dim0, dim1):
"""Transpose two dimensions
Parameters
----------
dim0 : int
First dimension
dim1 : int
Second dimension
Returns
-------
permarray : type(self)
MappedArray object, with the indexing operations and affine
matrix reflecting the transposition.
"""
permutation = list(range(self.dim))
permutation[dim0] = dim1
permutation[dim1] = dim0
return self.permute(permutation)
def data(self, dtype=None, device=None, casting='unsafe', rand=True,
cutoff=None, dim=None, numpy=False):
"""Load the array in memory
Parameters
----------
dtype : type or torch.dtype or np.dtype, optional
Output data type. By default, keep the on-disk data type.
device : torch.device, default='cpu'
Output device.
        rand : bool, default=True
If the on-disk dtype is not floating point, sample noise
in the uncertainty interval.
cutoff : float or (float, float), default=(0, 1)
Percentile cutoff. If only one value is provided, it is
assumed to relate to the upper percentile.
dim : int or list[int], optional
Dimensions along which to compute percentiles.
By default, they are computed on the flattened array.
        casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe', 'rescale', 'rescale_zero'}, default='unsafe'
Controls what kind of data casting may occur:
* 'no': the data types should not be cast at all.
* 'equiv': only byte-order changes are allowed.
* 'safe': only casts which can preserve values are allowed.
* 'same_kind': only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe': any data conversions may be done.
* 'rescale': the input data is rescaled to match the dynamic
range of the output type. The minimum value in the data
is mapped to the minimum value of the data type and the
maximum value in the data is mapped to the maximum value
of the data type.
* 'rescale_zero': the input data is rescaled to match the
dynamic range of the output type, but ensuring that
zero maps to zero.
> If the data is signed and cast to a signed datatype,
zero maps to zero, and the scaling is chosen so that
both the maximum and minimum value in the data fit
in the output dynamic range.
> If the data is signed and cast to an unsigned datatype,
negative values "wrap around" (as with an unsafe cast).
> If the data is unsigned and cast to a signed datatype,
values are kept positive (the negative range is unused).
numpy : bool, default=False
Return a numpy array rather than a torch tensor.
Returns
-------
dat : tensor[dtype]
"""
        raise NotImplementedError("Method not implemented in class {}."
                                  .format(type(self).__name__))
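    # A minimal sketch of the 'rescale' casting rule documented above
    # (illustrative only; concrete readers implement this in subclasses):
    #     >>> import numpy as np
    #     >>> dat = np.asarray([10, 20, 30], dtype=np.int16)
    #     >>> info = np.iinfo(np.uint8)
    #     >>> scale = (info.max - info.min) / (dat.max() - dat.min())
    #     >>> (dat - dat.min()) * scale + info.min   # maps [10, 30] -> [0, 255]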
def fdata(self, dtype=None, device=None, rand=False, cutoff=None,
dim=None, numpy=False):
"""Load the scaled array in memory
This function differs from `data` in several ways:
* The output data type should be a floating point type.
* If an affine scaling (slope, intercept) is defined in the
file, it is applied to the data.
* the default output data type is `torch.get_default_dtype()`.
Parameters
----------
dtype : dtype_like, optional
Output data type. By default, use `torch.get_default_dtype()`.
Should be a floating point type.
device : torch.device, default='cpu'
Output device.
rand : bool, default=False
If the on-disk dtype is not floating point, sample noise
in the uncertainty interval.
cutoff : float or (float, float), default=(0, 1)
Percentile cutoff. If only one value is provided, it is
assumed to relate to the upper percentile.
dim : int or list[int], optional
Dimensions along which to compute percentiles.
By default, they are computed on the flattened array.
numpy : bool, default=False
Return a numpy array rather than a torch tensor.
Returns
-------
dat : tensor[dtype]
"""
# --- sanity check ---
dtype = torch.get_default_dtype() if dtype is None else dtype
info = dtypes.dtype(dtype)
if not info.is_floating_point:
raise TypeError('Output data type should be a floating point '
'type but got {}.'.format(dtype))
# --- get unscaled data ---
dat = self.data(dtype=dtype, device=device, rand=rand,
cutoff=cutoff, dim=dim, numpy=numpy)
# --- scale ---
if self.slope != 1:
dat *= float(self.slope)
if self.inter != 0:
dat += float(self.inter)
return dat
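    # The scaling above follows the usual on-disk affine convention
    # (spelled out here for clarity):
    #     value_loaded = slope * value_stored + inter
    # e.g. slope=2.0, inter=0.5 turns a stored value of 3 into 6.5.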
def set_data(self, dat, casting='unsafe'):
"""Write (partial) data to disk.
Parameters
----------
dat : tensor
Tensor to write on disk. It should have shape `self.shape`.
        casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe', 'rescale', 'rescale_zero'}, default='unsafe'
Controls what kind of data casting may occur:
* 'no': the data types should not be cast at all.
* 'equiv': only byte-order changes are allowed.
* 'safe': only casts which can preserve values are allowed.
* 'same_kind': only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe': any data conversions may be done.
* 'rescale': the input data is rescaled to match the dynamic
range of the output type. The minimum value in the data
is mapped to the minimum value of the data type and the
maximum value in the data is mapped to the maximum value
of the data type.
* 'rescale_zero': the input data is rescaled to match the
dynamic range of the output type, but ensuring that
zero maps to zero.
> If the data is signed and cast to a signed datatype,
zero maps to zero, and the scaling is chosen so that
both the maximum and minimum value in the data fit
in the output dynamic range.
> If the data is signed and cast to an unsigned datatype,
negative values "wrap around" (as with an unsafe cast).
> If the data is unsigned and cast to a signed datatype,
values are kept positive (the negative range is unused).
Returns
-------
self : type(self)
"""
raise self.FailedWriteError("Method not implemented in class {}."
.format(type(self).__name__))
def set_fdata(self, dat):
"""Write (partial) scaled data to disk.
Parameters
----------
dat : tensor
Tensor to write on disk. It should have shape `self.shape`
and a floating point data type.
Returns
-------
self : type(self)
"""
# --- sanity check ---
info = dtypes.dtype(dat.dtype)
if not info.is_floating_point:
raise TypeError('Input data type should be a floating point '
'type but got {}.'.format(dat.dtype))
if dat.shape != self.shape:
raise TypeError('Expected input shape {} but got {}.'
.format(self.shape, dat.shape))
# --- detach ---
if torch.is_tensor(dat):
dat = dat.detach()
# --- unscale ---
if self.inter != 0 or self.slope != 1:
dat = dat.clone() if torch.is_tensor(dat) else dat.copy()
if self.inter != 0:
dat -= float(self.inter)
if self.slope != 1:
dat /= float(self.slope)
# --- set unscaled data ---
self.set_data(dat)
return self
def metadata(self, keys=None):
"""Read metadata
.. note:: The values returned by this function always relate to
the full volume, even if we're inside a view. That is,
we always return the affine of the original volume.
To get an affine matrix that relates to the view,
use `self.affine`.
Parameters
----------
keys : sequence[str], optional
List of metadata to load. They can either be one of the
generic metadata keys define in `io.metadata`, or a
format-specific metadata key.
By default, all generic keys that are found in the file
are returned.
Returns
-------
metadata : dict
A dictionary of metadata
"""
return dict()
def set_metadata(self, **meta):
"""Write metadata
Parameters
----------
meta : dict, optional
Dictionary of metadata.
Fields that are absent from the dictionary or that have
value `None` are kept untouched.
Returns
-------
self : type(self)
"""
raise NotImplementedError("Method not implemented in class {}."
.format(type(self).__name__))
@classmethod
def save_new(cls, dat, file_like, like=None, casting='unsafe', **metadata):
"""Write an array to disk.
This function makes educated choices for the file format and
its metadata based on the file extension, the data type and the
other options provided.
Parameters
----------
dat : tensor or array or MappedArray
Data to write
file_like : str or file object
Path to file or file object (with methods `seek`, `read`).
If the extension is known, it gets priority over `like` when
choosing the output format.
like : file or MappedArray
An array on-disk that should be used as a template for the new
file. Its metadata/layout/etc will be mimicked as much as possible.
        casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe', 'rescale', 'rescale_zero'}, default='unsafe'
Controls what kind of data casting may occur.
See `MappedArray.set_data`
metadata : dict
Metadata to store on disk. Values provided there will have
priority over `like`.
Returns
-------
dat : array or tensor
The array loaded in memory
attributes : dict, if attributes is not None
Dictionary of attributes loaded as well
"""
raise cls.FailedWriteError("Method not implemented in class {}."
.format(cls.__name__))
@classmethod
def savef_new(cls, dat, file_like, like=None, **metadata):
"""Write a scaled array to disk.
This function makes educated choices for the file format and
its metadata based on the file extension, the data type and the
other options provided.
The input data type must be a floating point type.
Parameters
----------
dat : tensor or array or MappedArray
Data to write
file_like : str or file object
Path to file or file object (with methods `seek`, `read`).
If the extension is known, it gets priority over `like` when
choosing the output format.
like : file or MappedArray
An array on-disk that should be used as a template for the new
file. Its metadata/layout/etc will be mimicked as much as possible.
metadata : dict
Metadata to store on disk. Values provided there will have
priority over `like`.
Returns
-------
dat : array or tensor
The array loaded in memory
attributes : dict, if attributes is not None
Dictionary of attributes loaded as well
"""
raise cls.FailedWriteError("Method not implemented in class {}."
.format(cls.__name__))
def unsqueeze(self, dim, ndim=1):
"""Add a dimension of size 1 in position `dim`.
Parameters
----------
dim : int
The dimension is added to the right of `dim` if `dim < 0`
            else it is added to the left of `dim`.
        ndim : int, default=1
            Number of new axes (each of size 1) to insert.
Returns
-------
MappedArray
"""
index = [slice(None)] * self.dim
if dim < 0:
dim = self.dim + dim + 1
index = index[:dim] + ([None] * ndim) + index[dim:]
return self[tuple(index)]
    def squeeze(self, dim=None):
"""Remove all dimensions of size 1.
Parameters
----------
dim : int or sequence[int], optional
If provided, only this dimension is squeezed. It *must* be a
dimension of size 1.
Returns
-------
MappedArray
"""
if dim is None:
dim = [d for d in range(self.dim) if self.shape[d] == 1]
dim = make_list(dim)
ndim = len(self.shape)
dim = [ndim + d if d < 0 else d for d in dim]
if any(self.shape[d] != 1 for d in dim):
raise ValueError('Impossible to squeeze non-singleton dimensions.')
index = [slice(None) if d not in dim else 0 for d in range(self.dim)]
return self[tuple(index)]
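    # Illustrative round trip (an added example, not from the original
    # docs): for shape (3, 1, 5), `squeeze(1)` yields shape (3, 5) and
    # `unsqueeze(1)` on that result yields (3, 1, 5) again.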
def unbind(self, dim=0, keepdim=False):
"""Extract all arrays along dimension `dim` and drop that dimension.
Parameters
----------
dim : int, default=0
Dimension along which to unstack.
keepdim : bool, default=False
Do not drop the unstacked dimension.
Returns
-------
list[MappedArray]
"""
index = [slice(None)] * self.dim
if keepdim:
index = index[:dim+1] + [None] + index[dim+1:]
out = []
for i in range(self.shape[dim]):
index[dim] = i
out.append(self[tuple(index)])
return out
def chunk(self, chunks, dim=0):
"""Split the array into smaller arrays of size `chunk` along `dim`.
Parameters
----------
chunks : int
Number of chunks.
dim : int, default=0
Dimensions along which to split.
Returns
-------
list[MappedArray]
"""
        index = [slice(None)] * self.dim
        # `chunks` is a number of chunks (as documented above), so each
        # chunk spans ceil(shape[dim] / chunks) slices
        chunk_size = -(-self.shape[dim] // chunks)  # ceil division
        out = []
        for i in range(chunks):
            index[dim] = slice(i*chunk_size, min((i+1)*chunk_size, self.shape[dim]))
            out.append(self[tuple(index)])
        return out
def split(self, chunks, dim=0):
"""Split the array into smaller arrays along `dim`.
Parameters
----------
chunks : int or list[int]
If `int`: Number of chunks (see `self.chunk`)
Else: Size of each chunk. Must sum to `self.shape[dim]`.
dim : int, default=0
Dimensions along which to split.
Returns
-------
list[MappedArray]
"""
if isinstance(chunks, int):
return self.chunk(chunks, dim)
chunks = make_list(chunks)
if sum(chunks) != self.shape[dim]:
raise ValueError('Chunks must cover the full dimension. '
'Got {} and {}.'
.format(sum(chunks), self.shape[dim]))
index = [slice(None)] * self.dim
previous_chunks = 0
out = []
for chunk in chunks:
index[dim] = slice(previous_chunks, previous_chunks+chunk)
out.append(self[tuple(index)])
previous_chunks += chunk
return out
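    # Example of the size-based form (illustrative): for shape[dim] == 5,
    # `split([3, 2], dim)` returns views over slices 0:3 and 3:5 along `dim`.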
def channel_first(self, atleast=0):
"""Permute the dimensions such that all spatial axes are on the right.
Parameters
----------
atleast : int, default=0
Make sure that at least this number of non-spatial dimensions
exist (new axes are inserted accordingly).
Returns
-------
MappedArray
"""
# 1) move spatial dimensions to the right
perm = []
spatial = []
for d, is_spatial in enumerate(self.spatial):
if is_spatial:
spatial.append(d)
else:
perm.append(d)
nb_channels = len(perm)
perm = perm + spatial
new = self.permute(perm)
# 2) add channel axes
add_channels = max(0, atleast - nb_channels)
if add_channels:
index = [slice(None)] * nb_channels \
+ [None] * add_channels \
+ [Ellipsis]
new = new.slice(tuple(index))
return new
def channel_last(self, atleast=0):
"""Permute the dimensions such that all spatial axes are on the left.
Parameters
----------
atleast : int, default=0
Make sure that at least this number of non-spatial dimensions
exist (new axes are inserted accordingly).
Returns
-------
MappedArray
"""
# 1) move spatial dimensions to the right
perm = []
spatial = []
for d, is_spatial in enumerate(self.spatial):
if is_spatial:
spatial.append(d)
else:
perm.append(d)
nb_channels = len(perm)
perm = spatial + perm
new = self.permute(perm)
# 2) add channel axes
add_channels = max(0, atleast - nb_channels)
if add_channels:
index = [Ellipsis] + [None] * add_channels
new = new.slice(tuple(index))
return new
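# A sketch of the permutation computed by `channel_first` / `channel_last`
# above, with plain torch tensors standing in for mapped arrays (an
# illustrative assumption, not part of the original API):
#     >>> import torch
#     >>> x = torch.zeros(2, 64, 64, 3)
#     >>> spatial = (False, True, True, False)
#     >>> perm = ([d for d, s in enumerate(spatial) if not s]
#     ...         + [d for d, s in enumerate(spatial) if s])
#     >>> x.permute(perm).shape        # channel-first layout
#     torch.Size([2, 3, 64, 64])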
class CatArray(MappedArray):
"""A concatenation of mapped arrays.
This is largely inspired by virtual concatenation of file_array in
SPM: https://github.com/spm/spm12/blob/master/@file_array/cat.m
"""
_arrays: tuple = []
_dim_cat: int = None
# defer attributes
fname = property(lambda self: tuple(a.fname for a in self._arrays))
fileobj = property(lambda self: tuple(a.fileobj for a in self._arrays))
is_compressed = property(lambda self: tuple(a.is_compressed for a in self._arrays))
dtype = property(lambda self: tuple(a.dtype for a in self._arrays))
slope = property(lambda self: tuple(a.slope for a in self._arrays))
inter = property(lambda self: tuple(a.inter for a in self._arrays))
_shape = property(lambda self: tuple(a._shape for a in self._arrays))
_dim = property(lambda self: tuple(a._dim for a in self._arrays))
affine = property(lambda self: tuple(a.affine for a in self._arrays))
_affine = property(lambda self: tuple(a._affine for a in self._arrays))
spatial = property(lambda self: tuple(a.spatial for a in self._arrays))
_spatial = property(lambda self: tuple(a._spatial for a in self._arrays))
slicer = property(lambda self: tuple(a.slicer for a in self._arrays))
permutation = property(lambda self: tuple(a.permutation for a in self._arrays))
voxel_size = property(lambda self: tuple(a.voxel_size for a in self._arrays))
def __init__(self, arrays, dim=0):
"""
Parameters
----------
arrays : sequence[MappedArray]
Arrays to concatenate. Their shapes should be identical
except along dimension `dim`.
dim : int, default=0
            Dimension along which to concatenate the arrays
"""
super().__init__()
arrays = list(arrays)
dim = dim or 0
self._dim_cat = dim
# sanity checks
shapes = []
for i, array in enumerate(arrays):
if not isinstance(array, MappedArray):
raise TypeError('Input arrays should be `MappedArray` '
                                'instances. Got {}.'.format(type(array)))
shape = list(array.shape)
del shape[dim]
shapes.append(shape)
shape0, *shapes = shapes
if not all(shape == shape0 for shape in shapes):
raise ValueError('Shapes of all concatenated arrays should '
'be equal except in the concatenation dimension.')
# compute output shape
shape = list(arrays[0].shape)
dims = [array.shape[dim] for array in arrays]
shape[dim] = sum(dims)
self.shape = tuple(shape)
# concatenate
self._arrays = tuple(arrays)
def __str__(self):
dtype_str = tuple(str(dt) for dt in self.dtype)
dtype_str = '(' + ', '.join(dtype_str) + ')'
return '{}(shape={}, dtype={})'.format(
type(self).__name__, self.shape, dtype_str)
__repr__ = __str__
def slice(self, index, new_shape=None):
# overload slicer -> slice individual arrays
index = expand_index(index, self.shape)
new_shape = guess_shape(index, self.shape)
assert len(index) > 0, "index should never be empty here"
        if any(isinstance(idx, list) for idx in index):
raise ValueError('List indices not currently supported '
'(otherwise we enter advanced indexing '
'territory and it becomes too complicated).')
index = list(index)
shape_cat = self.shape[self._dim_cat]
# find out which index corresponds to the concatenated dimension
# + compute the concatenated dimension in the output array
new_dim_cat = self._dim_cat
nb_old_dim = -1
for map_dim_cat, idx in enumerate(index):
if is_newaxis(idx):
# an axis was added: dim_cat moves to the right
new_dim_cat = new_dim_cat + 1
elif is_droppedaxis(idx):
# an axis was dropped: dim_cat moves to the left
new_dim_cat = new_dim_cat - 1
nb_old_dim += 1
else:
nb_old_dim += 1
if nb_old_dim >= self._dim_cat:
# found the concatenated dimension
break
index_cat = index[map_dim_cat]
index_cat = neg2pos(index_cat, shape_cat) # /!\ do not call it again
if is_droppedaxis(index_cat):
# if the concatenated dimension is dropped, return the
# corresponding array (sliced)
if index_cat < 0 or index_cat >= shape_cat:
raise IndexError('Index {} out of bounds [0, {}]'
.format(index_cat, shape_cat))
            nb_pre = 0
            for i, array in enumerate(self._arrays):
                size_cat = array.shape[self._dim_cat]
                if nb_pre + size_cat > index_cat:
                    # the index falls inside this array
                    break
                nb_pre += size_cat
            index_cat = index_cat - nb_pre
index[map_dim_cat] = index_cat
return self._arrays[i].slice(tuple(index), new_shape)
# else, we may have to drop some volumes and slice the others
assert is_sliceaxis(index_cat), "This should not happen"
arrays = self._arrays
step = index_cat.step or 1
if step < 0:
# if negative step:
# 1) invert everything
invert_index = [slice(None)] * self.dim
invert_index[self._dim_cat] = slice(None, None, -1)
arrays = [array[tuple(invert_index)] for array in arrays]
# 2) update index_cat
index_cat = invert_slice(index_cat, shape_cat, neg2pos=False)
# compute navigator
# (step is positive)
start, step, nb_elem_total = slice_navigator(index_cat, shape_cat, do_neg2pos=False)
        nb_pre = 0            # global offset of the current array
        nb_elem_prev = 0      # nb of elements already taken
        kept_arrays = []      # arrays at least partly in bounds
        starts = []           # start in each kept array
        stops = []            # stop in each kept array
        for array in arrays:
            if nb_elem_prev >= nb_elem_total:
                # all requested elements have been found
                break
            size_cat = array.shape[self._dim_cat]
            # local position of the next selected element
            local_start = start + nb_elem_prev * step - nb_pre
            if local_start < size_cat:
                # nb of selected elements that fall inside this array
                nb_elem_here = min((size_cat - local_start - 1) // step + 1,
                                   nb_elem_total - nb_elem_prev)
                kept_arrays.append(array)
                starts.append(local_start)
                stops.append(local_start + (nb_elem_here - 1) * step + 1)
                nb_elem_prev += nb_elem_here
            nb_pre += size_cat
# slice kept arrays
arrays = []
for array, start, stop in zip(kept_arrays, starts, stops):
index[map_dim_cat] = slice(start, stop, step)
arrays.append(array[tuple(index)])
# create new CatArray
new = copy(self)
new._arrays = arrays
new._dim_cat = new_dim_cat
new.shape = new_shape
return new
def permute(self, dims):
# overload permutation -> permute individual arrays
new = copy(self)
new._arrays = [array.permute(dims) for array in new._arrays]
iperm = invert_permutation(dims)
new._dim_cat = iperm[new._dim_cat]
new.shape = tuple(self.shape[d] for d in dims)
return new
def data(self, *args, **kwargs):
# read individual arrays and concatenate them
# TODO: it would be more efficient to preallocate the whole
# array and pass the appropriate buffer to each reader but
# (1) we don't have the option to provide a buffer yet
# (2) everything's already quite inefficient
dats = [array.data(*args, **kwargs) for array in self._arrays]
return volutils.cat(dats, dim=self._dim_cat)
def fdata(self, *args, **kwargs):
# read individual arrays and concatenate them
# TODO: it would be more efficient to preallocate the whole
# array and pass the appropriate buffer to each reader but
# (1) we don't have the option to provide a buffer yet
# (2) everything's already quite inefficient
dats = [array.fdata(*args, **kwargs) for array in self._arrays]
return volutils.cat(dats, dim=self._dim_cat)
    def set_data(self, dat, *args, **kwargs):
        # slice the input data and write it into each array
        size_prev = 0
        index = [slice(None)] * self.dim
        for array in self._arrays:
            size_cat = array.shape[self._dim_cat]
            index[self._dim_cat] = slice(size_prev, size_prev + size_cat)
            array.set_data(dat[tuple(index)], *args, **kwargs)
            size_prev += size_cat
        return self
    def set_fdata(self, dat, *args, **kwargs):
        # slice the input data and write it into each array
        size_prev = 0
        index = [slice(None)] * self.dim
        for array in self._arrays:
            size_cat = array.shape[self._dim_cat]
            index[self._dim_cat] = slice(size_prev, size_prev + size_cat)
            array.set_fdata(dat[tuple(index)], *args, **kwargs)
            size_prev += size_cat
        return self
def metadata(self, *args, **kwargs):
return tuple(array.metadata(*args, **kwargs) for array in self._arrays)
def set_metadata(self, **meta):
raise NotImplementedError('Cannot write metadata into concatenated '
'array')
def cat(arrays, dim=0):
"""Concatenate mapped arrays along a dimension.
Parameters
----------
arrays : sequence[MappedArray]
Arrays to concatenate. Their shapes should be identical
except along dimension `dim`.
dim : int, default=0
        Dimension along which to concatenate the arrays
Returns
-------
CatArray
A symbolic concatenation of all input arrays.
Its shape along dimension `dim` is the sum of all input shapes
along dimension `dim`.
"""
return CatArray(arrays, dim)
def stack(arrays, dim=0):
"""Stack mapped arrays along a dimension.
Parameters
----------
arrays : sequence[MappedArray]
Arrays to concatenate. Their shapes should be identical
except along dimension `dim`.
dim : int, default=0
        Dimension along which to concatenate the arrays
Returns
-------
CatArray
A symbolic stack of all input arrays.
"""
arrays = [array.unsqueeze(dim=dim) for array in arrays]
return cat(arrays, dim=dim)
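# Minimal usage sketch (assumes two on-disk volumes already mapped, e.g.
# through a reader returning MappedArray instances; `vol_a` / `vol_b` are
# hypothetical names used for illustration only):
#     catted = cat([vol_a, vol_b], dim=-1)    # (X, Y, Za), (X, Y, Zb) -> (X, Y, Za+Zb)
#     stacked = stack([vol_a, vol_b], dim=0)  # -> (2, X, Y, Z) when shapes match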
| 9,800
| 0
| 378
|
1a2deaef0215145916e743664ab5b8b9ed9d9543
| 302
|
py
|
Python
|
blit.py
|
rwberendsen/blit
|
f025a286b04774ec6dc6a47823254484d3942b78
|
[
"MIT"
] | null | null | null |
blit.py
|
rwberendsen/blit
|
f025a286b04774ec6dc6a47823254484d3942b78
|
[
"MIT"
] | null | null | null |
blit.py
|
rwberendsen/blit
|
f025a286b04774ec6dc6a47823254484d3942b78
|
[
"MIT"
] | null | null | null |
"""
blit.py
Call if you want to run everything
"""
import json
import os
import sys
import integrate
if __name__ == '__main__':
sys.exit(main(sys.argv))
| 10.785714
| 39
| 0.63245
|
"""
blit.py
Call if you want to run everything
"""
import json
import os
import sys
import integrate
def main(argv):
with open('config.json', 'r') as f:
config = json.load(f)
integrate.integrate(**config)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| 112
| 0
| 23
|
33a51d04c0e22dbd80245e03d033a309d7a8fdfd
| 367
|
py
|
Python
|
pacote dowlond/curso python/exercicio100.py
|
Kaue-Marin/Curso-Python
|
45f7920e288a49724a4284f14c7212bb1662ab5b
|
[
"MIT"
] | null | null | null |
pacote dowlond/curso python/exercicio100.py
|
Kaue-Marin/Curso-Python
|
45f7920e288a49724a4284f14c7212bb1662ab5b
|
[
"MIT"
] | null | null | null |
pacote dowlond/curso python/exercicio100.py
|
Kaue-Marin/Curso-Python
|
45f7920e288a49724a4284f14c7212bb1662ab5b
|
[
"MIT"
] | null | null | null |
from random import randint
numeros = []
# main program
sorteia()
somapar()
| 22.9375
| 47
| 0.577657
|
from random import randint
numeros = []
def sorteia():
for c in range(1, 5):
c = randint(1, 9)
numeros.append(c)
print(f'os valores da lista são {numeros}')
def somapar():
spar = 0
for c2 in numeros:
if c2 % 2 == 0:
spar += c2
print(f'a soma dos numeros pares é {spar}')
# main program
sorteia()
somapar()
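# Example of a possible run (values are random; shown for illustration only):
#   os valores da lista são [3, 4, 8, 1]
#   a soma dos numeros pares é 12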
| 245
| 0
| 44
|
7f79dcf3d85037aa0b27e51ab5ee77202b2f17ac
| 3,802
|
py
|
Python
|
ch03/pro1.py
|
Lucid-ak/deeplearnig_practice
|
e196d733ee9b910a9c7648e61e6934aea9d255b3
|
[
"MIT"
] | null | null | null |
ch03/pro1.py
|
Lucid-ak/deeplearnig_practice
|
e196d733ee9b910a9c7648e61e6934aea9d255b3
|
[
"MIT"
] | null | null | null |
ch03/pro1.py
|
Lucid-ak/deeplearnig_practice
|
e196d733ee9b910a9c7648e61e6934aea9d255b3
|
[
"MIT"
] | null | null | null |
import pickle
import numpy as np  # nonlinear perceptron
import matplotlib.pylab as plt
import sys, os
sys.path.append(os.pardir)
from dataset.mnist import load_mnist
from PIL import Image
def step_function(x):
'''
y = x > 0
    return y.astype(np.int)  # np.int and dtype=int play the same role.
'''
    return np.array(x>0, dtype=int)  # dtype converts the output to the desired data type (e.g. dtype=int)
'''
network=init_network()
x=np.array([100,40])
y=forward(network, x)
print(y)
#print(y)
#plt.plot(x,y)
#plt.ylim(-0.1,1.1)
#plt.show()
'''
x_test,t_test = get_data()
network=init_networrk_mnist()
batch_size=100
accuracy_ct=0
for i in range(0, len(x_test), batch_size):  # len() works even without knowing the exact size of the data
x_batch = x_test[i:i+batch_size]
y_batch = predict(network, x_batch)
    p = np.argmax(y_batch, axis=1)  # take the index of the most probable class for each sample
print(np.sum(p == t_test[i:i+batch_size]))
accuracy_ct += np.sum(p == t_test[i:i+batch_size])
'''
y=predict(network, x_test[i])
p=np.argmax(y)
if p==t_test[i] :
accuracy_ct+=1
'''
print("Accuracy:",str(float(accuracy_ct)/len(x_test)))
print(accuracy_ct)
'''
img = x_train[0]
label = t_train[0]
print(label)
print(img.shape)
img = img.reshape(28,28)  # without reshaping to 28x28 the image does not come out right -> could this be used for encryption or compression?
print(img.shape)
img_show(img)
'''
| 22.104651
| 99
| 0.584955
|
import pickle
import numpy as np  # nonlinear perceptron
import matplotlib.pylab as plt
import sys, os
sys.path.append(os.pardir)
from dataset.mnist import load_mnist
from PIL import Image
def AND(x1, x2):
x=np.array([x1,x2])
w=np.array([0.5, 0.5])
b= -0.7
theta = 0
tmp = np.sum(w*x)+b
if tmp<=theta:
return 0
elif tmp>theta:
return 1
def OR(x1, x2):
x = np.array([x1,x2])
w = np.array([0.5,0.5])
b = -0.3
theta = 0
sig=np.sum(w*x)+b
if sig>=theta:
return 1
else :
return 0
def NAND(x1, x2):
x=np.array([x1,x2])
w=np.array([0.5, 0.5])
b=-0.7
theta=0
sig=np.sum(w*x)+b
if sig<theta :
return 1
else :
return 0
def XOR(x1, x2):
y1=OR(x1,x2)
y2=NAND(x1,x2)
y=AND(y1,y2)
return y
def step_function(x):
'''
y = x > 0
    return y.astype(np.int)  # np.int and dtype=int play the same role.
'''
    return np.array(x>0, dtype=int)  # dtype converts the output to the desired data type (e.g. dtype=int)
def sigmoid(x):
    return 1/(1+np.exp(-x))  # broadcasting: computed element-wise, results returned as an array
def ReLU(x):
return np.array(np.maximum(0, x))
def softmax(x):  # c is the maximum of the input values
c=np.max(x)
exp_x=np.exp(x-c)
sum_exp_x=sum(exp_x)
    y = exp_x/sum_exp_x  # could return exp_x/sum_exp_x directly, but assign to y for "readability"
return y
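# Note (added for clarity): subtracting the max (`c` above) before
# exponentiating avoids overflow without changing the result, since
# softmax(x) == softmax(x - c). For instance, softmax on [1000., 1001.]
# would overflow naively, but returns ~[0.269, 0.731] with the shift.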
def identity_function(x):
return x
def init_network():  # store each weight and bias in the network dict under a label
network={}
network['W1']=np.array([[0.1,0.3,0.5],[0.2,0.4,0.6]])
network['b1']=np.array([0.1, 0.2, 0.3])
network['W2']=np.array([[0.1,0.4],[0.2,0.5],[0.3,0.6]])
network['b2']=np.array([0.1,0.2])
network['W3']=np.array([[0.1,0.3],[0.2,0.4]])
network['b3']=np.array([0.1,0.2])
return network
def init_networrk_mnist():  # load weights and biases from the library (pickle file)
with open("sample_weight.pkl",'rb') as f:
network = pickle.load(f)
return network
def predict(network, x):
W1,W2, W3= network['W1'], network['W2'], network['W3']
b1,b2,b3 = network['b1'],network['b2'],network['b3']
a1 = np.dot(x,W1)+b1
z1 = sigmoid(a1)
a2 = np.dot(z1, W2) + b2
z2 = sigmoid(a2)
a3 = np.dot(z2, W3) + b3
y=softmax(a3)
return y
def forward(network, x):  # forward pass (input -> output); always produces similar values
W1,W2, W3= network['W1'], network['W2'], network['W3']
b1,b2,b3=network['b1'],network['b2'],network['b3']
a1 = np.dot(x,W1)+b1
z1 = softmax(a1)
a2 = np.dot(z1, W2) + b2
z2 = softmax(a2)
a3 = np.dot(z2, W3) + b3
y=identity_function(a3)
return y
def img_show(img):
pil_img=Image.fromarray(np.uint8(img))
pil_img.show()
def get_data():
(x_train, t_train), (x_test, t_test) = \
load_mnist(flatten=True, normalize=True, one_hot_label=False)
return x_test, t_test
'''
network=init_network()
x=np.array([100,40])
y=forward(network, x)
print(y)
#print(y)
#plt.plot(x,y)
#plt.ylim(-0.1,1.1)
#plt.show()
'''
x_test,t_test = get_data()
network=init_networrk_mnist()
batch_size=100
accuracy_ct=0
for i in range(0, len(x_test), batch_size):  # len() works even without knowing the exact size of the data
x_batch = x_test[i:i+batch_size]
y_batch = predict(network, x_batch)
    p = np.argmax(y_batch, axis=1)  # take the index of the most probable class for each sample
print(np.sum(p == t_test[i:i+batch_size]))
accuracy_ct += np.sum(p == t_test[i:i+batch_size])
'''
y=predict(network, x_test[i])
p=np.argmax(y)
if p==t_test[i] :
accuracy_ct+=1
'''
print("Accuracy:",str(float(accuracy_ct)/len(x_test)))
print(accuracy_ct)
'''
img = x_train[0]
label = t_train[0]
print(label)
print(img.shape)
img = img.reshape(28,28)  # without reshaping to 28x28 the image does not come out right -> could this be used for encryption or compression?
print(img.shape)
img_show(img)
'''
| 2,368
| 0
| 319
|
206b2d2a2c251900c661943dfaa5e9366d3668b1
| 9,055
|
py
|
Python
|
slideatlas/security/blueprint.py
|
SlideAtlas/SlideAtlas-Server
|
3b9cbd56eaa29ae08ae521e75616ea230fe26397
|
[
"Apache-2.0"
] | 3
|
2015-10-10T10:17:26.000Z
|
2020-12-14T09:42:19.000Z
|
slideatlas/security/blueprint.py
|
SlideAtlas/SlideAtlas-Server
|
3b9cbd56eaa29ae08ae521e75616ea230fe26397
|
[
"Apache-2.0"
] | 41
|
2015-02-03T19:47:28.000Z
|
2017-02-06T23:24:26.000Z
|
slideatlas/security/blueprint.py
|
SlideAtlas/SlideAtlas-Server
|
3b9cbd56eaa29ae08ae521e75616ea230fe26397
|
[
"Apache-2.0"
] | 2
|
2016-04-04T18:23:27.000Z
|
2017-11-14T22:34:58.000Z
|
# coding=utf-8
import copy
from flask import Markup, url_for
from flask.ext.security import Security, MongoEngineUserDatastore, user_registered
from flask.ext.security.core import _SecurityState
from flask.ext.security.core import _context_processor as security_default_context_processor
from flask.ext.security.views import create_blueprint as security_create_blueprint
from flask.ext.security.views import send_confirmation as security_send_confirmation
from flask.ext.security.utils import send_mail
from slideatlas import models
from . import forms, views, login_provider
from .principal import register_principal
################################################################################
__all__ = ('blueprint', 'register_with_app')
################################################################################
################################################################################
# TODO: find a way of automatically registering Shibboleth users with the
# appropriate group, similar to facebook_id
################################################################################
def add_config(app):
"""
Set Flask application configuration options.
These are options that should never change.
"""
# Flask-Security configuration
app.config.update(
### Frontend ###
SECURITY_FLASH_MESSAGES=True,
SECURITY_LOGIN_URL='/login',
SECURITY_LOGIN_USER_TEMPLATE='security/login.html',
SECURITY_MSG_DISABLED_ACCOUNT=('Password login is disabled for this account.', 'error'),
SECURITY_LOGOUT_URL='/logout',
# TODO: change '/sessions' to an endpoint name
SECURITY_POST_LOGIN_VIEW='/sessions',
SECURITY_POST_LOGOUT_VIEW='home',
### Password login options ###
SECURITY_DEFAULT_REMEMBER_ME=False,
## New account registration
SECURITY_REGISTERABLE=True,
SECURITY_REGISTER_URL='/login/password/register',
SECURITY_REGISTER_USER_TEMPLATE='security/register.html',
SECURITY_SEND_REGISTER_EMAIL=True,
SECURITY_EMAIL_SUBJECT_REGISTER='SlideAtlas: Account Created',
# uses 'welcome' email body template
# TODO: change the email body template, as the default contains a password confirmation link, and we want non-password users to receive a welcome email too
## Confirmation of user's email address
SECURITY_CONFIRMABLE=True,
SECURITY_CONFIRM_URL='/login/password/confirm',
SECURITY_SEND_CONFIRMATION_TEMPLATE='security/resend_confirmation.html',
SECURITY_EMAIL_SUBJECT_CONFIRM='SlideAtlas: Account Confirmation',
# uses 'confirmation_instructions' email body template
SECURITY_CONFIRM_EMAIL_WITHIN='5 days',
SECURITY_LOGIN_WITHOUT_CONFIRMATION=False,
SECURITY_MSG_EMAIL_CONFIRMED=(
Markup(
'Welcome to SlideAtlas! Your account has been confirmed.<br>'
'<br>'
'Site administrators may now grant you access to additional content. '
'You can also contact <a href="mailto:%(email)s">%(email)s</a> with any requests.' %
{'email': app.config['SLIDEATLAS_ADMIN_EMAIL']}
),
'success'),
## Recover / reset a lost password
SECURITY_RECOVERABLE=True,
SECURITY_RESET_URL='/login/password/reset',
SECURITY_FORGOT_PASSWORD_TEMPLATE='security/password_reset_1.html', # step 1
SECURITY_RESET_PASSWORD_TEMPLATE='security/password_reset_2.html', # step 2
SECURITY_EMAIL_SUBJECT_PASSWORD_RESET='SlideAtlas: Password Reset Instructions',
# uses 'reset_instructions' email body template
SECURITY_RESET_PASSWORD_WITHIN='5 days',
SECURITY_SEND_PASSWORD_RESET_NOTICE_EMAIL=False, # TODO: do we want to send a confirmation email?
SECURITY_EMAIL_SUBJECT_PASSWORD_NOTICE='SlideAtlas: Password Reset Successful',
# uses 'reset_notice' email body template
## Change a password
SECURITY_CHANGEABLE=True,
SECURITY_CHANGE_URL='/login/password/change',
SECURITY_CHANGE_PASSWORD_TEMPLATE='security/password_change.html',
SECURITY_SEND_PASSWORD_CHANGE_EMAIL=False, # TODO: do we want to send a confirmation email?
SECURITY_EMAIL_SUBJECT_PASSWORD_CHANGE_NOTICE='SlideAtlas: Password Change Successful',
# uses 'change notice' email body template
### Other options ###
SECURITY_TRACKABLE=True, # record login statistics in User model
SECURITY_PASSWORDLESS=False, # an experimental feature
# custom salts can also be set for several other tokens, but this shouldn't be necessary
# TODO: there are a few other undocumented config settings in Flask-Security, explore them
)
# Flask-Login configuration
app.config.update(
SESSION_PROTECTION='basic', # some extra security for cookies, see documentation for details
REMEMBER_COOKIE_DOMAIN=app.session_interface.get_cookie_domain(app),
REMEMBER_COOKIE_HTTPONLY=True,
REMEMBER_COOKIE_SECURE=app.config['SLIDEATLAS_HTTPS'],
)
################################################################################
################################################################################
| 45.049751
| 163
| 0.661182
|
# coding=utf-8
import copy
from flask import Markup, url_for
from flask.ext.security import Security, MongoEngineUserDatastore, user_registered
from flask.ext.security.core import _SecurityState
from flask.ext.security.core import _context_processor as security_default_context_processor
from flask.ext.security.views import create_blueprint as security_create_blueprint
from flask.ext.security.views import send_confirmation as security_send_confirmation
from flask.ext.security.utils import send_mail
from slideatlas import models
from . import forms, views, login_provider
from .principal import register_principal
################################################################################
__all__ = ('blueprint', 'register_with_app')
################################################################################
def register_with_app(app):
add_config(app)
security, blueprint = create_security(app)
register_principal(app, security)
login_provider.add_views(app, blueprint)
# TODO: move the 'site_url' value to config file
security.mail_context_processor(lambda: dict(site_url='https://slide-atlas.org/'))
# TODO: make logins timeout
# may use the 'flask.ext.login.user_loaded_from_*' signals for this, to update the timeout
# furthermore, see the documentation 'flask.ext.login.needs_refresh', and implement re-login
# redirection directly to the user's corresponding login provider if a user's session becomes stale
user_registered.connect(on_user_registered, app)
################################################################################
# TODO: find a way of automatically registering Shibboleth users with the
# appropriate group, similar to facebook_id
def on_user_registered(app, user, confirm_token):
if isinstance(user, models.ShibbolethUser) or user.email.endswith('brown.edu'):
brown_group = models.Group.objects.with_id('529d244959a3aee20f8a00ae')
user.groups.append(brown_group)
user.save()
send_mail(
'SlideAtlas: New User Registered',
app.config['SLIDEATLAS_ADMIN_EMAIL'],
'new_user_notify',
user=user,
admin_user_url=url_for('%sview.edit_view' % user.__class__.__name__.lower(),
id=str(user.id),
_external=True)
)
################################################################################
def add_config(app):
"""
Set Flask application configuration options.
These are options that should never change.
"""
# Flask-Security configuration
app.config.update(
### Frontend ###
SECURITY_FLASH_MESSAGES=True,
SECURITY_LOGIN_URL='/login',
SECURITY_LOGIN_USER_TEMPLATE='security/login.html',
SECURITY_MSG_DISABLED_ACCOUNT=('Password login is disabled for this account.', 'error'),
SECURITY_LOGOUT_URL='/logout',
# TODO: change '/sessions' to an endpoint name
SECURITY_POST_LOGIN_VIEW='/sessions',
SECURITY_POST_LOGOUT_VIEW='home',
### Password login options ###
SECURITY_DEFAULT_REMEMBER_ME=False,
## New account registration
SECURITY_REGISTERABLE=True,
SECURITY_REGISTER_URL='/login/password/register',
SECURITY_REGISTER_USER_TEMPLATE='security/register.html',
SECURITY_SEND_REGISTER_EMAIL=True,
SECURITY_EMAIL_SUBJECT_REGISTER='SlideAtlas: Account Created',
# uses 'welcome' email body template
# TODO: change the email body template, as the default contains a password confirmation link, and we want non-password users to receive a welcome email too
## Confirmation of user's email address
SECURITY_CONFIRMABLE=True,
SECURITY_CONFIRM_URL='/login/password/confirm',
SECURITY_SEND_CONFIRMATION_TEMPLATE='security/resend_confirmation.html',
SECURITY_EMAIL_SUBJECT_CONFIRM='SlideAtlas: Account Confirmation',
# uses 'confirmation_instructions' email body template
SECURITY_CONFIRM_EMAIL_WITHIN='5 days',
SECURITY_LOGIN_WITHOUT_CONFIRMATION=False,
SECURITY_MSG_EMAIL_CONFIRMED=(
Markup(
'Welcome to SlideAtlas! Your account has been confirmed.<br>'
'<br>'
'Site administrators may now grant you access to additional content. '
'You can also contact <a href="mailto:%(email)s">%(email)s</a> with any requests.' %
{'email': app.config['SLIDEATLAS_ADMIN_EMAIL']}
),
'success'),
## Recover / reset a lost password
SECURITY_RECOVERABLE=True,
SECURITY_RESET_URL='/login/password/reset',
SECURITY_FORGOT_PASSWORD_TEMPLATE='security/password_reset_1.html', # step 1
SECURITY_RESET_PASSWORD_TEMPLATE='security/password_reset_2.html', # step 2
SECURITY_EMAIL_SUBJECT_PASSWORD_RESET='SlideAtlas: Password Reset Instructions',
# uses 'reset_instructions' email body template
SECURITY_RESET_PASSWORD_WITHIN='5 days',
SECURITY_SEND_PASSWORD_RESET_NOTICE_EMAIL=False, # TODO: do we want to send a confirmation email?
SECURITY_EMAIL_SUBJECT_PASSWORD_NOTICE='SlideAtlas: Password Reset Successful',
# uses 'reset_notice' email body template
## Change a password
SECURITY_CHANGEABLE=True,
SECURITY_CHANGE_URL='/login/password/change',
SECURITY_CHANGE_PASSWORD_TEMPLATE='security/password_change.html',
SECURITY_SEND_PASSWORD_CHANGE_EMAIL=False, # TODO: do we want to send a confirmation email?
SECURITY_EMAIL_SUBJECT_PASSWORD_CHANGE_NOTICE='SlideAtlas: Password Change Successful',
# uses 'change notice' email body template
### Other options ###
SECURITY_TRACKABLE=True, # record login statistics in User model
SECURITY_PASSWORDLESS=False, # an experimental feature
# custom salts can also be set for several other tokens, but this shouldn't be necessary
# TODO: there are a few other undocumented config settings in Flask-Security, explore them
)
# Flask-Login configuration
app.config.update(
SESSION_PROTECTION='basic', # some extra security for cookies, see documentation for details
REMEMBER_COOKIE_DOMAIN=app.session_interface.get_cookie_domain(app),
REMEMBER_COOKIE_HTTPONLY=True,
REMEMBER_COOKIE_SECURE=app.config['SLIDEATLAS_HTTPS'],
)
################################################################################
def create_security(app):
# register Flask-Security with app and get blueprint
security = Security(app, SlideatlasMongoEngineUserDatastore(),
register_blueprint=False,
confirm_register_form=forms.RegisterForm,
login_form=forms.LoginForm)
# prevent Flask-Security from automatically creating register and confirm views
# by calling 'security_create_blueprint' with a different state
security_blueprint_state = copy.copy(security._state)
security_blueprint_state.registerable = False
security_blueprint_state.confirmable = False
blueprint = security_create_blueprint(security_blueprint_state, 'flask_security.core')
# add SlideAtlas's own register view, which doesn't immediately require a password
blueprint.add_url_rule(security.register_url,
endpoint='register',
view_func=views.register,
methods=['GET', 'POST'])
# use the Flask-Security's built-in view for re-sending a confirmation, which
# needs to be manually added, since 'confirmable' was set to False
blueprint.add_url_rule(security.confirm_url,
endpoint='send_confirmation',
view_func=security_send_confirmation,
methods=['GET', 'POST'])
# add SlideAtlas's own confirm view, which requires the user to set a password
blueprint.add_url_rule(security.confirm_url + '/<token>',
endpoint='confirm_email',
view_func=views.confirm_email,
methods=['GET', 'POST'])
# do work that Flask-Security would have done if 'register_blueprint' were True
app.register_blueprint(blueprint)
app.context_processor(security_default_context_processor)
return security, blueprint
################################################################################
class SlideatlasMongoEngineUserDatastore(MongoEngineUserDatastore):
def __init__(self):
# 'db' parameter is not necessary for this subclass
super(SlideatlasMongoEngineUserDatastore, self).__init__(None, models.User, None)
self.user_creation_model = models.PasswordUser
def create_user(self, **kwargs):
"""Creates and returns a new user from the given parameters."""
user = self.user_creation_model(**kwargs)
return self.put(user)
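# Usage sketch (hypothetical field values, shown for illustration only):
# the datastore creates PasswordUser documents, e.g.
#     datastore = SlideatlasMongoEngineUserDatastore()
#     user = datastore.create_user(email='jane@example.org')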
| 3,373
| 262
| 88
|
6e1958f96728d11d2e7418e4925be857a7286b3c
| 1,616
|
py
|
Python
|
flight/views.py
|
NedyalkoKr/airline
|
d704e8cd98901dc4bb0bf672cc2363432ada3f84
|
[
"MIT"
] | null | null | null |
flight/views.py
|
NedyalkoKr/airline
|
d704e8cd98901dc4bb0bf672cc2363432ada3f84
|
[
"MIT"
] | null | null | null |
flight/views.py
|
NedyalkoKr/airline
|
d704e8cd98901dc4bb0bf672cc2363432ada3f84
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.urls import reverse
from django.http import Http404, HttpResponseRedirect
from flight.models import Flight, Passenger
def index(request):
''' display all flights '''
context = {
'main_header': 'Flights',
'title': 'Flights',
'flights': Flight.objects.all()
}
return render(request, 'flight/index.html', context)
def flight(request, flight_id):
''' return individual flight details and passengers on this flight'''
try:
flight = Flight.objects.get(pk=flight_id)
except Flight.DoesNotExist:
        raise Http404(f'Flight {flight_id} does not exist.')
context = {
'flight': flight,
'passengers': flight.passengers.all(),
'non_passengers': Passenger.objects.exclude(flight=flight).all(),
'number_of_passengers': flight.passengers.count()
}
return render(request, 'flight/flight.html', context)
| 36.727273
| 99
| 0.678218
|
from django.shortcuts import render
from django.urls import reverse
from django.http import Http404, HttpResponseRedirect
from flight.models import Flight, Passenger
def index(request):
''' display all flights '''
context = {
'main_header': 'Flights',
'title': 'Flights',
'flights': Flight.objects.all()
}
return render(request, 'flight/index.html', context)
def flight(request, flight_id):
''' return individual flight details and passengers on this flight'''
try:
flight = Flight.objects.get(pk=flight_id)
except Flight.DoesNotExist:
        raise Http404(f'Flight {flight_id} does not exist.')
context = {
'flight': flight,
'passengers': flight.passengers.all(),
'non_passengers': Passenger.objects.exclude(flight=flight).all(),
'number_of_passengers': flight.passengers.count()
}
return render(request, 'flight/flight.html', context)
def book(request, flight_id):
try:
passenger_id = int(request.POST['passenger'])
passenger = Passenger.objects.get(pk=passenger_id)
flight = Flight.objects.get(pk=flight_id)
except KeyError:
return render(request, 'flight/error.html', {'message': 'No passenger selected'})
except Flight.DoesNotExist:
return render(request, 'flight/error.html', {'message': 'No such flight exist'})
except Passenger.DoesNotExist:
return render(request, 'flight/error.html', {'message': 'No passenger with that id exist'})
passenger.flight.add(flight)
return HttpResponseRedirect(reverse('flight', args=(flight_id,)))
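# A matching URLconf sketch (hypothetical, shown for illustration; the
# `reverse('flight', ...)` call above assumes an endpoint named 'flight'):
#     # flight/urls.py
#     # urlpatterns = [
#     #     path('', views.index, name='index'),
#     #     path('<int:flight_id>', views.flight, name='flight'),
#     #     path('<int:flight_id>/book', views.book, name='book'),
#     # ]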
| 651
| 0
| 23
|
c2f3cfc4cf7bad08a1bd21dc39bb6765de3670b2
| 419
|
py
|
Python
|
setup1.py
|
Alexander437/Learning_repo
|
4e40ad419f8117d014f789119f4b3583067020bb
|
[
"CC0-1.0"
] | null | null | null |
setup1.py
|
Alexander437/Learning_repo
|
4e40ad419f8117d014f789119f4b3583067020bb
|
[
"CC0-1.0"
] | null | null | null |
setup1.py
|
Alexander437/Learning_repo
|
4e40ad419f8117d014f789119f4b3583067020bb
|
[
"CC0-1.0"
] | null | null | null |
from setuptools import setup, find_packages, Extension
from torch.utils import cpp_extension
setup(
name='my_lib',
version='0.0',
description='Learning setup',
packages=find_packages(),
ext_package='trt_pose',
ext_modules=[cpp_extension.CppExtension('plugins', [
'Learn_cpp/learn.cpp',
])],
cmdclass={'build_ext': cpp_extension.BuildExtension},
install_requires=[
],
)
| 23.277778
| 57
| 0.687351
|
from setuptools import setup, find_packages, Extension
from torch.utils import cpp_extension
setup(
name='my_lib',
version='0.0',
description='Learning setup',
packages=find_packages(),
ext_package='trt_pose',
ext_modules=[cpp_extension.CppExtension('plugins', [
'Learn_cpp/learn.cpp',
])],
cmdclass={'build_ext': cpp_extension.BuildExtension},
install_requires=[
],
)
| 0
| 0
| 0
|
8678ddca56a8e9b76f05e9a0a06fe329c6224b43
| 8,342
|
py
|
Python
|
bookops_callno/normalizer.py
|
BookOps-CAT/bookops-callno
|
a8f1d2744b3b53844dc97a5400ae87a2db92cd4c
|
[
"MIT"
] | null | null | null |
bookops_callno/normalizer.py
|
BookOps-CAT/bookops-callno
|
a8f1d2744b3b53844dc97a5400ae87a2db92cd4c
|
[
"MIT"
] | null | null | null |
bookops_callno/normalizer.py
|
BookOps-CAT/bookops-callno
|
a8f1d2744b3b53844dc97a5400ae87a2db92cd4c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from typing import Optional
from pymarc import Field
from unidecode import unidecode, UnidecodeError
from bookops_callno.errors import CallNoConstructorError
def remove_trailing_punctuation(value: str) -> str:
"""
Removes any trailing periods, commas, etc.
Args:
value: string to be processed
Returns:
value
"""
if not isinstance(value, str):
raise CallNoConstructorError(
"Invalid 'value' type used in argument. Must be a string."
)
    while value and value[-1] in ".,:;-() ":
value = value[:-1]
return value
def normalize_value(value: str) -> str:
"""
Removes diacritics from string and changes to uppercase
"""
if not value:
return ""
elif not isinstance(value, str):
raise CallNoConstructorError(
"Invalid 'value' type used in argument. Must be a string."
)
try:
value = value.replace("\u02b9", "") # Russian: modifier letter prime
value = value.replace("\u02bb", "") # Arabic modifier letter turned comma
value = value.replace("'", "")
value = unidecode(value, errors="strict")
value = remove_trailing_punctuation(value).upper()
return value
except UnidecodeError as exc:
raise CallNoConstructorError(
f"Unsupported character encountered. Error: '{exc}'."
)
def corporate_name_first_word(field: Field = None) -> Optional[str]:
"""
    Returns the uppercase first word of the corporate entity from
the 110 MARC tag
Args:
field: pymarc.Field instance
Returns:
name
"""
if field is None:
return None
elif not isinstance(field, Field):
raise CallNoConstructorError(
"Invalid 'field' argument type. Must be pymarc.Field instance."
)
if field.tag != "110":
return None
words = field["a"].strip().split(" ")
name = normalize_value(words[0])
return name
def corporate_name_full(field: Field = None) -> Optional[str]:
"""
Returns an uppercase full name of corporate entity.
Uses the 110 MARC tag
Args:
field: pymarc.Field instance
Returns:
name
"""
if field is None:
return None
elif not isinstance(field, Field):
raise CallNoConstructorError(
"Invalid 'field' argument type. Must be pymarc.Field instance."
)
if field.tag not in ("110", "610"):
return None
phrases = field["a"].strip().split("(")
name = normalize_value(phrases[0])
return name
def corporate_name_initial(field: Field = None) -> Optional[str]:
"""
Returns the uppercase first letter of the corporate entity
based on the 110 MARC tag
Args:
field: pymarc.Field instance
Returns:
initial
"""
if field is None:
return None
elif not isinstance(field, Field):
raise CallNoConstructorError(
"Invalid 'field' argument type. Must be pymarc.Field instance."
)
if field.tag != "110":
return None
name = field["a"]
name = normalize_value(name)
initial = name[0]
return initial
def personal_name_initial(field: Field = None) -> Optional[str]:
"""
Returns the first letter of the last name of a personal author
Args:
field: pymarc.Field instance
Returns
initial
"""
if field is None:
return None
elif not isinstance(field, Field):
raise CallNoConstructorError(
"Invalid 'field' argument type. Must be pymarc.Field instance."
)
if field.tag != "100":
return None
name = field["a"].strip()
name = normalize_value(name)
initial = name[0]
return initial
def personal_name_surname(field: Field = None) -> Optional[str]:
"""
Returns an uppercase surname of personal author. Includes any numeration from
    the subfield $b of the 100 or 600 MARC tag.
Args:
field: pymarc.Field instance
Returns:
name
"""
if field is None:
return None
elif not isinstance(field, Field):
raise CallNoConstructorError(
"Invalid 'field' argument type. Must be pymarc.Field instance."
)
if field.tag not in ("100", "600"):
return None
elif field.indicator1 not in ("0", "1"):
return None
sub_a = field["a"].strip()
# include subfield $b if present
try:
sub_b = field["b"].strip()
name = f"{sub_a} {sub_b}"
except AttributeError:
name = sub_a
name = normalize_value(name)
# stop at comma to select surname
try:
stop = name.index(",")
name = name[:stop]
except ValueError:
pass
return name
def subject_corporate_name(field: Field = None) -> Optional[str]:
"""
Returns an uppercase corporate name to be used in subject segment
of the call number based on MARC tag 610
Args:
field: pymarc.Field instance
Returns:
name
"""
if field is None:
return None
elif not isinstance(field, Field):
raise CallNoConstructorError(
"Invalid 'field' argument type. Must be pymarc.Field instance."
)
if field.tag != "610":
return None
name = corporate_name_full(field)
return name
def subject_family_name(field: Field = None) -> Optional[str]:
"""
Returns an uppercase family name based on the 600 MARC tag
Args:
field: pymarc.Field instance
Returns:
name
"""
if field is None:
return None
elif not isinstance(field, Field):
raise CallNoConstructorError(
"Invalid 'field' argument type. Must be pymarc.Field instance."
)
if field.tag != "600":
return None
elif field.indicator1 != "3":
return None
try:
stop = field["a"].index("family")
name = field["a"][:stop]
except ValueError:
return None
name = normalize_value(name)
return name
def subject_personal_name(field: Field = None) -> Optional[str]:
"""
Returns personal name to be used in subject segment of the call
    number. Use for biography or Dewey + Name patterns, examples:
    biography: B LOUIS XIV C
    criticism of works of an author: 813 ADAMS C
Args:
field: pymarc.Field instance
Returns:
name
"""
if field is None:
return None
elif not isinstance(field, Field):
raise CallNoConstructorError(
"Invalid 'field' argument type. Must be pymarc.Field instance."
)
if field.tag != "600":
return None
name = personal_name_surname(field)
return name
def subject_topic(field: Field = None) -> Optional[str]:
"""
Returns an uppercase topic to be used in the subject segment of the call
number based on MARC tag 650. Valid only for BPL call numbers.
Examples: programming language, name of operating system, etc.
Args:
field: pymarc.Field instance
Returns:
topic
"""
pass
def title_first_word(field: Field = None) -> Optional[str]:
"""
Returns an uppercase first word (skipping any articles) of
the title field (245 MARC tag subfield $a).
Args:
field: pymarc.Field instance
Returns:
word
"""
pass
def title_initial(field: Field = None) -> Optional[str]:
"""
Returns an uppercase initial (skipping any articles) of
the title field (245 MARC tag subfield $a).
Args:
field: pymarc.Field instance
Returns:
initial
"""
if field is None:
return None
elif not isinstance(field, Field):
raise CallNoConstructorError(
"Invalid 'field' argument type. Must be pymarc.Field instance."
)
if field.tag != "245":
return None
try:
ind2 = int(field.indicator2)
except ValueError:
return None
title = field["a"][ind2:]
title = normalize_value(title)
initial = title[0]
return initial
| 23.902579
| 82
| 0.593743
|
# -*- coding: utf-8 -*-
from typing import Optional
from pymarc import Field
from unidecode import unidecode, UnidecodeError
from bookops_callno.errors import CallNoConstructorError
def remove_trailing_punctuation(value: str) -> str:
"""
Removes any trailing periods, commas, etc.
Args:
value: string to be processed
Returns:
value
"""
if not isinstance(value, str):
raise CallNoConstructorError(
"Invalid 'value' type used in argument. Must be a string."
)
    while value and value[-1] in ".,:;-() ":
value = value[:-1]
return value
def normalize_value(value: str) -> str:
"""
Removes diacritics from string and changes to uppercase
"""
if not value:
return ""
elif not isinstance(value, str):
raise CallNoConstructorError(
"Invalid 'value' type used in argument. Must be a string."
)
try:
value = value.replace("\u02b9", "") # Russian: modifier letter prime
value = value.replace("\u02bb", "") # Arabic modifier letter turned comma
value = value.replace("'", "")
value = unidecode(value, errors="strict")
value = remove_trailing_punctuation(value).upper()
return value
except UnidecodeError as exc:
raise CallNoConstructorError(
f"Unsupported character encountered. Error: '{exc}'."
)
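# Example of the pipeline above (illustrative):
#     >>> normalize_value("Dvořák, Antonín,")
#     'DVORAK, ANTONIN'
# (diacritics stripped by unidecode, trailing comma removed, uppercased)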
def corporate_name_first_word(field: Field = None) -> Optional[str]:
"""
    Returns the uppercase first word of the corporate entity from
the 110 MARC tag
Args:
field: pymarc.Field instance
Returns:
name
"""
if field is None:
return None
elif not isinstance(field, Field):
raise CallNoConstructorError(
"Invalid 'field' argument type. Must be pymarc.Field instance."
)
if field.tag != "110":
return None
words = field["a"].strip().split(" ")
name = normalize_value(words[0])
return name
def corporate_name_full(field: Field = None) -> Optional[str]:
"""
Returns an uppercase full name of corporate entity.
Uses the 110 MARC tag
Args:
field: pymarc.Field instance
Returns:
name
"""
if field is None:
return None
elif not isinstance(field, Field):
raise CallNoConstructorError(
"Invalid 'field' argument type. Must be pymarc.Field instance."
)
if field.tag not in ("110", "610"):
return None
phrases = field["a"].strip().split("(")
name = normalize_value(phrases[0])
return name
def corporate_name_initial(field: Field = None) -> Optional[str]:
"""
Returns the uppercase first letter of the corporate entity
based on the 110 MARC tag
Args:
field: pymarc.Field instance
Returns:
initial
"""
if field is None:
return None
elif not isinstance(field, Field):
raise CallNoConstructorError(
"Invalid 'field' argument type. Must be pymarc.Field instance."
)
if field.tag != "110":
return None
name = field["a"]
name = normalize_value(name)
initial = name[0]
return initial
def personal_name_initial(field: Field = None) -> Optional[str]:
"""
Returns the first letter of the last name of a personal author
Args:
field: pymarc.Field instance
Returns
initial
"""
if field is None:
return None
elif not isinstance(field, Field):
raise CallNoConstructorError(
"Invalid 'field' argument type. Must be pymarc.Field instance."
)
if field.tag != "100":
return None
name = field["a"].strip()
name = normalize_value(name)
initial = name[0]
return initial
def personal_name_surname(field: Field = None) -> Optional[str]:
"""
Returns an uppercase surname of personal author. Includes any numeration from
    the subfield $b of the 100 or 600 MARC tag.
Args:
field: pymarc.Field instance
Returns:
name
"""
if field is None:
return None
elif not isinstance(field, Field):
raise CallNoConstructorError(
"Invalid 'field' argument type. Must be pymarc.Field instance."
)
if field.tag not in ("100", "600"):
return None
elif field.indicator1 not in ("0", "1"):
return None
sub_a = field["a"].strip()
# include subfield $b if present
try:
sub_b = field["b"].strip()
name = f"{sub_a} {sub_b}"
except AttributeError:
name = sub_a
name = normalize_value(name)
# stop at comma to select surname
try:
stop = name.index(",")
name = name[:stop]
except ValueError:
pass
return name
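# Illustrative result (assuming a 100 field with first indicator '1' and
# subfield $a 'King, Stephen,'): personal_name_surname(field) -> 'KING'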
def subject_corporate_name(field: Field = None) -> Optional[str]:
"""
Returns an uppercase corporate name to be used in subject segment
of the call number based on MARC tag 610
Args:
field: pymarc.Field instance
Returns:
name
"""
if field is None:
return None
elif not isinstance(field, Field):
raise CallNoConstructorError(
"Invalid 'field' argument type. Must be pymarc.Field instance."
)
if field.tag != "610":
return None
name = corporate_name_full(field)
return name
def subject_family_name(field: Field = None) -> Optional[str]:
"""
Returns an uppercase family name based on the 600 MARC tag
Args:
field: pymarc.Field instance
Returns:
name
"""
if field is None:
return None
elif not isinstance(field, Field):
raise CallNoConstructorError(
"Invalid 'field' argument type. Must be pymarc.Field instance."
)
if field.tag != "600":
return None
elif field.indicator1 != "3":
return None
try:
stop = field["a"].index("family")
name = field["a"][:stop]
except ValueError:
return None
name = normalize_value(name)
return name
def subject_personal_name(field: Field = None) -> Optional[str]:
"""
Returns personal name to be used in subject segment of the call
    number. Use for biography or Dewey + Name patterns, examples:
    biography: B LOUIS XIV C
    criticism of works of an author: 813 ADAMS C
Args:
field: pymarc.Field instance
Returns:
name
"""
if field is None:
return None
elif not isinstance(field, Field):
raise CallNoConstructorError(
"Invalid 'field' argument type. Must be pymarc.Field instance."
)
if field.tag != "600":
return None
name = personal_name_surname(field)
return name
def subject_topic(field: Field = None) -> Optional[str]:
"""
Returns an uppercase topic to be used in the subject segment of the call
number based on MARC tag 650. Valid only for BPL call numbers.
Examples: programming language, name of operating system, etc.
Args:
field: pymarc.Field instance
Returns:
topic
"""
    if field is None:
        return None
    elif not isinstance(field, Field):
        raise CallNoConstructorError(
            "Invalid 'field' argument type. Must be pymarc.Field instance."
        )
    if field.tag != "650":
        return None
    # Hedged sketch: the exact BPL topic rules are not given in the source;
    # this simply normalizes the main heading in subfield $a.
    topic = normalize_value(field["a"].strip())
    return topic
def title_first_word(field: Field = None) -> Optional[str]:
"""
Returns an uppercase first word (skipping any articles) of
the title field (245 MARC tag subfield $a).
Args:
field: pymarc.Field instance
Returns:
word
"""
    if field is None:
        return None
    elif not isinstance(field, Field):
        raise CallNoConstructorError(
            "Invalid 'field' argument type. Must be pymarc.Field instance."
        )
    if field.tag != "245":
        return None
    try:
        ind2 = int(field.indicator2)
    except ValueError:
        return None
    # Hedged sketch: skip leading articles using the 2nd indicator, then
    # return the normalized first word of subfield $a.
    words = field["a"][ind2:].strip().split(" ")
    return normalize_value(words[0])
def title_initial(field: Field = None) -> Optional[str]:
"""
Returns an uppercase initial (skipping any articles) of
the title field (245 MARC tag subfield $a).
Args:
field: pymarc.Field instance
Returns:
initial
"""
if field is None:
return None
elif not isinstance(field, Field):
raise CallNoConstructorError(
"Invalid 'field' argument type. Must be pymarc.Field instance."
)
if field.tag != "245":
return None
try:
ind2 = int(field.indicator2)
except ValueError:
return None
title = field["a"][ind2:]
title = normalize_value(title)
initial = title[0]
return initial
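# --- Hedged usage sketch (not part of the original module) ---
# Illustrative only; assumes pymarc 4.x-style Field construction with a
# flat subfields list (pymarc 5 switched to Subfield namedtuples).
if __name__ == "__main__":
    author = Field(tag="100", indicators=["1", " "],
                   subfields=["a", "Adams, John,"])
    print(personal_name_surname(author))  # expected: ADAMS
    title = Field(tag="245", indicators=["1", "4"],
                  subfields=["a", "The great adventure /"])
    print(title_initial(title))  # expected: G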
| 0
| 0
| 0
|
b721c28c4d1d01229eaf38efadeba74addb10f97
| 1,310
|
py
|
Python
|
ex31.py
|
Lorranysousc/ExerciciosDeRepeticao
|
4b8ac1c4eb3ac5d2739456a4f967e094fad70256
|
[
"MIT"
] | null | null | null |
ex31.py
|
Lorranysousc/ExerciciosDeRepeticao
|
4b8ac1c4eb3ac5d2739456a4f967e094fad70256
|
[
"MIT"
] | null | null | null |
ex31.py
|
Lorranysousc/ExerciciosDeRepeticao
|
4b8ac1c4eb3ac5d2739456a4f967e094fad70256
|
[
"MIT"
] | null | null | null |
'''Mr. Manoel Joaquim has expanded his business beyond the 1.99 store and now owns a convenience store. Write a program that implements a rudimentary cash register. The program should accept an unknown number of values corresponding to the prices of the goods; the operator enters zero to signal the end of the purchase. The program must then show the purchase total, ask how much cash the customer handed over, and compute and display the change. After that, the program must return to the start to register the next purchase. The output must follow the example below: '''
from time import sleep
start = 1
while start == 1:  # Restarts the program when it reaches the end.
print('LOJAS TABAJARA')
cont = 1
valor_produto = ''
total_compra = 0
    while valor_produto != 0:  # Reads the price of each purchased product.
valor_produto = float(input(f'Produto {cont}: R$ '))
total_compra += valor_produto
cont += 1
        if valor_produto == 0:  # Closes out the purchase and shows the change.
            print(f'Total: R$ {total_compra:.2f}')
            dinheiro_cliente = float(input('Dinheiro: R$ '))
            troco = dinheiro_cliente - total_compra
            print(f'Troco: R$ {troco:.2f}')
            sleep(3)
| 65.5
| 660
| 0.703817
|
'''Mr. Manoel Joaquim has expanded his business beyond the 1.99 store and now owns a convenience store. Write a program that implements a rudimentary cash register. The program should accept an unknown number of values corresponding to the prices of the goods; the operator enters zero to signal the end of the purchase. The program must then show the purchase total, ask how much cash the customer handed over, and compute and display the change. After that, the program must return to the start to register the next purchase. The output must follow the example below: '''
from time import sleep
start = 1
while start == 1:  # Restarts the program when it reaches the end.
print('LOJAS TABAJARA')
cont = 1
valor_produto = ''
total_compra = 0
    while valor_produto != 0:  # Reads the price of each purchased product.
valor_produto = float(input(f'Produto {cont}: R$ '))
total_compra += valor_produto
cont += 1
        if valor_produto == 0:  # Closes out the purchase and shows the change.
            print(f'Total: R$ {total_compra:.2f}')
            dinheiro_cliente = float(input('Dinheiro: R$ '))
            troco = dinheiro_cliente - total_compra
            print(f'Troco: R$ {troco:.2f}')
            sleep(3)
| 0
| 0
| 0
|
c9bdceebaee8f789e4c6a4a1d04b4ef5a1c5d7f9
| 399
|
py
|
Python
|
tests/unit/dummy/__init__.py
|
fabiannagel/schnetkit
|
bf0b9055bdc393d01ac6c3d5f17bb9db13297e32
|
[
"MIT"
] | 1
|
2021-11-03T15:13:48.000Z
|
2021-11-03T15:13:48.000Z
|
tests/unit/dummy/__init__.py
|
fabiannagel/schnetkit
|
bf0b9055bdc393d01ac6c3d5f17bb9db13297e32
|
[
"MIT"
] | null | null | null |
tests/unit/dummy/__init__.py
|
fabiannagel/schnetkit
|
bf0b9055bdc393d01ac6c3d5f17bb9db13297e32
|
[
"MIT"
] | 1
|
2022-02-02T17:34:05.000Z
|
2022-02-02T17:34:05.000Z
|
from schnetkit.engine import Stateful
models = [Dummy]
| 15.96
| 37
| 0.573935
|
from schnetkit.engine import Stateful
class Dummy(Stateful):
def __init__(self, a=2):
self.a = a
self.state = "great"
def get_dict(self):
return {"a": self.a}
def get_state(self):
return {"state": self.state}
def restore(self, payload):
self.state = payload["state"]
def work(self):
self.state = "tired"
models = [Dummy]
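# Hedged usage sketch (illustrative only): round-tripping the Stateful
# protocol implemented by Dummy.
#   d = Dummy(a=3)
#   d.work()                          # state becomes "tired"
#   clone = Dummy(**d.get_dict())     # rebuild from constructor args
#   clone.restore(d.get_state())      # carry over runtime state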
| 182
| 1
| 158
|
4324c7df6b13227127944ca0a19c1650df6f0e53
| 7,020
|
py
|
Python
|
zigbear/custom_protocol/SecurityLayer.py
|
philippnormann/zigbear
|
3cfdb4c9b13adf1e785f27109194b575edf241af
|
[
"BSD-3-Clause"
] | 14
|
2020-04-15T09:43:20.000Z
|
2022-01-29T19:36:27.000Z
|
zigbear/custom_protocol/SecurityLayer.py
|
philippnormann1337/zigbear
|
3cfdb4c9b13adf1e785f27109194b575edf241af
|
[
"BSD-3-Clause"
] | null | null | null |
zigbear/custom_protocol/SecurityLayer.py
|
philippnormann1337/zigbear
|
3cfdb4c9b13adf1e785f27109194b575edf241af
|
[
"BSD-3-Clause"
] | 1
|
2020-06-06T21:41:10.000Z
|
2020-06-06T21:41:10.000Z
|
import secrets
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
from cryptography.hazmat.primitives.kdf.hkdf import HKDF
from zigbear.custom_protocol.scapy_layers import ZigbearSecurityLayer
| 43.602484
| 110
| 0.670228
|
import secrets
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
from cryptography.hazmat.primitives.kdf.hkdf import HKDF
from zigbear.custom_protocol.scapy_layers import ZigbearSecurityLayer
class SecurityLayer:
def __init__(self, networkLayer, network_key=None):
self.networkLayer = networkLayer
self.framecount = secrets.randbelow(2 ** 32)
self.key_cache = {}
self.framecount_cache = {}
# Can and should be none for non-coordinators (has to be 128, 192 or 256 bit)
self.network_key = network_key
self.receive_callback = lambda source, port, data: source
self.networkLayer.set_receive_callback(self.receive)
def new_framecount(self):
s = self.framecount
self.framecount = (self.framecount + 1) % 2 ** 32
return s
def check_framecount(self, source, framecount):
if source in self.framecount_cache:
result = self.framecount_cache[source] < framecount
else:
self.framecount_cache[source] = framecount
result = True
return result
def set_source_framecount(self, source, framecount):
self.framecount_cache[source] = framecount
def set_receive_callback(self, callback):
self.receive_callback = callback
def enable_pairing_mode(self):
self.network_key = None
def make_security_packet(self, data):
try:
sec = ZigbearSecurityLayer(data)
        except Exception:
sec = None
return sec
def receive(self, source, port, data):
sec = self.make_security_packet(data)
if sec:
applayer_data = None
if self.check_framecount(source, sec.fc):
if sec.message_type == 0:
applayer_data = sec.data
elif sec.message_type == 1:
self.handle_pairing_request(source, port, sec.data, sec.flags & 1)
elif sec.message_type == 2 and not self.network_key:
self.handle_network_key(source, sec.fc, sec.data, sec.mac)
else:
applayer_data = self.handle_encrypted_data(source, sec.fc, sec.data, sec.mac)
if applayer_data:
self.receive_callback(source, port, applayer_data)
def handle_pairing_request(self, source, port, secdata, reply):
self.generate_public_key(source)
peer_public_key = self.deserialize_public_key(secdata)
self.key_cache[source]["peer_public_key"] = peer_public_key
self.generate_derived_keys(source, peer_public_key, b"test")
if reply:
self.send(source, port, self.serialize_public_key(self.key_cache[source]["public_key"]), 1, 0)
def handle_network_key(self, source, framecount, secdata, mac):
error, network_key = self.decryption(framecount, secdata, mac, source, True)
if not error:
self.network_key = network_key
self.key_cache.pop(source, None)
self.set_source_framecount(source, framecount)
def handle_encrypted_data(self, source, framecount, secdata, mac):
error, applayer_data = self.decryption(framecount, secdata, mac, source)
if not error:
self.set_source_framecount(source, framecount)
return applayer_data
def send(self, destination, port, data, message_type=3, flags=0):
packet = ZigbearSecurityLayer(flags=flags, message_type=message_type, fc=self.new_framecount())
if message_type == 0:
packet.data = data
elif message_type == 1:
packet.data = self.handle_prepare_pk(destination)
elif message_type == 2:
packet.data, packet.mac = self.handle_prepare_nwk(destination, packet.fc)
else:
packet.data, packet.mac = self.handle_prepare_encdata(destination, packet.fc, data)
self.networkLayer.send(destination, port, packet)
def handle_prepare_pk(self, destination):
self.generate_public_key(destination)
return self.serialize_public_key(self.key_cache[destination]["public_key"])
def handle_prepare_nwk(self, destination, framecount):
return self.encryption(framecount, self.network_key, destination, True)
def handle_prepare_encdata(self, destination, framecount, data):
return self.encryption(framecount, data.build(), destination)
def get_connection_attempts(self):
return list(self.key_cache.keys())
def generate_public_key(self, source):
if source not in self.key_cache or "public_key" not in self.key_cache[source]:
self.key_cache[source] = {}
new_private_key = ec.generate_private_key(ec.SECP224R1(), default_backend())
self.key_cache[source]["public_key"] = new_private_key.public_key()
self.key_cache[source]["private_key"] = new_private_key
def serialize_public_key(self, public_key):
return public_key.public_bytes(encoding=serialization.Encoding.DER,
format=serialization.PublicFormat.SubjectPublicKeyInfo)
def deserialize_public_key(self, serialized_key):
return serialization.load_der_public_key(serialized_key, backend=default_backend())
def generate_derived_keys(self, source, peer_public_key, salt):
shared_key = self.key_cache[source]["private_key"].exchange(ec.ECDH(), peer_public_key)
self.key_cache[source]["shared_encryption_key"] = self.derive_key(b"encryption key", salt, shared_key)
def derive_key(self, info, salt, shared_key):
return HKDF(algorithm=hashes.SHA256(), length=32, salt=salt, info=info,
backend=default_backend()
).derive(shared_key)
def get_nonce(self, framecount, destination):
return framecount.to_bytes(4, byteorder='big')
def encryption(self, framecount, data, destination, shared=False):
key = self.key_cache[destination]["shared_encryption_key"] if shared else self.network_key
nonce = self.get_nonce(framecount, destination)
aesgcm = AESGCM(key)
sk_encrypted = aesgcm.encrypt(nonce, data, None)
return (sk_encrypted[:-16], int.from_bytes(sk_encrypted[-16:], 'big'))
def decryption(self, framecount, data, mac, source, shared=False):
key = self.key_cache[source]["shared_encryption_key"] if shared else self.network_key
if shared:
self.key_cache.pop(source, None)
nonce = self.get_nonce(framecount, source)
aesgcm = AESGCM(key)
error = result = None
try:
result = aesgcm.decrypt(nonce, data + mac.to_bytes(16, 'big'), None)
        except Exception:
error = 1
return (error, result)
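# --- Hedged sketch (not part of the original module) ---
# Round-trips the AESGCM scheme used above with a random 128-bit key.
# Note: the module derives a 4-byte nonce from the frame counter; NIST
# recommends 96-bit nonces and some `cryptography` releases reject very
# short ones, so this sketch uses 12 bytes.
if __name__ == "__main__":
    key = AESGCM.generate_key(bit_length=128)
    nonce = (42).to_bytes(12, byteorder='big')
    ciphertext = AESGCM(key).encrypt(nonce, b'hello', None)
    assert AESGCM(key).decrypt(nonce, ciphertext, None) == b'hello'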
| 5,922
| -1
| 670
|
9af2d928d6cc2a53fd788a67b6c0a78899bbda9e
| 1,106
|
py
|
Python
|
tracks/BamFeatures.py
|
goeckslab/jbrowse-archive-creator
|
438557136c9dd4eb0db89835e5d253e44b50a7a3
|
[
"AFL-3.0"
] | null | null | null |
tracks/BamFeatures.py
|
goeckslab/jbrowse-archive-creator
|
438557136c9dd4eb0db89835e5d253e44b50a7a3
|
[
"AFL-3.0"
] | null | null | null |
tracks/BamFeatures.py
|
goeckslab/jbrowse-archive-creator
|
438557136c9dd4eb0db89835e5d253e44b50a7a3
|
[
"AFL-3.0"
] | null | null | null |
#!/usr/bin/env python2
import os
import json
import logging
from TrackDb import TrackDb
from util import subtools
from util import santitizer
| 38.137931
| 114
| 0.699819
|
#!/usr/bin/env python2
import os
import json
import logging
from TrackDb import TrackDb
from util import subtools
from util import santitizer
class BamFeatures(TrackDb):
def __init__(self, trackName, trackLabel, trackDataURL, trackType, dataType, extraSettings=None):
super(BamFeatures, self).__init__(trackName, trackLabel, trackDataURL, trackType, dataType, extraSettings)
def prepareExtraSetting(self):
if 'category' not in self.extraSettings or not self.extraSettings['category']:
self.extraSettings['category'] = "Default group"
bam_track = dict()
bam_track['type'] = 'JBrowse/View/Track/Alignments2'
bam_track['storeClass'] = 'JBrowse/Store/SeqFeature/BAM'
bam_track['urlTemplate'] = os.path.join('bbi', self.trackName)
bam_track['baiUrlTemplate'] = os.path.join('bbi', self.extraSettings['index'])
bam_track['label'] = self.trackLabel
bam_track['category'] = self.extraSettings['category']
#extraConfigs = json.dumps(bam_track)
extraConfigs = bam_track
return extraConfigs
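# Hedged usage sketch (illustrative names only):
#   track = BamFeatures('sample.bam', 'My alignments', 'bbi/sample.bam',
#                       'bam', 'bam', extraSettings={'index': 'sample.bam.bai'})
#   configs = track.prepareExtraSetting()   # dict ready for trackList.json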
| 874
| 6
| 77
|
defb13f18fc11dc096d17386bf5d7d31a9e0c762
| 5,573
|
py
|
Python
|
coolamqp/uplink/handshake.py
|
smok-serwis/coolamqp
|
d57ada0d478bd1ca94743ae341f6819ba85ea253
|
[
"MIT"
] | 4
|
2018-06-20T13:59:35.000Z
|
2021-08-31T12:03:59.000Z
|
coolamqp/uplink/handshake.py
|
piotrmaslanka/coolamqp
|
d57ada0d478bd1ca94743ae341f6819ba85ea253
|
[
"MIT"
] | 33
|
2016-06-03T11:41:09.000Z
|
2020-07-09T17:48:28.000Z
|
coolamqp/uplink/handshake.py
|
smok-serwis/coolamqp
|
d57ada0d478bd1ca94743ae341f6819ba85ea253
|
[
"MIT"
] | null | null | null |
# coding=UTF-8
from __future__ import absolute_import, division, print_function
"""
Provides reactors that can authenticate an AMQP session
"""
import six
import typing as tp
import copy
import logging
from coolamqp.framing.definitions import ConnectionStart, ConnectionStartOk, \
ConnectionTune, ConnectionTuneOk, ConnectionOpen, ConnectionOpenOk
from coolamqp.framing.frames import AMQPMethodFrame
from coolamqp.uplink.connection.states import ST_ONLINE
from coolamqp.uplink.heartbeat import Heartbeater
from coolamqp import __version__
PUBLISHER_CONFIRMS = b'publisher_confirms'
CONSUMER_CANCEL_NOTIFY = b'consumer_cancel_notify'
CONNECTION_BLOCKED = b'connection.blocked'
SUPPORTED_EXTENSIONS = [
PUBLISHER_CONFIRMS,
CONSUMER_CANCEL_NOTIFY, # half assed support - we just .cancel the consumer, see #12
CONNECTION_BLOCKED
]
CLIENT_DATA = [
# because RabbitMQ is some kind of a fascist and does not allow
# these fields to be of type short-string
(b'product', (b'CoolAMQP', 'S')),
(b'version', (__version__.encode('utf8'), 'S')),
(b'copyright', (b'Copyright (C) 2016-2021 SMOK sp. z o.o.', 'S')),
(
b'information', (
b'Licensed under the MIT License.\nSee https://github.com/smok-serwis/coolamqp for details',
'S')),
(b'capabilities',
([(capa, (True, 't')) for capa in SUPPORTED_EXTENSIONS], 'F')),
]
WATCHDOG_TIMEOUT = 10
logger = logging.getLogger(__name__)
class Handshaker(object):
"""
Object that given a connection rolls the handshake.
"""
def __init__(self, connection, # type: coolamqp.uplink.connection.Connection
node_definition, # type: coolamqp.objects.NodeDefinition
on_success, # type: tp.Callable[[], None]
extra_properties=None # type: tp.Dict[bytes, tp.Tuple[tp.Any, str]]
):
"""
:param connection: Connection instance to use
:type node_definition: NodeDefinition
:param on_success: callable/0, on success
"""
self.connection = connection
self.login = node_definition.user.encode('utf8')
self.password = node_definition.password.encode('utf8')
self.virtual_host = node_definition.virtual_host.encode('utf8')
self.heartbeat = node_definition.heartbeat or 0
self.connection.watch_for_method(0, ConnectionStart,
self.on_connection_start)
# Callbacks
self.on_success = on_success
self.EXTRA_PROPERTIES = extra_properties or []
# Called by internal setup
def on_watchdog(self):
"""
Called WATCHDOG_TIMEOUT seconds after setup begins
If we are not ST_ONLINE after that much, something is wrong and pwn this connection.
"""
        # Not connected in WATCHDOG_TIMEOUT seconds - abort
if self.connection.state != ST_ONLINE:
# closing the connection this way will get to Connection by channels of ListenerThread
self.connection.send(None)
| 39.524823
| 104
| 0.624978
|
# coding=UTF-8
from __future__ import absolute_import, division, print_function
"""
Provides reactors that can authenticate an AMQP session
"""
import six
import typing as tp
import copy
import logging
from coolamqp.framing.definitions import ConnectionStart, ConnectionStartOk, \
ConnectionTune, ConnectionTuneOk, ConnectionOpen, ConnectionOpenOk
from coolamqp.framing.frames import AMQPMethodFrame
from coolamqp.uplink.connection.states import ST_ONLINE
from coolamqp.uplink.heartbeat import Heartbeater
from coolamqp import __version__
PUBLISHER_CONFIRMS = b'publisher_confirms'
CONSUMER_CANCEL_NOTIFY = b'consumer_cancel_notify'
CONNECTION_BLOCKED = b'connection.blocked'
SUPPORTED_EXTENSIONS = [
PUBLISHER_CONFIRMS,
CONSUMER_CANCEL_NOTIFY, # half assed support - we just .cancel the consumer, see #12
CONNECTION_BLOCKED
]
CLIENT_DATA = [
# because RabbitMQ is some kind of a fascist and does not allow
# these fields to be of type short-string
(b'product', (b'CoolAMQP', 'S')),
(b'version', (__version__.encode('utf8'), 'S')),
(b'copyright', (b'Copyright (C) 2016-2021 SMOK sp. z o.o.', 'S')),
(
b'information', (
b'Licensed under the MIT License.\nSee https://github.com/smok-serwis/coolamqp for details',
'S')),
(b'capabilities',
([(capa, (True, 't')) for capa in SUPPORTED_EXTENSIONS], 'F')),
]
WATCHDOG_TIMEOUT = 10
logger = logging.getLogger(__name__)
class Handshaker(object):
"""
Object that given a connection rolls the handshake.
"""
def __init__(self, connection, # type: coolamqp.uplink.connection.Connection
node_definition, # type: coolamqp.objects.NodeDefinition
on_success, # type: tp.Callable[[], None]
extra_properties=None # type: tp.Dict[bytes, tp.Tuple[tp.Any, str]]
):
"""
:param connection: Connection instance to use
:type node_definition: NodeDefinition
:param on_success: callable/0, on success
"""
self.connection = connection
self.login = node_definition.user.encode('utf8')
self.password = node_definition.password.encode('utf8')
self.virtual_host = node_definition.virtual_host.encode('utf8')
self.heartbeat = node_definition.heartbeat or 0
self.connection.watch_for_method(0, ConnectionStart,
self.on_connection_start)
# Callbacks
self.on_success = on_success
self.EXTRA_PROPERTIES = extra_properties or []
# Called by internal setup
def on_watchdog(self):
"""
Called WATCHDOG_TIMEOUT seconds after setup begins
If we are not ST_ONLINE after that much, something is wrong and pwn this connection.
"""
        # Not connected in WATCHDOG_TIMEOUT seconds - abort
if self.connection.state != ST_ONLINE:
# closing the connection this way will get to Connection by channels of ListenerThread
self.connection.send(None)
def on_connection_start(self, payload # type: coolamqp.framing.base.AMQPPayload
):
sasl_mechanisms = payload.mechanisms.tobytes().split(b' ')
locale_supported = payload.locales.tobytes().split(b' ')
# Select a mechanism
if b'PLAIN' not in sasl_mechanisms:
raise ValueError('Server does not support PLAIN')
# Select capabilities
server_props = dict(payload.server_properties)
if b'capabilities' in server_props:
for label, fv in server_props[b'capabilities'][0]:
if label in SUPPORTED_EXTENSIONS:
if fv[0]:
self.connection.extensions.append(label)
self.connection.watchdog(WATCHDOG_TIMEOUT, self.on_watchdog)
self.connection.watch_for_method(0, ConnectionTune,
self.on_connection_tune)
CLIENT_DATA_c = copy.copy(CLIENT_DATA)
CLIENT_DATA_c.extend(self.EXTRA_PROPERTIES)
self.connection.send([
AMQPMethodFrame(0,
ConnectionStartOk(CLIENT_DATA_c, b'PLAIN',
b'\x00' + self.login + b'\x00' + self.password,
locale_supported[0]
))
])
def on_connection_tune(self, payload # type: coolamqp.framing.base.AMQPPayload
):
self.connection.frame_max = payload.frame_max
self.connection.heartbeat = min(payload.heartbeat, self.heartbeat)
self.connection.free_channels.extend(six.moves.xrange(1, (
65535 if payload.channel_max == 0 else payload.channel_max) + 1))
self.connection.watch_for_method(0, ConnectionOpenOk,
self.on_connection_open_ok)
self.connection.send([
AMQPMethodFrame(0, ConnectionTuneOk(payload.channel_max,
payload.frame_max,
self.connection.heartbeat)),
AMQPMethodFrame(0, ConnectionOpen(self.virtual_host))
])
# Install heartbeat handlers NOW, if necessary
if self.connection.heartbeat > 0:
Heartbeater(self.connection, self.connection.heartbeat)
def on_connection_open_ok(self, payload # type: coolamqp.framing.base.AMQPPayload
):
self.on_success()
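# --- Hedged sketch (not part of the original module) ---
# Shows the (name, (value, type-code)) shape CoolAMQP uses for AMQP field
# tables, extended the same way extra_properties is handled in __init__.
if __name__ == '__main__':
    client_data = copy.copy(CLIENT_DATA)
    client_data.extend([(b'connection_name', (b'demo-conn', 'S'))])
    print(dict(client_data)[b'product'])   # (b'CoolAMQP', 'S')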
| 2,447
| 0
| 81
|
08045555ebdef5af831c50bb02363844d684733e
| 10,852
|
py
|
Python
|
nodejs-mobile/test/testpy/__init__.py
|
xuelongqy/cnode
|
ac256264d329e68b6c5ae3281b0e7bb5a95ae164
|
[
"MIT"
] | null | null | null |
nodejs-mobile/test/testpy/__init__.py
|
xuelongqy/cnode
|
ac256264d329e68b6c5ae3281b0e7bb5a95ae164
|
[
"MIT"
] | 4
|
2020-03-13T14:45:49.000Z
|
2020-03-15T16:31:22.000Z
|
nodejs-mobile/test/testpy/__init__.py
|
xuelongqy/cnode
|
ac256264d329e68b6c5ae3281b0e7bb5a95ae164
|
[
"MIT"
] | 1
|
2020-03-15T16:02:18.000Z
|
2020-03-15T16:02:18.000Z
|
# Copyright 2008 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import test
import os
from os.path import join, dirname, exists, splitext, isdir, basename
import re
import ast
FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")
FILES_PATTERN = re.compile(r"//\s+Files:(.*)")
chakraBannedFlags = ["--expose_externalize_string"]
| 39.176895
| 89
| 0.646056
|
# Copyright 2008 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import test
import os
from os.path import join, dirname, exists, splitext, isdir, basename
import re
import ast
FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")
FILES_PATTERN = re.compile(r"//\s+Files:(.*)")
chakraBannedFlags = ["--expose_externalize_string"]
class SimpleTestCase(test.TestCase):
def __init__(self, path, file, arch, mode, context, config, jsEngine, additional=None):
super(SimpleTestCase, self).__init__(context, path, arch, mode)
self.file = file
self.config = config
self.arch = arch
self.mode = mode
self.jsEngine = jsEngine
if additional is not None:
self.additional_flags = additional
else:
self.additional_flags = []
def GetLabel(self):
return "%s %s" % (self.mode, self.GetName())
def GetName(self):
return self.path[-1]
def GetCommand(self):
result = [self.config.context.GetVm(self.arch, self.mode)]
source = open(self.file).read()
flags_match = FLAGS_PATTERN.search(source)
if flags_match:
flag = flags_match.group(1).strip().split()
if self.jsEngine == "chakracore":
flag = filter(lambda x: x not in chakraBannedFlags, flag)
# The following block reads config.gypi to extract the v8_enable_inspector
# value. This is done to check if the inspector is disabled in which case
# the '--inspect' flag cannot be passed to the node process as it will
# cause node to exit and report the test as failed. The use case
# is currently when Node is configured --without-ssl and the tests should
# still be runnable but skip any tests that require ssl (which includes
# the inspector related tests). Also, if there is no ssl support the
# options '--use-bundled-ca' and '--use-openssl-ca' will also cause a
# similar failure so such tests are also skipped.
if len(flag) == 0:
pass
elif ('--inspect' in flag[0] or \
'--use-bundled-ca' in flag[0] or \
'--use-openssl-ca' in flag[0]) and \
self.context.v8_enable_inspector == 0:
print('Skipping as node was configured --without-ssl')
else:
result += flag
    files_match = FILES_PATTERN.search(source)
additional_files = []
if files_match:
additional_files += files_match.group(1).strip().split()
for a_file in additional_files:
result.append(join(dirname(self.config.root), '..', a_file))
if self.additional_flags:
result += self.additional_flags
result += [self.file]
return result
def GetSource(self):
return open(self.file).read()
class MessageTestCase(SimpleTestCase):
def __init__(self, path, file, arch, mode, context, config, expected,
jsEngine, additional=None):
super(MessageTestCase, self).__init__(path, file, arch, mode, context,
config, jsEngine, additional)
self.expected = expected
def IgnoreLine(self, str):
"""Ignore empty lines and valgrind output."""
if not str.strip(): return True
else: return str.startswith('==') or str.startswith('**')
def IsFailureOutput(self, output):
f = file(self.expected)
# Skip initial '#' comment and spaces
#for line in f:
# if (not line.startswith('#')) and (not line.strip()):
# break
# Convert output lines to regexps that we can match
env = { 'basename': basename(self.file) }
patterns = [ ]
for line in f:
if not line.strip():
continue
pattern = re.escape(line.rstrip() % env)
pattern = pattern.replace('\\*', '.*')
pattern = '^%s$' % pattern
patterns.append(pattern)
# Compare actual output with the expected
raw_lines = (output.stdout + output.stderr).split('\n')
outlines = [ s for s in raw_lines if not self.IgnoreLine(s) ]
if len(outlines) != len(patterns):
print "length differs."
print "expect=%d" % len(patterns)
print "actual=%d" % len(outlines)
print "patterns:"
for i in xrange(len(patterns)):
print "pattern = %s" % patterns[i]
print "outlines:"
for i in xrange(len(outlines)):
print "outline = %s" % outlines[i]
return True
for i in xrange(len(patterns)):
if not re.match(patterns[i], outlines[i]):
print "match failed"
print "line=%d" % i
print "expect=%s" % patterns[i]
print "actual=%s" % outlines[i]
return True
return False
def GetSource(self):
return (open(self.file).read()
+ "\n--- expected output ---\n"
+ open(self.expected).read())
class SimpleTestConfiguration(test.TestConfiguration):
def __init__(self, context, root, section, additional=None):
super(SimpleTestConfiguration, self).__init__(context, root)
self.section = section
if additional is not None:
self.additional_flags = additional
else:
self.additional_flags = []
def Ls(self, path):
return [f for f in os.listdir(path) if re.match('^test-.*\.m?js$', f)]
def ListTests(self, current_path, path, arch, mode, jsEngine):
all_tests = [current_path + [t] for t in self.Ls(join(self.root))]
result = []
for test in all_tests:
if self.Contains(path, test):
file_path = join(self.root, reduce(join, test[1:], ""))
test_name = test[:-1] + [splitext(test[-1])[0]]
result.append(SimpleTestCase(test_name, file_path, arch, mode,
self.context, self, jsEngine,
self.additional_flags))
return result
def GetBuildRequirements(self):
return ['sample', 'sample=shell']
def GetTestStatus(self, sections, defs):
status_file = join(self.root, '%s.status' % (self.section))
if exists(status_file):
test.ReadConfigurationInto(status_file, sections, defs)
class ParallelTestConfiguration(SimpleTestConfiguration):
def __init__(self, context, root, section, additional=None):
super(ParallelTestConfiguration, self).__init__(context, root, section,
additional)
def ListTests(self, current_path, path, arch, mode, jsEngine):
result = super(ParallelTestConfiguration, self).ListTests(
current_path, path, arch, mode, jsEngine)
for test in result:
test.parallel = True
return result
class AddonTestConfiguration(SimpleTestConfiguration):
def __init__(self, context, root, section, additional=None):
super(AddonTestConfiguration, self).__init__(context, root, section,
additional)
def Ls(self, path):
def SelectTest(name):
return name.endswith('.js')
result = []
for subpath in os.listdir(path):
if os.path.isdir(join(path, subpath)):
for f in os.listdir(join(path, subpath)):
if SelectTest(f):
result.append([subpath, f[:-3]])
return result
def ListTests(self, current_path, path, arch, mode, jsEngine):
all_tests = [current_path + t for t in self.Ls(join(self.root))]
result = []
for test in all_tests:
if self.Contains(path, test):
file_path = join(self.root, reduce(join, test[1:], "") + ".js")
result.append(
SimpleTestCase(test, file_path, arch, mode, self.context, self,
jsEngine, self.additional_flags))
return result
class AbortTestConfiguration(SimpleTestConfiguration):
def __init__(self, context, root, section, additional=None):
super(AbortTestConfiguration, self).__init__(context, root, section,
additional)
def ListTests(self, current_path, path, arch, mode, jsEngine):
result = super(AbortTestConfiguration, self).ListTests(
current_path, path, arch, mode, jsEngine)
for test in result:
test.disable_core_files = True
return result
class MessageTestConfiguration(SimpleTestConfiguration):
def __init__(self, context, root, section, additional=None):
super(MessageTestConfiguration, self).__init__(context, root, section,
additional)
def Ls(self, path):
if isdir(path):
return [f for f in os.listdir(path)
if f.endswith('.js') or f.endswith('.mjs')]
else:
return []
def ListTests(self, current_path, path, arch, mode, jsEngine):
all_tests = [current_path + [t] for t in self.Ls(join(self.root))]
result = []
for test in all_tests:
if self.Contains(path, test):
test_name = test[:-1] + [splitext(test[-1])[0]]
file_path = join(self.root, reduce(join, test[1:], ''))
file_prefix = file_path[:file_path.rfind('.')]
engine_output_path = file_prefix + (".%s.out" % jsEngine)
output_path = file_prefix + '.out'
if exists(engine_output_path):
output_path = engine_output_path
else:
if not exists(output_path):
raise Exception("Could not find %s" % output_path)
result.append(MessageTestCase(test_name, file_path, arch, mode,
self.context, self, output_path, jsEngine,
self.additional_flags))
return result
| 7,907
| 455
| 657
|
3e13d9f04c5b9e380942a3048140fa5f7f9bee3d
| 919
|
py
|
Python
|
patterns/creational/factory_method.py
|
zhaijingrong/patterns_in_python
|
8cb53a58cbb78dc7ed578887a8e7c481cfa72c80
|
[
"MIT"
] | null | null | null |
patterns/creational/factory_method.py
|
zhaijingrong/patterns_in_python
|
8cb53a58cbb78dc7ed578887a8e7c481cfa72c80
|
[
"MIT"
] | null | null | null |
patterns/creational/factory_method.py
|
zhaijingrong/patterns_in_python
|
8cb53a58cbb78dc7ed578887a8e7c481cfa72c80
|
[
"MIT"
] | null | null | null |
"""
抽象工厂方法--对象创建型模式
1. 目标
定义一个用于创建对象的接口, 让子类决定实例化哪一个类, 使一个类的实例化延迟到子类。
"""
if __name__ == '__main__':
cream_cake_factory = CreamCakeFactory()
cream_cake = cream_cake_factory.make_cake()
print(cream_cake)
fruit_cake_factory = FruitCakeFactory()
fruit_cake = fruit_cake_factory.make_cake()
print(fruit_cake)
| 19.145833
| 47
| 0.671382
|
"""
抽象工厂方法--对象创建型模式
1. 目标
定义一个用于创建对象的接口, 让子类决定实例化哪一个类, 使一个类的实例化延迟到子类。
"""
class CakeFactory(object):
def make_cake(self):
print('make a cake')
class CreamCakeFactory(CakeFactory):
def make_cake(self):
print('make a cream cake')
return CreamCake()
class FruitCakeFactory(CakeFactory):
def make_cake(self):
print('make a fruit cake')
return FruitCake()
class Cake(object):
def __repr__(self):
return 'This is a cake'
class CreamCake(Cake):
def __repr__(self):
return 'This is a cream cake'
class FruitCake(Cake):
def __repr__(self):
return 'This is a fruit cake'
if __name__ == '__main__':
cream_cake_factory = CreamCakeFactory()
cream_cake = cream_cake_factory.make_cake()
print(cream_cake)
fruit_cake_factory = FruitCakeFactory()
fruit_cake = fruit_cake_factory.make_cake()
print(fruit_cake)
| 252
| 35
| 294
|
8a8e864f9097a33ac84f3576473fa8671c78d0e2
| 1,583
|
py
|
Python
|
website/account/models.py
|
divmoe/DASHBOARD
|
42927dfca3797e0bde3e59288a156e33aec6790d
|
[
"MIT"
] | null | null | null |
website/account/models.py
|
divmoe/DASHBOARD
|
42927dfca3797e0bde3e59288a156e33aec6790d
|
[
"MIT"
] | null | null | null |
website/account/models.py
|
divmoe/DASHBOARD
|
42927dfca3797e0bde3e59288a156e33aec6790d
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
| 34.413043
| 77
| 0.722678
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Customer(models.Model):
user=models.OneToOneField(User,null=True,on_delete=models.CASCADE)
name = models.CharField(max_length=100,null=True)
email= models.CharField(max_length=100,null=True)
phone= models.CharField(max_length=100,null=True)
photo=models.ImageField(null=True,blank=True)
    def __str__(self):
        return self.name
class Tag(models.Model):
name = models.CharField(max_length=100,null=True)
    def __str__(self):
        return self.name
class Product(models.Model):
    # A list keeps the choices in a stable order (a set literal is unordered).
    CATOGORY = [
        ('indoor', 'indoor'),
        ('OUT DOOR', 'OUT DOOR')
    ]
name=models.CharField(max_length=100,null=True)
price=models.FloatField(null=True)
catogory=models.CharField(max_length=100,null=True,choices=CATOGORY)
description=models.CharField(max_length=100,null=True,blank=True)
date_created=models.DateTimeField(auto_now_add=True,null=True)
tag = models.ManyToManyField(Tag)
    def __str__(self):
        return self.name
class Order(models.Model):
    # A list keeps the choices in a stable order (a set literal is unordered).
    STATUS = [
        ('pending', 'pending'),
        ('out for delivery', 'out for delivery'),
        ('Delivered', 'Delivered')
    ]
customer =models.ForeignKey(Customer,null=True,on_delete=models.SET_NULL)
product =models.ForeignKey(Product,null=True,on_delete=models.SET_NULL)
status= models.CharField(max_length=100,null=True,choices=STATUS)
date_created = models.DateTimeField(auto_now_add=True,null=True)
def __str__(self):
return self.product.name
| 102
| 1,290
| 90
|
0ce133badac8ace62355d38651dd265c044af4eb
| 1,169
|
py
|
Python
|
chromeos/tools/concat_dbus_conf_files.py
|
zealoussnow/chromium
|
fd8a8914ca0183f0add65ae55f04e287543c7d4a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 14,668
|
2015-01-01T01:57:10.000Z
|
2022-03-31T23:33:32.000Z
|
chromeos/tools/concat_dbus_conf_files.py
|
zealoussnow/chromium
|
fd8a8914ca0183f0add65ae55f04e287543c7d4a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 86
|
2015-10-21T13:02:42.000Z
|
2022-03-14T07:50:50.000Z
|
chromeos/tools/concat_dbus_conf_files.py
|
zealoussnow/chromium
|
fd8a8914ca0183f0add65ae55f04e287543c7d4a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 5,941
|
2015-01-02T11:32:21.000Z
|
2022-03-31T16:35:46.000Z
|
#!/usr/bin/env python
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Concatenates D-Bus busconfig files."""
import sys
import xml.etree.ElementTree
_BUSCONFIG_FILE_HEADER = b"""<!DOCTYPE busconfig
PUBLIC "-//freedesktop//DTD D-Bus Bus Configuration 1.0//EN"
"http://www.freedesktop.org/standards/dbus/1.0/busconfig.dtd">
"""
if __name__ == '__main__':
main()
| 26.568182
| 72
| 0.6929
|
#!/usr/bin/env python
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Concatenates D-Bus busconfig files."""
import sys
import xml.etree.ElementTree
_BUSCONFIG_FILE_HEADER = b"""<!DOCTYPE busconfig
PUBLIC "-//freedesktop//DTD D-Bus Bus Configuration 1.0//EN"
"http://www.freedesktop.org/standards/dbus/1.0/busconfig.dtd">
"""
def main():
if len(sys.argv) < 3:
sys.stderr.write('Usage: %s OUTFILE INFILES\n' % (sys.argv[0]))
sys.exit(1)
out_path = sys.argv[1]
in_paths = sys.argv[2:]
# Parse the first input file.
tree = xml.etree.ElementTree.parse(in_paths[0])
assert(tree.getroot().tag == 'busconfig')
# Append the remaining input files to the first file.
for path in in_paths[1:]:
current_tree = xml.etree.ElementTree.parse(path)
assert(current_tree.getroot().tag == 'busconfig')
for child in current_tree.getroot():
tree.getroot().append(child)
# Output the result.
with open(out_path, "wb") as f:
f.write(_BUSCONFIG_FILE_HEADER)
tree.write(f)
if __name__ == '__main__':
main()
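# Example invocation (hedged; file names are illustrative):
#   ./concat_dbus_conf_files.py merged.conf system-a.conf system-b.conf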
| 657
| 0
| 23
|
02525ed7d476b11f1d77ac07f48e44ec57a3ff58
| 282
|
py
|
Python
|
rand.py
|
sriharikapu/RandomSequenceGenerator
|
7491e43b117be3e24eb5b7d66762699ef4d7593a
|
[
"CC0-1.0"
] | 1
|
2022-02-08T01:47:03.000Z
|
2022-02-08T01:47:03.000Z
|
rand.py
|
sriharikapu/RandomSequenceGenerator
|
7491e43b117be3e24eb5b7d66762699ef4d7593a
|
[
"CC0-1.0"
] | null | null | null |
rand.py
|
sriharikapu/RandomSequenceGenerator
|
7491e43b117be3e24eb5b7d66762699ef4d7593a
|
[
"CC0-1.0"
] | null | null | null |
import sys
import numpy as np
import pandas as pd
np.set_printoptions(threshold=sys.maxsize)
# replace the range, sample size with your custom numbers
arr = np.array(np.random.choice(range(10000), 10000, replace=False))
print(arr)
DF = pd.DataFrame(arr)
DF.to_csv("temp.csv")
| 25.636364
| 69
| 0.755319
|
import sys
import numpy as np
import pandas as pd
np.set_printoptions(threshold=sys.maxsize)
# replace the range, sample size with your custom numbers
arr = np.array(np.random.choice(range(10000), 10000, replace=False))
print(arr)
DF = pd.DataFrame(arr)
DF.to_csv("temp.csv")
| 0
| 0
| 0
|
f78a75c01086c2ca55a46920abf7034c2037b15f
| 2,062
|
py
|
Python
|
src/dataset.py
|
kantharajucn/job_seniority_prediction
|
cad9147ffddab1c5ead878c2f9d9e48199dc0da9
|
[
"Unlicense"
] | null | null | null |
src/dataset.py
|
kantharajucn/job_seniority_prediction
|
cad9147ffddab1c5ead878c2f9d9e48199dc0da9
|
[
"Unlicense"
] | null | null | null |
src/dataset.py
|
kantharajucn/job_seniority_prediction
|
cad9147ffddab1c5ead878c2f9d9e48199dc0da9
|
[
"Unlicense"
] | null | null | null |
import torch
from sklearn.preprocessing import LabelEncoder
from torch.utils.data import Dataset, DataLoader
| 33.258065
| 97
| 0.612027
|
import torch
from sklearn.preprocessing import LabelEncoder
from torch.utils.data import Dataset, DataLoader
class JobsDataset(Dataset):
def __init__(self, X, y, tokenizer, max_len=512):
self.len = len(X)
self.data = X
self.y = y
self.tokenizer = tokenizer
self.max_len = max_len
self._label_encode()
def _label_encode(self):
self.label_encoder = LabelEncoder()
self.y = self.label_encoder.fit_transform(self.y)
def __getitem__(self, index):
title = str(self.data.title[index])
title = " ".join(title.split())
description = str(self.data.description[index])
description = " ".join(description.split())
inputs = self.tokenizer.encode_plus(
text=title,
text_pair=description,
add_special_tokens=True,
max_length=self.max_len,
padding='max_length',
return_token_type_ids=True,
truncation=True
)
ids = inputs['input_ids']
mask = inputs['attention_mask']
return {
'ids': torch.tensor(ids, dtype=torch.long),
'mask': torch.tensor(mask, dtype=torch.long),
'targets': torch.tensor(self.y[index], dtype=torch.long)
}
def __len__(self):
return self.len
def get_data_loader(X_train, X_valid, y_train, y_valid, tokenizer, batch_size=16, num_workers=1):
training_set = JobsDataset(X_train, y_train, tokenizer, max_len=512)
validation_set = JobsDataset(X_valid, y_valid, tokenizer, max_len=512)
train_params = {'batch_size': batch_size,
'shuffle': True,
'num_workers': num_workers
}
test_params = {'batch_size': batch_size,
'shuffle': True,
'num_workers': num_workers
}
training_loader = DataLoader(training_set, **train_params)
validation_loader = DataLoader(validation_set, **test_params)
return training_loader, validation_loader
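# Hedged usage sketch (illustrative; assumes a HuggingFace tokenizer and
# pandas frames with 'title' and 'description' columns):
#   from transformers import BertTokenizer
#   tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
#   train_dl, valid_dl = get_data_loader(X_train, X_valid, y_train, y_valid,
#                                        tokenizer, batch_size=32)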
| 1,792
| 6
| 153
|