hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4909075b90f779efa0cc283e1cf15a85409c64e5 | 1,003 | py | Python | pycodegen/frontend/frontend_cpp/__init__.py | blejdfist/pycodegen | b7a7fad2c9e0a537893e53df0e07544d047e443d | [
"MIT"
] | 5 | 2019-02-15T16:13:43.000Z | 2021-07-22T02:54:57.000Z | pycodegen/frontend/frontend_cpp/__init__.py | blejdfist/pycodegen | b7a7fad2c9e0a537893e53df0e07544d047e443d | [
"MIT"
] | 1 | 2019-12-06T20:24:36.000Z | 2020-05-04T18:43:12.000Z | pycodegen/frontend/frontend_cpp/__init__.py | blejdfist/pycodegen | b7a7fad2c9e0a537893e53df0e07544d047e443d | [
"MIT"
] | null | null | null | """C/C++ parser frontend based on libclang"""
import argparse
import logging
import sys
_LOGGER = logging.getLogger(__name__)
| 27.861111 | 86 | 0.65005 | """C/C++ parser frontend based on libclang"""
import argparse
import logging
import sys
_LOGGER = logging.getLogger(__name__)
def register_arguments(argument_parser):
argument_parser.add_argument("--args", nargs=argparse.REMAINDER,
help="Arguments to pass to clang")
argument_parser.add_argument("--print-ast", action="store_true",
help="Print AST to console")
def run(filename, options=None):
try:
import clang.cindex
except ModuleNotFoundError:
_LOGGER.error("To use the C++ frontend you must have clang>=6.0.0 installed.")
_LOGGER.error("Try installing it using: pip install 'pycodegen[CPP]'")
sys.exit(1)
from .parser_libclang import ParserLibClang
if options is None:
options = {}
parser = ParserLibClang()
if options.get('print_ast'):
print(parser.dump(filename, options.get('args')))
return parser.parse(filename, options.get('args'))
| 828 | 0 | 46 |
4ef3ac76c9db1a0b2024af4c2263d9bea69f7d99 | 1,773 | py | Python | pgscout/ScoutGuard.py | SuspectJohnny/PGScout | 35e3209b681942977be82544616d8dd4a5262574 | [
"Apache-2.0"
] | null | null | null | pgscout/ScoutGuard.py | SuspectJohnny/PGScout | 35e3209b681942977be82544616d8dd4a5262574 | [
"Apache-2.0"
] | null | null | null | pgscout/ScoutGuard.py | SuspectJohnny/PGScout | 35e3209b681942977be82544616d8dd4a5262574 | [
"Apache-2.0"
] | null | null | null | import logging
import sys
import time
from pgscout.Scout import Scout
from pgscout.config import use_pgpool
from pgscout.utils import load_pgpool_accounts
log = logging.getLogger(__name__)
| 31.660714 | 118 | 0.599549 | import logging
import sys
import time
from pgscout.Scout import Scout
from pgscout.config import use_pgpool
from pgscout.utils import load_pgpool_accounts
log = logging.getLogger(__name__)
class ScoutGuard(object):
    """Supervises a single Scout worker and swaps in replacement accounts.

    Runs the scout until it stops (its account presumably disabled or
    shadowbanned), then either pulls a fresh account from PGPool or, when
    PGPool is not in use, parks effectively forever.
    """
    def __init__(self, auth, username, password, job_queue):
        self.job_queue = job_queue
        self.active = False
        # Set up initial account
        initial_account = {
            'auth_service': auth,
            'username': username,
            'password': password
        }
        # No explicit credentials given: request one reusable account from PGPool.
        if not username and use_pgpool():
            initial_account = load_pgpool_accounts(1, reuse=True)
        self.acc = self.init_scout(initial_account)
        self.active = True
    def init_scout(self, acc_data):
        # Build a Scout bound to this guard's shared job queue.
        return Scout(acc_data['auth_service'], acc_data['username'], acc_data['password'], self.job_queue)
    def run(self):
        # Supervision loop: acc.run() blocks while the scout works; returning
        # means the account can no longer be used.
        while True:
            self.active = True
            self.acc.run()
            self.active = False
            self.acc.release(reason=self.acc.last_msg)
            # Scout disabled, probably (shadow)banned.
            if use_pgpool():
                self.swap_account()
            else:
                # We don't have a replacement account, so just wait a veeeery long time.
                time.sleep(60*60*24*1000)
                break
    def swap_account(self):
        # Poll PGPool once a minute until a replacement account is handed out.
        while True:
            new_acc = load_pgpool_accounts(1)
            if new_acc:
                log.info("Swapping bad account {} with new account {}".format(self.acc.username, new_acc['username']))
                self.acc = self.init_scout(new_acc)
                break
            log.warning("Could not request new account from PGPool. Out of accounts? Retrying in 1 minute.")
            time.sleep(60)
| 1,446 | 4 | 131 |
3fadd272a1dd660ba3aac05b14b52bd48175a9ea | 4,027 | py | Python | am/legislative/schema.py | access-missouri/am-django-project | 2457b8089900c61c73000c1d7479b7a72f6d1855 | [
"BSD-2-Clause"
] | 4 | 2018-05-01T20:31:49.000Z | 2021-12-20T19:30:40.000Z | am/legislative/schema.py | access-missouri/am-django-project | 2457b8089900c61c73000c1d7479b7a72f6d1855 | [
"BSD-2-Clause"
] | 22 | 2017-04-13T15:02:09.000Z | 2021-02-02T21:48:41.000Z | am/legislative/schema.py | access-missouri/am-django-project | 2457b8089900c61c73000c1d7479b7a72f6d1855 | [
"BSD-2-Clause"
] | 1 | 2018-07-02T20:08:43.000Z | 2018-07-02T20:08:43.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from graphene import relay, ObjectType, String
from graphene_django.types import DjangoObjectType
from graphene_django.filter import DjangoFilterConnectionField
from legislative import models as LM
| 35.955357 | 82 | 0.627514 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from graphene import relay, ObjectType, String
from graphene_django.types import DjangoObjectType
from graphene_django.filter import DjangoFilterConnectionField
from legislative import models as LM
class BillNode(DjangoObjectType):
    """Relay node exposing a Bill with convenience URL and status fields."""
    absolute_url = String()
    admin_url = String()
    bill_status_text = String()
    def resolve_absolute_url(instance, info, **kwargs):
        return instance.get_absolute_url()
    def resolve_admin_url(instance, info, **kwargs):
        return instance.get_admin_url()
    # Renamed from resolve_bill_Status: graphene only invokes resolvers named
    # resolve_<field_name>, so bill_status_text was previously never resolved.
    def resolve_bill_status_text(instance, info, **kwargs):
        return instance.get_bill_status()
    class Meta:
        model = LM.Bill
        # Lookups allowed through django-filter on the connection field.
        filter_fields = {
            'identifier': ['exact', 'icontains', 'istartswith'],
            'legislative_session__name': ['exact', 'icontains', 'istartswith'],
            'from_organization__name': ['exact', 'icontains', 'istartswith'],
            'title': ['exact', 'icontains', 'istartswith'],
            'lr_number': ['exact', 'icontains', 'istartswith'],
            'description': ['exact', 'icontains', 'istartswith'],
        }
        interfaces = (relay.Node, )
class BillActionNode(DjangoObjectType):
    """Relay node for one recorded action taken on a bill."""
    class Meta:
        model = LM.BillAction
        # Lookups allowed through django-filter on the connection field.
        filter_fields = {
            'bill__identifier': ['exact', 'icontains', 'istartswith'],
            'bill__title': ['exact', 'icontains', 'istartswith'],
            'organization__name': ['exact', 'icontains', 'istartswith'],
            'date': ['year', 'month', 'day'],
        }
        interfaces = (relay.Node,)
class BillSponsorshipNode(DjangoObjectType):
    """Relay node linking a bill to its sponsoring person/member."""
    class Meta:
        model = LM.BillSponsorship
        # Lookups allowed through django-filter on the connection field.
        filter_fields = {
            'bill__identifier': ['exact', 'icontains', 'istartswith'],
            'bill__title': ['exact', 'icontains', 'istartswith'],
            'person__index_name': ['exact', 'icontains', 'istartswith'],
            'member__person': ['exact'],
            'member__session': ['exact'],
            'member__session__name': ['exact', 'icontains', 'istartswith'],
            'sponsored_at': ['year', 'month', 'day'],
        }
        interfaces = (relay.Node,)
class LegislativeSessionNode(DjangoObjectType):
    """Relay node for a legislative session, with its site URL."""
    absolute_url = String()
    def resolve_absolute_url(instance, info, **kwargs):
        # Graphene calls resolve_<field>; delegate to the model's URL helper.
        return instance.get_absolute_url()
    class Meta:
        model = LM.LegislativeSession
        # Lookups allowed through django-filter on the connection field.
        filter_fields = {
            'name': ['exact', 'icontains', 'istartswith'],
            'classification': ['exact', 'icontains', 'istartswith'],
            'start_date': ['year', 'month', 'day'],
            'end_date': ['year', 'month', 'day'],
        }
        interfaces = (relay.Node,)
class BodyMembershipNode(DjangoObjectType):
    """Relay node for a person's membership in a legislative body."""
    absolute_url = String()
    def resolve_absolute_url(instance, info, **kwargs):
        # Graphene calls resolve_<field>; delegate to the model's URL helper.
        return instance.get_absolute_url()
    class Meta:
        model = LM.BodyMembership
        # Lookups allowed through django-filter on the connection field.
        filter_fields = {
            'person__index_name': ['exact', 'icontains', 'istartswith'],
            'person__first_name': ['exact', 'icontains', 'istartswith'],
            'person__last_name': ['exact', 'icontains', 'istartswith'],
            'person__suffix': ['exact', 'icontains', 'istartswith'],
            'body': ['exact', 'icontains', 'istartswith'],
            'session__name': ['exact', 'icontains', 'istartswith'],
            'district__name': ['exact', 'icontains', 'istartswith'],
        }
        interfaces = (relay.Node,)
class Query(object):
    """Root GraphQL query mixin: a relay node field plus a filterable
    connection field for each exposed model.

    NOTE(review): BillSponsorshipNode is defined in this module but not
    exposed here -- confirm whether that is intentional.
    """
    bill = relay.Node.Field(BillNode)
    all_bills = DjangoFilterConnectionField(BillNode)
    bill_action = relay.Node.Field(BillActionNode)
    all_bill_actions = DjangoFilterConnectionField(BillActionNode)
    legislative_session = relay.Node.Field(LegislativeSessionNode)
    all_legislative_sessions = DjangoFilterConnectionField(LegislativeSessionNode)
    body_membership = relay.Node.Field(BodyMembershipNode)
    all_body_memberships = DjangoFilterConnectionField(BodyMembershipNode)
| 357 | 3,284 | 138 |
d1cc6225724878d5dd3d8e4214d652acbfc7f11c | 1,123 | py | Python | techminer2/most_local_cited_authors.py | jdvelasq/techminer2 | ad64a49402749755798a18417c38a7ad10e83bad | [
"MIT"
] | null | null | null | techminer2/most_local_cited_authors.py | jdvelasq/techminer2 | ad64a49402749755798a18417c38a7ad10e83bad | [
"MIT"
] | null | null | null | techminer2/most_local_cited_authors.py | jdvelasq/techminer2 | ad64a49402749755798a18417c38a7ad10e83bad | [
"MIT"
] | null | null | null | """
Most local cited authors in references
===============================================================================
See :doc:`column indicators <column_indicators>` to obtain a `pandas.Dataframe`
with the data. Use the following code:
.. code:: python
column_indicators(
column="authors",
directory=directory,
file_name="references.csv",
)
>>> from techminer2 import *
>>> directory = "data/"
>>> file_name = "sphinx/_static/most_local_cited_authors.html"
>>> most_local_cited_authors(
... top_n=20,
... directory=directory,
... ).write_html(file_name)
.. raw:: html
<iframe src="_static/most_local_cited_authors.html" height="600px" width="100%" frameBorder="0"></iframe>
"""
from .cleveland_chart import cleveland_chart
| 22.019608 | 109 | 0.606411 | """
Most local cited authors in references
===============================================================================
See :doc:`column indicators <column_indicators>` to obtain a `pandas.Dataframe`
with the data. Use the following code:
.. code:: python
column_indicators(
column="authors",
directory=directory,
file_name="references.csv",
)
>>> from techminer2 import *
>>> directory = "data/"
>>> file_name = "sphinx/_static/most_local_cited_authors.html"
>>> most_local_cited_authors(
... top_n=20,
... directory=directory,
... ).write_html(file_name)
.. raw:: html
<iframe src="_static/most_local_cited_authors.html" height="600px" width="100%" frameBorder="0"></iframe>
"""
from .cleveland_chart import cleveland_chart
def most_local_cited_authors(
    top_n=20,
    directory="./",
):
    """Build the 'most local cited authors' Cleveland dot chart from the
    references file and return the resulting figure object."""
    chart_config = dict(
        column="authors",
        top_n=top_n,
        min_occ=None,
        max_occ=None,
        directory=directory,
        metric="local_citations",
        title="Most local cited authors",
        file_name="references.csv",
    )
    return cleveland_chart(**chart_config)
| 312 | 0 | 23 |
d4d4167b896d5de17790ec88ba387137143b6306 | 1,890 | py | Python | scripts/networks_theoretical_bounds_analysis.py | PuchatekwSzortach/voc_ssd | 4bf1013c5243b5a76e4c1a392c8d1c3076c72a90 | [
"MIT"
] | 1 | 2020-01-22T07:13:12.000Z | 2020-01-22T07:13:12.000Z | scripts/networks_theoretical_bounds_analysis.py | PuchatekwSzortach/voc_ssd | 4bf1013c5243b5a76e4c1a392c8d1c3076c72a90 | [
"MIT"
] | null | null | null | scripts/networks_theoretical_bounds_analysis.py | PuchatekwSzortach/voc_ssd | 4bf1013c5243b5a76e4c1a392c8d1c3076c72a90 | [
"MIT"
] | 1 | 2021-11-10T22:02:36.000Z | 2021-11-10T22:02:36.000Z | """
Script for analyzing theoretical bounds on model's recall
"""
import argparse
import sys
import yaml
import tqdm
import net.data
import net.ssd
import net.utilities
def analyse_theoretical_performance(config, ssd_model_configuration):
    """
    Analyse theoretical performance of SSD model on VOC dataset.

    Computes the best recall the configured default boxes could achieve on
    the validation set (annotations matched at threshold 0.5 -- presumably
    an IOU threshold, confirm in net.ssd), prints it, then runs a failure
    analysis over the unmatched annotations.
    """
    voc_samples_loader = net.data.VOCSamplesDataLoader(
        data_directory=config["voc"]["data_directory"],
        data_set_path=config["voc"]["validation_set_path"],
        categories=config["categories"],
        size_factor=config["size_factor"])
    # Yields, per image, a (matched, unmatched) pair of annotation lists.
    matching_analysis_generator = net.ssd.get_matching_analysis_generator(
        ssd_model_configuration=ssd_model_configuration,
        ssd_input_generator=iter(voc_samples_loader),
        threshold=0.5
    )
    matched_annotations = []
    unmatched_annotations = []
    for _ in tqdm.tqdm(range(len(voc_samples_loader))):
        single_image_matched_annotations, single_image_unmatched_annotations = next(matching_analysis_generator)
        matched_annotations.extend(single_image_matched_annotations)
        unmatched_annotations.extend(single_image_unmatched_annotations)
    # Recall upper bound: fraction of annotations any default box could match.
    theoretical_recall = len(matched_annotations) / (len(matched_annotations) + len(unmatched_annotations))
    print("Theoretical recall: {}".format(theoretical_recall))
    # Analyze failures
    net.utilities.analyze_annotations(unmatched_annotations)
def main():
    """
    Script entry point: parse --config, load the YAML file and run the analysis.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', action="store", required=True)
    arguments = parser.parse_args(sys.argv[1:])
    with open(arguments.config) as file:
        config = yaml.safe_load(file)
    # Analyse the VGG-ish model variant described in the config file.
    analyse_theoretical_performance(
        config=config,
        ssd_model_configuration=config["vggish_model_configuration"])
if __name__ == "__main__":
    main()
| 26.25 | 112 | 0.732804 | """
Script for analyzing theoretical bounds on model's recall
"""
import argparse
import sys
import yaml
import tqdm
import net.data
import net.ssd
import net.utilities
def analyse_theoretical_performance(config, ssd_model_configuration):
    """
    Analyse theoretical performance of SSD model on VOC dataset.

    Computes the best recall the configured default boxes could achieve on
    the validation set (annotations matched at threshold 0.5 -- presumably
    an IOU threshold, confirm in net.ssd), prints it, then runs a failure
    analysis over the unmatched annotations.
    """
    voc_samples_loader = net.data.VOCSamplesDataLoader(
        data_directory=config["voc"]["data_directory"],
        data_set_path=config["voc"]["validation_set_path"],
        categories=config["categories"],
        size_factor=config["size_factor"])
    # Yields, per image, a (matched, unmatched) pair of annotation lists.
    matching_analysis_generator = net.ssd.get_matching_analysis_generator(
        ssd_model_configuration=ssd_model_configuration,
        ssd_input_generator=iter(voc_samples_loader),
        threshold=0.5
    )
    matched_annotations = []
    unmatched_annotations = []
    for _ in tqdm.tqdm(range(len(voc_samples_loader))):
        single_image_matched_annotations, single_image_unmatched_annotations = next(matching_analysis_generator)
        matched_annotations.extend(single_image_matched_annotations)
        unmatched_annotations.extend(single_image_unmatched_annotations)
    # Recall upper bound: fraction of annotations any default box could match.
    theoretical_recall = len(matched_annotations) / (len(matched_annotations) + len(unmatched_annotations))
    print("Theoretical recall: {}".format(theoretical_recall))
    # Analyze failures
    net.utilities.analyze_annotations(unmatched_annotations)
def main():
    """
    Script entry point: parse --config, load the YAML file and run the analysis.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', action="store", required=True)
    arguments = parser.parse_args(sys.argv[1:])
    with open(arguments.config) as file:
        config = yaml.safe_load(file)
    # Analyse the VGG-ish model variant described in the config file.
    analyse_theoretical_performance(
        config=config,
        ssd_model_configuration=config["vggish_model_configuration"])
if __name__ == "__main__":
    main()
| 0 | 0 | 0 |
f4ca648ec083b50f3ce5b91759643c2cf7dca9fa | 16,182 | py | Python | examples/lidar_tour_traffic_data_analysis.py | MissMeriel/BeamNGpy | a8467c57537441802bc5b56f0012dfee2b5f5af0 | [
"MIT"
] | 1 | 2021-08-10T19:29:52.000Z | 2021-08-10T19:29:52.000Z | examples/lidar_tour_traffic_data_analysis.py | MissMeriel/BeamNGpy | a8467c57537441802bc5b56f0012dfee2b5f5af0 | [
"MIT"
] | null | null | null | examples/lidar_tour_traffic_data_analysis.py | MissMeriel/BeamNGpy | a8467c57537441802bc5b56f0012dfee2b5f5af0 | [
"MIT"
] | null | null | null | import sys
from time import sleep
import numpy as np
from beamngpy.beamngcommon import *
import time
import random, copy
import math
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
from matplotlib.pyplot import imshow
import pandas as pd
import seaborn as sn
import matplotlib.pyplot as plt
from scipy.stats import norm
from astropy import modeling
import shutil
import sklearn
from sklearn import cluster
# format data for dataframe
if __name__ == '__main__':
main()
| 38.165094 | 131 | 0.559016 | import sys
from time import sleep
import numpy as np
from beamngpy.beamngcommon import *
import time
import random, copy
import math
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
from matplotlib.pyplot import imshow
import pandas as pd
import seaborn as sn
import matplotlib.pyplot as plt
from scipy.stats import norm
from astropy import modeling
import shutil
import sklearn
from sklearn import cluster
def diff_damage(damage, damage_prev):
    """Return the change in the 'damage' reading between two snapshots.

    Either snapshot being None yields 0 (no measurable change).
    """
    if damage is None or damage_prev is None:
        return 0
    return damage['damage'] - damage_prev['damage']
def make_gaussian(crash_vals):
    """Plot a histogram of crash severities with a fitted normal PDF overlaid.

    Mutates global pyplot state; note plt.xlim() must be read AFTER plt.hist
    so the fitted curve spans the histogram's actual x-range.
    """
    mean, std = norm.fit(crash_vals)
    plt.hist(crash_vals, bins=30) #, normed=True)
    xmin, xmax = plt.xlim()
    print("mean:{} std:{} xmin:{} xmax:{}".format(mean, std, xmin, xmax))
    x = np.linspace(xmin, xmax, 100)
    y = norm.pdf(x, mean, std)
    plt.plot(x, y)
    # fit line
    # fitter = modeling.fitting.LevMarLSQFitter()
    # model = modeling.models.Gaussian1D() # depending on the data you need to give some initial values
    # fitted_model = fitter(model, x, crash_vals)
    #
    # plt.plot(x, crash_vals)
    # plt.plot(x, fitted_model(x))
    plt.show()
    plt.pause(1)
def make_time_to_crash_histograms(time_to_crash_vals):
    """Show a 2x2 grid of time-to-crash histograms at several bin widths.

    Values >= 45s are filtered out (45 appears to be the run timeout --
    confirm against the data collection code), but the bin count is still
    derived from the unfiltered maximum.
    """
    temp = [c for c in time_to_crash_vals if c < 45]
    for i, binwidth in enumerate([1, 5, 10, 15]):
        # set up plot
        ax = plt.subplot(2, 2, i+1)
        # draw plot
        maximum = max(time_to_crash_vals)
        ax.hist(temp, bins=int(maximum/binwidth), color='blue', edgecolor='black')
        ax.set_title('Time to Crash \n(Binwidth={})'.format(binwidth), size=18)
        ax.set_xlabel('Time (s)', size=10)
        ax.set_ylabel('Runs', size=10)
    plt.tight_layout()
    plt.show()
    plt.pause(1)
def make_crash_histograms(crash_vals):
    """Show a 2x2 grid of damage histograms for crash runs only (damage != 0),
    at several bin widths; bin count is derived from the unfiltered maximum."""
    temp = [c for c in crash_vals if c != 0]
    for i, binwidth in enumerate([10, 100, 1000, 2000]):
        # set up plot
        ax = plt.subplot(2, 2, i+1)
        # draw plot
        maximum = max(crash_vals)
        ax.hist(temp, bins=int(maximum/binwidth), color='blue', edgecolor='black')
        ax.set_title('Crash Vehicle Damage \n(Binwidth={})'.format(binwidth), size=18)
        ax.set_xlabel('Vehicle damage', size=10)
        ax.set_ylabel('Runs', size=10)
    plt.tight_layout()
    plt.show()
    plt.pause(1)
def make_histograms(crash_vals):
    """Show a 2x2 grid of damage histograms over ALL runs (including the
    zero-damage ones), at several bin widths."""
    #for i, binwidth in enumerate([1000, 2000, 5000, 10000]):
    for i, binwidth in enumerate([10, 100, 1000, 2000]):
        # set up plot
        ax = plt.subplot(2, 2, i+1)
        # draw plot
        maximum = max(crash_vals)
        ax.hist(crash_vals, bins=int(maximum/binwidth), color='blue', edgecolor='black')
        ax.set_title('All Vehicle Damage \n(Binwidth={})'.format(binwidth), size=18)
        ax.set_xlabel('Vehicle damage', size=10)
        ax.set_ylabel('Runs', size=10)
    plt.tight_layout()
    plt.show()
    plt.pause(1)
def process_time_to_crash(ts):
    """Print summary statistics for a list of per-run durations (seconds).

    Runs shorter than 45s are treated as crash traces (runs reaching 45s are
    assumed to have hit the timeout without crashing -- confirm against the
    collection code). Empty inputs and crash-free batches are now handled
    instead of raising ZeroDivisionError.
    """
    print("ts:{}".format(ts))
    if not ts:
        print("No traces to process.")
        return
    crash_ts = [t for t in ts if t < 45]
    print("Number of crash traces:{}".format(len(crash_ts)))
    if crash_ts:
        crash_avg = sum(crash_ts) / len(crash_ts)
        print("Avg time to crash for crash traces:{}".format(crash_avg))
    avg = sum(ts) / len(ts)
    print("Avg time for all traces:{}".format(avg))
def collate_crash_files(directory="H:/experiment2/"):
    """Scan every trace CSV under `directory`'s subdirectories and copy each
    file containing a crash (non-zero CRASH column) into the collated folder.

    Fixes the destination format string: the original 'H:/.../{{}' raised
    ValueError on the first crash file ('{{' escapes to a literal '{', leaving
    an unmatched '}'), so the filename was never interpolated.
    """
    dirs = os.listdir(directory)
    print("dirs:{}".format(dirs))
    for d in dirs:
        files = os.listdir("{}{}".format(directory, d))
        for filename in files:
            full_filename = "{}{}/{}".format(directory, d, filename)
            with open(full_filename,'r') as f:
                print("reading {}".format(full_filename))
                header = f.readline().replace("\n","").split(",") # consume header row
                line = f.readline()
                crash_val = 0
                # header = ["TIMESTAMP","VEHICLE_POSITION","VEHICLE_ORIENTATION","VELOCITY","LIDAR","CRASH","EXTERNAL_VEHICLES"]
                while line and crash_val == 0:
                    # CRASH is the second-to-last comma-separated column.
                    crash_val = float(line.split(",")[-2])
                    if crash_val != 0:
                        print("File {} contains crash with severity {}".format(filename, crash_val))
                        # copy file to collated crash directory
                        dst = 'H:/experiment_2_crashes/{}'.format(filename)
                        print("copying", full_filename, " to ", dst)
                        shutil.copyfile(full_filename, dst)
                        break
                    line = f.readline()
def parse_files(directory="H:/experiment2/"):
    """Walk per-scenario subdirectories of trace CSVs, count crashes and
    collect severities; returns (crash_vals, ts).

    NOTE(review): crash_vals.append(0) runs for EVERY file (the with-block
    continues after the break), so crash runs contribute both their severity
    and a 0 -- confirm whether that is intended before using the averages.
    NOTE(review): a directory with zero crashes raises ZeroDivisionError on
    the per-directory average below.
    """
    #directory = "H:/experiment2/"
    # set True if counting crashes
    # Counting crashes is sloowwww
    counting_crashes = True
    dirs = os.listdir(directory)
    print("dirs:{}".format(dirs))
    crashes = 0
    total_runs = 0
    crash_vals = []
    ts = []
    for d in dirs:
        files = os.listdir("{}{}".format(directory, d))
        dir_crashes = 0
        dir_crash_vals = []
        for filename in files:
            filename = "{}{}/{}".format(directory, d, filename)
            total_runs += 1
            if counting_crashes:
                with open(filename,'r') as f:
                    #print("reading {}".format(filename))
                    t = 0
                    # Skip the header row, then read the first data row.
                    line = f.readline()
                    line = f.readline()
                    #header = ["TIMESTAMP","VEHICLE_POSITION","VEHICLE_ORIENTATION","VELOCITY","LIDAR","CRASH","EXTERNAL_VEHICLES"]
                    while line:
                        crash = line.split(",")
                        # print("crash[-1]:{}".format(crash[-1]))
                        # print("crash[-2]:{}".format(crash[-2]))
                        # print("crash[-3]:{}".format(crash[-3]))
                        #break
                        # CRASH is the second-to-last comma-separated column.
                        crash_val = float(crash[-2])
                        if crash_val != 0:
                            print("File {} contains crash with severity {}".format(filename, crash[-2]))
                            crashes += 1
                            dir_crashes += 1
                            crash_vals.append(float(crash[-2]))
                            dir_crash_vals.append(float(crash[-2]))
                            #ts.append(t)
                            break
                        line = f.readline()
                        # Rows are sampled every 0.25s.
                        t += 0.25
                    crash_vals.append(0)
                    ts.append(t)
        print("Crashes in {}: {} ({} of {} runs)".format(d, dir_crashes, dir_crashes / float(len(files)), len(files)))
        print("Avg crash severity:{}\n".format(sum(dir_crash_vals) / len(dir_crash_vals)))
        #break
    print("Total runs: {}".format(total_runs))
    print("Total crashes: {}".format(crashes))
    print("Crash rate: {}".format(crashes / total_runs))
    print("Max crash severity:{}".format(max(crash_vals)))
    print("Avg crash severity:{}".format(sum(crash_vals)/len(crash_vals)))
    #print("Min crash val:{}".format(min(crash_vals)))
    return crash_vals, ts
def separate_line(line, header):
    """Parse one CSV trace row into a dict keyed by the given header names.

    Scalar columns (TIMESTAMP, CRASH, EXTERNAL_VEHICLES) come from the raw
    comma split; the four bracketed vector columns (header[1:5]) are parsed
    into lists of floats.
    """
    parsed = dict.fromkeys(header)
    # Scalar fields: first token, then second-to-last and last tokens.
    tokens = line.split(",")
    parsed['TIMESTAMP'] = tokens[0]
    parsed['CRASH'] = float(tokens[-2])
    parsed['EXTERNAL_VEHICLES'] = tokens[-1].replace("\n", "")
    # Vector fields: each '['-delimited segment up to its closing ']'.
    segments = line.split("[")
    for segment, key in zip(segments[1:5], header[1:5]):
        values = segment.split("]")[0].split(", ")
        parsed[key] = [float(v) for v in values]
    return parsed
# format data for dataframe
def format_data(dicts, header):
    """Collate a list of per-row dicts (from separate_line) into columns.

    Position/orientation/velocity vectors are exploded into _X/_Y/_Z columns
    and the LIDAR cloud is reduced to _MEAN/_VAR columns.

    NOTE(review): the `else` below pairs with the `h == "LIDAR"` check, so
    the vector headers ALSO get a column of whole 3-vectors under their
    original key, while the 'LIDAR' key itself stays None -- confirm this is
    intended before consumers rely on it.
    """
    print("dicts none?", dicts is None)
    collated_dict = dict.fromkeys(header)
    # Pre-create the per-axis column keys.
    for g in ["VEHICLE_POSITION", "VEHICLE_ORIENTATION", "VELOCITY"]:
        for gg in ["_X","_Y","_Z"]:
            collated_dict[g+gg] = None
        del gg
    del g
    for h in header:
        if h == "VEHICLE_POSITION" or h == "VEHICLE_ORIENTATION" or h == "VELOCITY":
            # One column per axis: i-th component of each row's vector.
            for i,gg in enumerate(["_X","_Y","_Z"]):
                temp = []
                for d in dicts:
                    temp.append(d[h][i])
                collated_dict[h+gg] = copy.deepcopy(temp)
        if h == "LIDAR":
            # Summarise the point cloud instead of keeping raw points.
            for i,gg in enumerate(["_MEAN","_VAR"]):
                temp = []
                for d in dicts:
                    if "MEAN" in gg:
                        temp.append(np.mean(d[h]))
                    elif "VAR" in gg:
                        temp.append(np.var(d[h]))
                collated_dict[h+gg] = copy.deepcopy(temp)
        else:
            # Runs for every non-LIDAR header, including the vector ones.
            temp = []
            for d in dicts:
                temp.append(d[h])
            collated_dict[h] = copy.deepcopy(temp)
    for h in header:
        if h != "LIDAR":
            print("collated_dict", h, collated_dict[h])
    # keys = collated_dict.keys()
    # for h in keys:
    #     if collated_dict[h] == None:
    #         collated_dict.pop(h)
    return collated_dict
def parse_files_endstate(directory="H:/experiment2/"):
    """Extract the first crash row from each trace file, build per-directory
    and overall Pearson correlation heatmaps over the collated columns.

    NOTE(review): several latent bugs -- `crashes` is never incremented and
    `crash_vals` is never appended to, so the summary block at the end raises
    ValueError on max([]); `ts` is never defined, so the final return raises
    NameError; the "All Traces" matrix is built from `dir_dicts` (the LAST
    directory only) rather than `all_dicts`. Intent is ambiguous, so the
    code is left as-is and only documented.
    """
    #directory = "H:/experiment2/"
    # set True if counting crashes
    # Counting crashes is sloowwww
    counting_crashes = True
    dirs = os.listdir(directory)
    print("dirs:{}".format(dirs))
    crashes = 0
    total_runs = 0
    crash_vals = []
    all_dicts = []
    for d in dirs:
        files = os.listdir("{}{}".format(directory, d))
        dir_dicts = []
        for filename in files:
            filename = "{}{}/{}".format(directory, d, filename)
            total_runs += 1
            with open(filename,'r') as f:
                print("reading {}".format(filename))
                t = 0
                header = f.readline().replace("\n","").split(",") # get header
                # print("header", header)
                # lastline = f.readlines()[-1]
                # print(lastline)
                line = f.readline()
                crash_val = 0
                # header = ["TIMESTAMP","VEHICLE_POSITION","VEHICLE_ORIENTATION","VELOCITY","LIDAR","CRASH","EXTERNAL_VEHICLES"]
                while line and crash_val == 0:
                    crash_val = float(line.split(",")[-2])
                    if crash_val != 0:
                        print("File {} contains crash with severity {}".format(filename, crash_val))
                        separated_line_dict = separate_line(line, header)
                        dir_dicts.append(copy.deepcopy(separated_line_dict))
                    line = f.readline()
        all_dicts.extend(dir_dicts)
        # Per-directory correlation heatmap over the crash-row columns.
        data = format_data(dir_dicts, header)
        df = pd.DataFrame(data, columns=data.keys())
        print(df)
        corrMatrix = df.corr()
        print(corrMatrix)
        figure(figsize=(10,10), dpi=80)
        sn.heatmap(corrMatrix, annot=True)
        plt.title("{} Pearson Correlation".format(d))
        plt.gcf().subplots_adjust(left=0.21, bottom=0.25)
        plt.show()
        # plt.savefig(directory + "{}-Pearson-Correlation.jpg".format(d))
        plt.pause(0.01)
        # print("Crashes in {}: {} ({} of {} runs)".format(d, dir_crashes, dir_crashes / float(len(files)), len(files)))
        # print("Avg crash severity:{}\n".format(sum(dir_crash_vals) / len(dir_crash_vals)))
        # #break
    # Overall heatmap (see NOTE above: uses only the last directory's rows).
    data = format_data(dir_dicts, header)
    df = pd.DataFrame(data, columns=data.keys())
    print("dataframe:\n", df)
    corrMatrix = df.corr()
    print(corrMatrix)
    figure(figsize=(10, 10), dpi=80)
    sn.heatmap(corrMatrix, annot=True)
    plt.title("All Traces Pearson Correlation")
    plt.gcf().subplots_adjust(left=0.25, bottom=0.25)
    plt.show()
    # plt.savefig(directory+"All-Traces-Pearson-Correlation.jpg")
    plt.pause(0.01)
    print("Total runs: {}".format(total_runs))
    print("Total crashes: {}".format(crashes))
    print("Crash rate: {}".format(crashes / total_runs))
    print("Max crash severity:{}".format(max(crash_vals)))
    print("Avg crash severity:{}".format(sum(crash_vals)/len(crash_vals)))
    #print("Min crash val:{}".format(min(crash_vals)))
    return crash_vals, ts
def create_feature_vector(line_dict):
    """Flatten a parsed trace row into a numeric feature list.

    The raw LIDAR entry is excluded; list values are spliced in as-is and
    scalar values are coerced to float, preserving dict insertion order.
    """
    features = []
    for key, value in line_dict.items():
        if key == "LIDAR":
            continue
        if isinstance(value, list):
            features.extend(value)
        else:
            features.append(float(value))
    return features
def parse_files_equiv(directory="H:/experiment2_crashes/"):
    """Group collated crash traces into equivalence classes by correlation.

    Builds a feature vector from each file's first crash row, computes the
    pairwise Pearson correlation between files, then unions files into
    groups using the top correlation band and consolidates with the next
    band.

    NOTE(review): the slice indices (-296:-148 and -444:-296) hard-code the
    number of crash files (148, matrix incl. self-pairs) -- they must change
    if the collated directory changes.
    """
    files = os.listdir(directory)
    crash_sigs = {}
    for filename in files:
        if ".csv" in filename:
            full_filename = "{}/{}".format(directory, filename)
            with open(full_filename,'r') as f:
                # print("reading {}".format(full_filename))
                header = f.readline().replace("\n","").split(",") # get header
                line = f.readline()
                crash_val = 0
                # header = ["TIMESTAMP","VEHICLE_POSITION","VEHICLE_ORIENTATION","VELOCITY","LIDAR","CRASH","EXTERNAL_VEHICLES"]
                while line and crash_val == 0:
                    crash_val = float(line.split(",")[-2])
                    if crash_val != 0:
                        print("File {} contains crash with severity {}".format(filename, crash_val))
                        separated_line_dict = separate_line(line, header)
                        arr = create_feature_vector(separated_line_dict)
                        crash_sigs[filename] = copy.deepcopy(arr)
                        break
                    line = f.readline()
    # One column per file: its crash-state feature vector.
    df = pd.DataFrame(crash_sigs, columns=crash_sigs.keys())
    print("dataframe:\n", df)
    corrMatrix = df.corr()
    # print(corrMatrix)
    # GROUP BY HIGHEST CORRELATION ONLY
    c = df.corr()#.abs()
    s = c.unstack()
    so = s.sort_values(kind="quicksort")
    # print("len(so)", len(so))
    # print(so[-296:-148])
    new = so[-296:-148]
    # array of sets
    groups = np.array([])
    for i in new.items():
        # file names are i[0][0] and i[0][1]
        added = False
        for group in groups:
            if i[0][0] in group or i[0][1] in group:
                group.add(i[0][0])
                group.add(i[0][1])
                added = True
        if not added:
            groups = np.append(groups, {i[0][0], i[0][1]})
    # REGROUP BY SECOND HIGHEST CORRELATION -- use second most correlated grouping to consolidate groups
    print(so[-444:-296])
    new = so[-444:-296]
    for i in new.items():
        # file names are i[0][0] and i[0][1]
        added = False
        for group in groups:
            if i[0][0] in group or i[0][1] in group:
                group.add(i[0][0])
                group.add(i[0][1])
                added = True
        if not added:
            groups = np.append(groups, {i[0][0], i[0][1]})
    # print finished groups
    print("\nCORRELATION GROUPS ({}):".format(groups.shape))
    for g in groups:
        print(g)
    # correlation is i[1]
    # # Convert DataFrame to matrix
    # mat = df.values
    # # Using sklearn
    # km = cluster.KMeans(n_clusters=5)
    # km.fit(mat)
    # # Get cluster assignment labels
    # labels = km.labels_
    # # Format results as a DataFrame
    # results = pd.DataFrame(data=labels, columns=['cluster'])
    # print("SKLEARN RESULTS ({}, {}): ".format(type(results), len(results)))
    # print(results)
    # sz = 150
    # dpi = 100
    # figure(figsize=(sz, sz), dpi=100)
    # sn.heatmap(corrMatrix, annot=True)
    # plt.title("All Traces Pearson Correlation")
    # plt.gcf().subplots_adjust(left=0.25, bottom=0.25)
    # plt.show()
    # plt.pause(0.01)
    # plt.savefig(directory+"All-Traces-Pearson-Correlation-{}x{}dpi={}.png".format(sz,sz,dpi))
    # find highest correlations
def main():
    """Entry point: run the crash-equivalence analysis over collected traces.

    The commented-out calls are alternative analyses kept for reference.
    Removed the unused `overallbegin = time.time()` local (assigned, never read).
    """
    # collate_crash_files()
    # crash_vals, ts = parse_files()
    # crash_vals, ts = parse_files_endstate()
    parse_files_equiv()
    # make_histograms(crash_vals)
    # make_crash_histograms(crash_vals)
    # make_time_to_crash_histograms(ts)
    # make_gaussian(crash_vals)
    # process_time_to_crash(ts)
if __name__ == '__main__':
    main()
| 15,364 | 0 | 321 |
d0f29bd121a2f242a9f7c96ec14a606691ede76d | 1,346 | py | Python | microbenchmarks/callables.py | 97littleleaf11/mypyc-benchmarks | 30661c7ffc30d6c1c4fc4e45e581e5aec4a5361c | [
"MIT"
] | 13 | 2020-05-03T11:18:41.000Z | 2021-11-22T06:42:57.000Z | microbenchmarks/callables.py | 97littleleaf11/mypyc-benchmarks | 30661c7ffc30d6c1c4fc4e45e581e5aec4a5361c | [
"MIT"
] | 23 | 2020-05-03T11:18:35.000Z | 2021-09-03T12:55:21.000Z | microbenchmarks/callables.py | 97littleleaf11/mypyc-benchmarks | 30661c7ffc30d6c1c4fc4e45e581e5aec4a5361c | [
"MIT"
] | 5 | 2021-06-12T01:25:57.000Z | 2022-03-17T17:50:46.000Z | from typing import Callable
from benchmarking import benchmark
@benchmark
@benchmark
@benchmark
| 15.651163 | 53 | 0.526746 | from typing import Callable
from benchmarking import benchmark
@benchmark
def nested_func() -> None:
    """Benchmark repeated calls into a helper that uses a non-escaping closure."""
    total = sum(call_nested_fast() for _ in range(100 * 1000))
    assert total == 5500000, total
def call_nested_fast() -> int:
    """Accumulate 0..9 plus one per step through a non-escaping closure; returns 55."""
    total = 0
    def bump(amount: int) -> None:
        nonlocal total
        total += amount
    for value in range(10):
        bump(value)
        total += 1
    return total
@benchmark
def nested_func_escape() -> None:
    """Benchmark a closure that escapes its defining scope via a callback."""
    acc = 0
    for _ in range(100 * 1000):
        acc = nested_func_inner(acc)
    assert acc == 300000, acc
def nested_func_inner(n: int) -> int:
    """Return n plus 0+1+2, accumulated through a closure passed to invoke()."""
    def accumulate(delta: int) -> None:
        nonlocal n
        n += delta
    invoke(accumulate)
    return n
def invoke(f: Callable[[int], None]) -> None:
    """Call *f* with 0, 1 and 2, in order."""
    for argument in (0, 1, 2):
        f(argument)
@benchmark
def method_object() -> None:
    """Benchmark bound-method objects passed around as callbacks."""
    adders = []
    for base in range(5):
        adders.extend((Adder(base), Adder2(base)))
    total = 0
    for _ in range(100 * 1000):
        for adder in adders:
            total = adjust(total, adder.add)
    assert total == 7500000, total
def adjust(n: int, add: Callable[[int], int]) -> int:
    """Apply *add* to *n* three times and return the final value."""
    result = n
    for _ in range(3):
        result = add(result)
    return result
class Adder:
    """Holds an integer offset and adds it to values passed to add()."""
    def __init__(self, n: int) -> None:
        self.n = n
    def add(self, x: int) -> int:
        offset = self.n
        return offset + x
class Adder2(Adder):
    """Adder variant whose result is one greater, forcing a distinct bound method."""
    def add(self, x: int) -> int:
        return 1 + self.n + x
| 964 | -10 | 283 |
ab2dbea6e1fc2af0c93dae92cb5a85ec8dcd206c | 974 | py | Python | flask/app.py | apple800/python_study | 29864a6b4389d47c5d59f6459bee7c7fd09cceb5 | [
"MIT"
] | null | null | null | flask/app.py | apple800/python_study | 29864a6b4389d47c5d59f6459bee7c7fd09cceb5 | [
"MIT"
] | null | null | null | flask/app.py | apple800/python_study | 29864a6b4389d47c5d59f6459bee7c7fd09cceb5 | [
"MIT"
] | null | null | null | from flask import Flask, render_template, request, redirect, url_for
# Init app
app = Flask(__name__)
db = []
# URI, endpoint
@app.route('/', methods=['GET', 'POST'])
@app.route('/delete/<task>', methods=['GET'])
# Update list
# @app.route('/update/<task>', methods=['GET'])
# def update(task):
# num = db.index(task)
# text = '수정'
# db[num] = text
# return redirect(url_for('main'))
if __name__ == '__main__':
# Only in development
app.run(debug=True)
| 21.644444 | 68 | 0.603696 | from flask import Flask, render_template, request, redirect, url_for
# Init app
app = Flask(__name__)
db = []
# URI, endpoint
@app.route('/', methods=['GET', 'POST'])
def main():
    """Render the todo list; on POST add a new task and/or modify an existing one."""
    if request.method == 'POST':
        # .get() with defaults avoids a 400 error when a form omits a field
        # (the original indexed request.form[...] unconditionally).
        new_task = request.form.get('new_task', '')
        modify_task = request.form.get('mo', '')
        num = request.form.get('num', '')
        if len(new_task) > 0 and new_task not in db:
            db.append(new_task)
        # BUG fix: the original re-tested ``new_task not in db`` here, so a
        # modification was skipped whenever a new task had just been added;
        # the duplicate check must apply to the modified text. The ``num``
        # guard prevents int('') from raising when no index was submitted.
        if len(modify_task) > 0 and modify_task not in db and num:
            db[int(num) - 1] = modify_task
    return render_template('index.html', todo=db, name='Bin')
@app.route('/delete/<task>', methods=['GET'])
def delete(task):
    """Remove *task* from the in-memory todo list, then redirect to the main page.

    NOTE(review): ``list.remove`` raises ValueError if *task* is absent
    (e.g. a stale link) -- confirm whether that should be handled.
    """
    db.remove(task)
    return redirect(url_for('main'))
# Update list
# @app.route('/update/<task>', methods=['GET'])
# def update(task):
# num = db.index(task)
# text = '수정'
# db[num] = text
# return redirect(url_for('main'))
if __name__ == '__main__':
# Only in development
app.run(debug=True)
| 442 | 0 | 44 |
fbea0d257d3da6c82d0b9bbeaff1d1aaf62a8831 | 795 | py | Python | neodroid/messaging/__init__.py | sintefneodroid/neo | 0999f1dff95c4a8c5880a9b3add532d74f38586a | [
"Apache-2.0"
] | 7 | 2017-09-13T08:28:37.000Z | 2022-01-21T15:59:14.000Z | neodroid/messaging/__init__.py | sintefneodroid/neo | 0999f1dff95c4a8c5880a9b3add532d74f38586a | [
"Apache-2.0"
] | 25 | 2019-03-25T13:49:43.000Z | 2019-05-02T13:58:13.000Z | neodroid/messaging/__init__.py | sintefneodroid/neo | 0999f1dff95c4a8c5880a9b3add532d74f38586a | [
"Apache-2.0"
] | 2 | 2017-09-21T10:14:39.000Z | 2017-10-21T09:57:04.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "Christian Heider Nielsen"
import logging
from enum import Enum, auto
from functools import wraps
| 25.645161 | 56 | 0.631447 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "Christian Heider Nielsen"
import logging
from enum import Enum, auto
from functools import wraps
class ClientEvents(Enum):
    """Connection-lifecycle events reported by the messaging client."""
    CONNECTED = auto()
    DISCONNECTED = auto()
    TIMEOUT = auto()
def message_client_event(event):
    """Decorator factory: log a message for *event* before invoking the handler.

    *event* is captured in the closure; the wrapped function is called with
    its original arguments and its return value is passed through unchanged.
    """
    def decorator(handler):
        @wraps(handler)
        def wrapper(ctx, *args, **kwargs):
            # The three events are mutually exclusive, so branch order
            # does not matter; log the matching message (if any).
            if event is ClientEvents.TIMEOUT:
                logging.warning("Connection timeout")
            elif event is ClientEvents.CONNECTED:
                logging.info("Connected to server")
            elif event is ClientEvents.DISCONNECTED:
                logging.info("Disconnected from server")
            return handler(ctx, *args, **kwargs)
        return wrapper
    return decorator
| 514 | 74 | 46 |
7abe810fd81a21cf6205eb72f6adcda029373c0e | 1,329 | py | Python | tests/template_tests/filter_tests/test_removetags.py | DasAllFolks/django | 9f427617e4559012e1c2fd8fce46cbe225d8515d | [
"BSD-3-Clause"
] | 1 | 2015-01-09T08:45:54.000Z | 2015-01-09T08:45:54.000Z | tests/template_tests/filter_tests/test_removetags.py | DasAllFolks/django | 9f427617e4559012e1c2fd8fce46cbe225d8515d | [
"BSD-3-Clause"
] | null | null | null | tests/template_tests/filter_tests/test_removetags.py | DasAllFolks/django | 9f427617e4559012e1c2fd8fce46cbe225d8515d | [
"BSD-3-Clause"
] | null | null | null | import warnings
from django.test import SimpleTestCase
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.safestring import mark_safe
from ..utils import render, setup
| 34.973684 | 100 | 0.514673 | import warnings
from django.test import SimpleTestCase
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.safestring import mark_safe
from ..utils import render, setup
class RemovetagsTests(SimpleTestCase):
    """Tests for the deprecated ``removetags`` filter, with and without autoescape."""

    @setup({'removetags01': '{{ a|removetags:"a b" }} {{ b|removetags:"a b" }}'})
    def test_removetags01(self):
        context = {
            'a': '<a>x</a> <p><b>y</b></p>',
            'b': mark_safe('<a>x</a> <p><b>y</b></p>'),
        }
        # The filter raises RemovedInDjango20Warning; silence it for the render.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', RemovedInDjango20Warning)
            rendered = render('removetags01', context)
        self.assertEqual(rendered, 'x <p>y</p> x <p>y</p>')

    @setup({'removetags02':
        '{% autoescape off %}{{ a|removetags:"a b" }} {{ b|removetags:"a b" }}{% endautoescape %}'})
    def test_removetags02(self):
        context = {
            'a': '<a>x</a> <p><b>y</b></p>',
            'b': mark_safe('<a>x</a> <p><b>y</b></p>'),
        }
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', RemovedInDjango20Warning)
            rendered = render('removetags02', context)
        self.assertEqual(rendered, 'x <p>y</p> x <p>y</p>')
| 824 | 282 | 23 |
ce27e0fd1a63289b96eec99e747558875b354786 | 88 | py | Python | P3_9.py | JosefinaMedina/EjerciciosComputacion-Python | 98f04171ce954e4eb9b20adfc98b0351e6016deb | [
"Apache-2.0"
] | null | null | null | P3_9.py | JosefinaMedina/EjerciciosComputacion-Python | 98f04171ce954e4eb9b20adfc98b0351e6016deb | [
"Apache-2.0"
] | null | null | null | P3_9.py | JosefinaMedina/EjerciciosComputacion-Python | 98f04171ce954e4eb9b20adfc98b0351e6016deb | [
"Apache-2.0"
] | null | null | null | import numpy as np
n=int(input("Dimension de la matriz: "))
print(np.identity(n)) | 17.6 | 41 | 0.670455 | import numpy as np
n=int(input("Dimension de la matriz: "))
print(np.identity(n)) | 0 | 0 | 0 |
02e08aa2c67dd5c15617f863e1ff3ce9467f1dfd | 2,557 | py | Python | src/horner.py | dhermes/k-compensated-de-casteljau | 8511f0c2c525ac24215f6307e80032329f97301d | [
"Apache-2.0"
] | 2 | 2020-02-22T15:45:20.000Z | 2020-12-03T07:56:01.000Z | src/horner.py | dhermes/k-compensated-de-casteljau | 8511f0c2c525ac24215f6307e80032329f97301d | [
"Apache-2.0"
] | null | null | null | src/horner.py | dhermes/k-compensated-de-casteljau | 8511f0c2c525ac24215f6307e80032329f97301d | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Performs Horner's method.
Horner's method computes
.. math::
p(x) = a_n x^n + \cdots a_1 x + a_0
via
.. math::
\begin{align*}
p_n &= a_n \\
p_k &= p_{k + 1} x + a_k \\
p(x) &= p_0
\end{align*}
This module provides both the standard version and a compensated version.
.. note::
This assumes throughout that ``coeffs`` is ordered from
:math:`a_n` to :math:`a_0`.
"""
import eft
| 22.628319 | 74 | 0.594056 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Performs Horner's method.
Horner's method computes
.. math::
p(x) = a_n x^n + \cdots a_1 x + a_0
via
.. math::
\begin{align*}
p_n &= a_n \\
p_k &= p_{k + 1} x + a_k \\
p(x) &= p_0
\end{align*}
This module provides both the standard version and a compensated version.
.. note::
This assumes throughout that ``coeffs`` is ordered from
:math:`a_n` to :math:`a_0`.
"""
import eft
def basic(x, coeffs):
    """Evaluate the polynomial with ``coeffs`` (ordered a_n .. a_0) at ``x``.

    Uses the standard (uncompensated) Horner recurrence; returns 0.0 for an
    empty coefficient list.
    """
    if not coeffs:
        return 0.0
    result = coeffs[0]
    for coefficient in coeffs[1:]:
        result = result * x + coefficient
    return result
def _compensated(x, coeffs):
    """Run Horner's recurrence with error-free transformations.

    Returns ``(value, e_pi, e_sigma)`` where ``value`` is the standard Horner
    result and ``e_pi`` / ``e_sigma`` collect the exact rounding errors of
    each multiplication / addition step.
    """
    if not coeffs:
        return 0.0, [], []
    value = coeffs[0]
    product_errors = []
    sum_errors = []
    for coefficient in coeffs[1:]:
        partial, prod_err = eft.multiply_eft(value, x)
        value, sum_err = eft.add_eft(partial, coefficient)
        product_errors.append(prod_err)
        sum_errors.append(sum_err)
    return value, product_errors, sum_errors
def compensated(x, coeffs):
    """Compensated Horner: the basic result plus a Horner-evaluated correction.

    The correction polynomial's coefficients are the per-step EFT error terms
    (product error + sum error) collected by ``_compensated``.
    """
    value, prod_errors, sum_errors = _compensated(x, coeffs)
    correction = 0.0
    for prod_err, sum_err in zip(prod_errors, sum_errors):
        correction = x * correction + (prod_err + sum_err)
    return value + correction
def compensated3(x, coeffs):
    """K=3 compensated Horner: filter the EFT errors twice, then sum 7 parts."""
    h1, p2, p3 = _compensated(x, coeffs)
    h2, p4, p5 = _compensated(x, p2)
    h3, p6, p7 = _compensated(x, p3)
    # The four leaf error polynomials are evaluated with plain Horner.
    leaves = [basic(x, leaf) for leaf in (p4, p5, p6, p7)]
    # 3-fold summation of all partial results, in tree order.
    return eft.sum_k([h1, h2, h3] + leaves, 3)
def compensated_k(x, coeffs, k):
    """K-fold compensated Horner via a binary tree of error polynomials.

    Node ``i`` "filters" its polynomial with EFTs, producing children
    ``2i`` and ``2i + 1``; the ``2**(k-1)`` leaf polynomials are evaluated
    with plain Horner, and all partial results are combined with K-fold
    summation in node order.
    """
    results = {}
    polynomials = {1: coeffs}
    # Interior nodes: peel off the two EFT error polynomials.
    for node in range(1, 2 ** (k - 1)):
        results[node], polynomials[2 * node], polynomials[2 * node + 1] = (
            _compensated(x, polynomials[node]))
    # Leaf nodes: standard Horner.
    for node in range(2 ** (k - 1), 2 ** k):
        results[node] = basic(x, polynomials[node])
    # K-fold summation of everything, preserving node order.
    ordered = [results[node] for node in range(1, 2 ** k)]
    return eft.sum_k(ordered, k)
| 1,471 | 0 | 115 |
4b5221cd58c1dfa6507ede60d4aff71e4b131d0f | 248 | py | Python | datalad/plugin/wtf.py | mikapfl/datalad | 7b407ecbbfbbea0789304a640bac721d1718e72d | [
"MIT"
] | 298 | 2015-01-25T17:36:29.000Z | 2022-03-20T03:38:47.000Z | datalad/plugin/wtf.py | mikapfl/datalad | 7b407ecbbfbbea0789304a640bac721d1718e72d | [
"MIT"
] | 6,387 | 2015-01-02T18:15:01.000Z | 2022-03-31T20:58:58.000Z | datalad/plugin/wtf.py | mikapfl/datalad | 7b407ecbbfbbea0789304a640bac721d1718e72d | [
"MIT"
] | 109 | 2015-01-25T17:49:40.000Z | 2022-03-06T06:54:54.000Z | import warnings
warnings.warn(
"datalad.plugin.wtf is deprecated and will be removed in a future "
"release. "
"Use the module from its new location datalad.local.wtf instead.",
DeprecationWarning)
from datalad.local.wtf import *
| 24.8 | 71 | 0.729839 | import warnings
warnings.warn(
"datalad.plugin.wtf is deprecated and will be removed in a future "
"release. "
"Use the module from its new location datalad.local.wtf instead.",
DeprecationWarning)
from datalad.local.wtf import *
| 0 | 0 | 0 |
84cf22b27aac3a65db381fcda7a1bc9a171a4dc0 | 588 | py | Python | rsi/state.py | nuke-makes-games/RSI.py | 340311412a0869fabaf3fa3c3030310a8a3e0c89 | [
"MIT"
] | 2 | 2018-09-05T16:19:25.000Z | 2020-02-04T17:18:22.000Z | rsi/state.py | nuke-makes-games/RSI.py | 340311412a0869fabaf3fa3c3030310a8a3e0c89 | [
"MIT"
] | 5 | 2020-05-13T23:46:06.000Z | 2022-01-24T16:04:42.000Z | rsi/state.py | nuke-makes-games/RSI.py | 340311412a0869fabaf3fa3c3030310a8a3e0c89 | [
"MIT"
] | 5 | 2019-06-19T11:03:29.000Z | 2020-08-15T15:58:32.000Z | from typing import List, Tuple, Dict, Any
from PIL import Image
| 36.75 | 90 | 0.559524 | from typing import List, Tuple, Dict, Any
from PIL import Image
class State(object):
    """One RSI state: its name, flags, sprite size and per-direction frames."""

    def __init__(self,
                 name: str,
                 size: Tuple[int, int],
                 directions: int = 1) -> None:
        self.name = name  # type: str
        self.flags = {}  # type: Dict[str, Any]
        self.size = size  # type: Tuple[int, int]
        self.directions = directions  # type: int
        # One independent (delays, icons) pair per direction.
        self.delays = [[] for _ in range(directions)]  # type: List[List[float]]
        self.icons = [[] for _ in range(directions)]  # type: List[List[Image.Image]]
| 476 | -1 | 49 |
f658e8ccbd73923fbc6b2c597d99bc868e8e814c | 22,841 | py | Python | flopy/modflow/mfoc.py | langevin/flopy | 2398a0b9a9294b4e2fb5c7e0228f0f42af45b80a | [
"BSD-3-Clause"
] | null | null | null | flopy/modflow/mfoc.py | langevin/flopy | 2398a0b9a9294b4e2fb5c7e0228f0f42af45b80a | [
"BSD-3-Clause"
] | null | null | null | flopy/modflow/mfoc.py | langevin/flopy | 2398a0b9a9294b4e2fb5c7e0228f0f42af45b80a | [
"BSD-3-Clause"
] | null | null | null | """
mfoc module. Contains the ModflowOc class. Note that the user can access
the ModflowOc class as `flopy.modflow.ModflowOc`.
Additional information for this MODFLOW package can be found at the `Online
MODFLOW Guide
<http://water.usgs.gov/ogw/modflow/MODFLOW-2005-Guide/index.html?oc.htm>`_.
"""
import sys
from flopy.mbase import Package
class ModflowOc(Package):
"""
MODFLOW Output Control Package Class.
Parameters
----------
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to which
this package will be added.
ihedfm : int
is a code for the format in which heads will be printed.
(default is 0).
iddnfm : int
is a code for the format in which heads will be printed.
(default is 0).
chedfm : string
is a character value that specifies the format for saving heads.
The format must contain 20 characters or less and must be a valid
Fortran format that is enclosed in parentheses. The format must be
enclosed in apostrophes if it contains one or more blanks or commas.
The optional word LABEL after the format is used to indicate that
each layer of output should be preceded with a line that defines the
output (simulation time, the layer being output, and so forth). If
there is no record specifying CHEDFM, then heads are written to a
binary (unformatted) file. Binary files are usually more compact than
text files, but they are not generally transportable among different
computer operating systems or different Fortran compilers.
(default is None)
cddnfm : string
is a character value that specifies the format for saving drawdown.
The format must contain 20 characters or less and must be a valid
Fortran format that is enclosed in parentheses. The format must be
enclosed in apostrophes if it contains one or more blanks or commas.
The optional word LABEL after the format is used to indicate that
each layer of output should be preceded with a line that defines the
output (simulation time, the layer being output, and so forth). If
there is no record specifying CDDNFM, then drawdowns are written to a
binary (unformatted) file. Binary files are usually more compact than
text files, but they are not generally transportable among different
computer operating systems or different Fortran compilers.
(default is None)
cboufm : string
is a character value that specifies the format for saving ibound.
The format must contain 20 characters or less and must be a valid
Fortran format that is enclosed in parentheses. The format must be
enclosed in apostrophes if it contains one or more blanks or commas.
The optional word LABEL after the format is used to indicate that
each layer of output should be preceded with a line that defines the
output (simulation time, the layer being output, and so forth). If
there is no record specifying CBOUFM, then ibounds are written to a
binary (unformatted) file. Binary files are usually more compact than
text files, but they are not generally transportable among different
computer operating systems or different Fortran compilers.
(default is None)
stress_period_data : dictionary of of lists
Dictionary key is a tuple with the zero-based period and step
(IPEROC, ITSOC) for each print/save option list.
(default is {(0,0):['save head']})
The list can have any valid MODFLOW OC print/save option:
PRINT HEAD
PRINT DRAWDOWN
PRINT BUDGET
SAVE HEAD
SAVE DRAWDOWN
SAVE BUDGET
SAVE IBOUND
The lists can also include (1) DDREFERENCE in the list to reset
drawdown reference to the period and step and (2) a list of layers
for PRINT HEAD, SAVE HEAD, PRINT DRAWDOWN, SAVE DRAWDOWN, and
SAVE IBOUND.
The list is used for every stress period and time step after the
(IPEROC, ITSOC) tuple until a (IPEROC, ITSOC) tuple is entered with
and empty list.
compact : boolean
Save results in compact budget form. (default is True).
extension : list of strings
(default is ['oc','hds','ddn','cbc']).
unitnumber : list of ints
(default is [14, 51, 52, 53]).
Attributes
----------
Methods
-------
See Also
--------
Notes
-----
The "words" method for specifying output control is the only option
available. Also, the "compact" budget should normally be used as it
produces files that are typically much smaller. The compact budget form is
also a requirement for using the MODPATH particle tracking program.
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> spd = {(0, 0): ['print head'],
... (0, 1): [],
... (0, 249): ['print head'],
... (0, 250): [],
... (0, 499): ['print head', 'save ibound'],
... (0, 500): [],
... (0, 749): ['print head', 'ddreference'],
... (0, 750): [],
... (0, 999): ['print head']}
>>> oc = flopy.modflow.ModflowOc3(m, stress_period_data=spd, cboufm='(20i5)')
"""
def __init__(self, model,\
ihedfm=0, iddnfm=0, chedfm=None, cddnfm=None,\
cboufm=None, compact=True,\
stress_period_data={(0,0):['save head']},\
extension=['oc','hds','ddn','cbc'],\
unitnumber=[14, 51, 52, 53]):
"""
Package constructor.
"""
# Call ancestor's init to set self.parent,
# extension, name and unit number
hds_fmt = 'DATA(BINARY)'
ddn_fmt = 'DATA(BINARY)'
if chedfm is not None:
hds_fmt = 'DATA'
if cddnfm is not None:
ddn_fmt = 'DATA'
ibouun = 0
ibndsav = False
for key in list(stress_period_data.keys()):
t = stress_period_data[key]
if len(t) > 0:
for option in t:
if 'ibound' in option.lower():
ibndsav = True
break
name = ['OC', hds_fmt, ddn_fmt, 'DATA(BINARY)']
extra = ['', 'REPLACE', 'REPLACE', 'REPLACE']
if ibndsav == True:
if cboufm == None:
name.append('DATA(BINARY)')
else:
name.append('DATA')
extension.append('ibo')
unitnumber.append(114)
ibouun = unitnumber[-1]
extra.append('REPLACE')
Package.__init__(self, model, extension=extension, name=name, unit_number=unitnumber,
extra=extra) # Call ancestor's init to set self.parent, extension, name and unit number
self.heading = '# Output control package file'+\
' for MODFLOW, generated by Flopy.'
self.url = 'oc.htm'
self.ihedfm = ihedfm
self.iddnfm = iddnfm
self.chedfm = chedfm
self.cddnfm = cddnfm
self.ibouun = ibouun
self.cboufm = cboufm
self.compact = compact
self.stress_period_data = stress_period_data
self.parent.add_package(self)
def write_file(self):
"""
Write the file.
"""
f_oc = open(self.fn_path, 'w')
f_oc.write('{}\n'.format(self.heading))
# write options
f_oc.write('HEAD PRINT FORMAT {0:3.0f}\n'\
.format(self.ihedfm))
if self.chedfm is not None:
f_oc.write('HEAD SAVE FORMAT {0:20s} LABEL\n'\
.format(self.chedfm))
f_oc.write('HEAD SAVE UNIT {0:5.0f}\n'\
.format(self.unit_number[1]))
f_oc.write('DRAWDOWN PRINT FORMAT {0:3.0f}\n'\
.format(self.iddnfm))
if self.cddnfm is not None:
f_oc.write('DRAWDOWN SAVE FORMAT {0:20s} LABEL\n'\
.format(self.cddnfm))
f_oc.write('DRAWDOWN SAVE UNIT {0:5.0f}\n'\
.format(self.unit_number[2]))
if self.ibouun > 0:
if self.cboufm is not None:
f_oc.write('IBOUND SAVE FORMAT {0:20s} LABEL\n'\
.format(self.cboufm))
f_oc.write('IBOUND SAVE UNIT {0:5.0f}\n'\
.format(self.unit_number[4]))
if self.compact:
f_oc.write('COMPACT BUDGET FILES\n')
# add a line separator between header and stress
# period data
f_oc.write('\n')
#write the transient sequence described by the data dict
nr, nc, nl, nper = self.parent.get_nrow_ncol_nlay_nper()
nstp = self.parent.get_package('DIS').nstp
keys = list(self.stress_period_data.keys())
keys.sort()
data = []
lines = ''
ddnref = ''
for kper in range(nper):
for kstp in range(nstp[kper]):
kperkstp = (kper, kstp)
if kperkstp in keys:
data = self.stress_period_data[kperkstp]
if not isinstance(data, list):
data = [data]
lines = ''
if len(data) > 0:
for item in data:
if 'DDREFERENCE' in item.upper():
ddnref = item.lower()
else:
lines += '{}\n'.format(item)
if len(lines) > 0:
f_oc.write('period {} step {} {}\n'.format(kper+1, kstp+1, ddnref))
f_oc.write(lines)
f_oc.write('\n')
ddnref = ''
# close oc file
f_oc.close()
@staticmethod
def load(f, model, nper=None, nstp=None, nlay=None, ext_unit_dict=None):
"""
Load an existing package.
Parameters
----------
f : filename or file handle
File to load.
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to
which this package will be added.
nper : int
The number of stress periods. If nper is None, then nper will be
obtained from the model object. (default is None).
nstp : list
List containing the number of time steps in each stress period.
If nstp is None, then nstp will be obtained from the DIS package
attached to the model object. (default is None).
nlay : int
The number of model layers. If nlay is None, then nnlay will be
obtained from the model object. nlay only needs to be specified
if an empty model object is passed in and the oc file being loaded
is defined using numeric codes. (default is None).
ext_unit_dict : dictionary, optional
If the arrays in the file are specified using EXTERNAL,
or older style array control records, then `f` should be a file
handle. In this case ext_unit_dict is required, which can be
constructed using the function
:class:`flopy.utils.mfreadnam.parsenamefile`.
Returns
-------
oc : ModflowOc object
ModflowOc object.
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> oc = flopy.modflow.ModflowOc.load('test.oc', m)
"""
if model.verbose:
sys.stdout.write('loading oc package file...\n')
if nper is None:
nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper()
if nstp is None:
nstp = model.get_package('DIS').nstp.array
#initialize
ihedfm = 0
iddnfm = 0
ihedun = 0
iddnun = 0
ibouun = 0
compact = False
chedfm = None
cddnfm = None
cboufm = None
words = []
wordrec = []
numericformat = False
ihedfm, iddnfm = 0, 0
stress_period_data = {}
#open file
if not hasattr(f, 'read'):
filename = f
f = open(filename, 'r')
# read header
ipos = f.tell()
while True:
line = f.readline()
if line[0] == '#':
continue
elif line[0] == []:
continue
else:
lnlst = line.strip().split()
try:
ihedfm, iddnfm = int(lnlst[0]), int(lnlst[1])
ihedun, iddnun = int(lnlst[2]), int(lnlst[3])
numericformat = True
except:
f.seek(ipos)
pass
# exit so the remaining data can be read
# from the file based on numericformat
break
# set pointer to current position in the OC file
ipos = f.tell()
#process each line
lines = []
if numericformat == True:
for iperoc in range(nper):
for itsoc in range(nstp[iperoc]):
line = f.readline()
lnlst = line.strip().split()
incode, ihddfl = int(lnlst[0]), int(lnlst[1])
ibudfl, icbcfl = int(lnlst[2]), int(lnlst[3])
# new print and save flags are needed if incode is not
# less than 0.
if incode >= 0:
lines = []
# use print options from the last time step
else:
if len(lines) > 0:
stress_period_data[(iperoc, itsoc)] = list(lines)
continue
# set print and save budget flags
if ibudfl != 0:
lines.append('PRINT BUDGET')
if icbcfl != 0:
lines.append('PRINT BUDGET')
if incode == 0:
line = f.readline()
lnlst = line.strip().split()
hdpr, ddpr = int(lnlst[0]), int(lnlst[1])
hdsv, ddsv = int(lnlst[2]), int(lnlst[3])
if hdpr != 0:
lines.append('PRINT HEAD')
if ddpr != 0:
lines.append('PRINT DRAWDOWN')
if hdsv != 0:
lines.append('SAVE HEAD')
if ddsv != 0:
lines.append('SAVE DRAWDOWN')
elif incode > 0:
headprint = ''
headsave = ''
ddnprint = ''
ddnsave = ''
for k in range(nlay):
line = f.readline()
lnlst = line.strip().split()
hdpr, ddpr = int(lnlst[0]), int(lnlst[1])
hdsv, ddsv = int(lnlst[2]), int(lnlst[3])
if hdpr != 0:
headprint += ' {}'.format(k+1)
if ddpr != 0:
ddnprint += ' {}'.format(k+1)
if hdsv != 0:
headsave += ' {}'.format(k+1)
if ddsv != 0:
ddnsave += ' {}'.format(k+1)
if len(headprint) > 0:
lines.append('PRINT HEAD'+headprint)
if len(ddnprint) > 0:
lines.append('PRINT DRAWDOWN'+ddnprint)
if len(headsave) > 0:
lines.append('SAVE HEAD'+headdave)
if len(ddnsave) > 0:
lines.append('SAVE DRAWDOWN'+ddnsave)
stress_period_data[(iperoc, itsoc)] = list(lines)
else:
iperoc, itsoc = 0, 0
while True:
line = f.readline()
if len(line) < 1:
break
lnlst = line.strip().split()
if line[0] == '#':
continue
# added by JJS 12/12/14 to avoid error when there is a blank line in the OC file
if lnlst == []:
continue
# end add
#dataset 1 values
elif ('HEAD' in lnlst[0].upper() and
'PRINT' in lnlst[1].upper() and
'FORMAT' in lnlst[2].upper()
):
ihedfm = int(lnlst[3])
elif ('HEAD' in lnlst[0].upper() and
'SAVE' in lnlst[1].upper() and
'FORMAT' in lnlst[2].upper()
):
chedfm = lnlst[3]
elif ('HEAD' in lnlst[0].upper() and
'SAVE' in lnlst[1].upper() and
'UNIT' in lnlst[2].upper()
):
ihedun = int(lnlst[3])
elif ('DRAWDOWN' in lnlst[0].upper() and
'PRINT' in lnlst[1].upper() and
'FORMAT' in lnlst[2].upper()
):
iddnfm = int(lnlst[3])
elif ('DRAWDOWN' in lnlst[0].upper() and
'SAVE' in lnlst[1].upper() and
'FORMAT' in lnlst[2].upper()
):
cddnfm = lnlst[3]
elif ('DRAWDOWN' in lnlst[0].upper() and
'SAVE' in lnlst[1].upper() and
'UNIT' in lnlst[2].upper()
):
iddnun = int(lnlst[3])
elif ('IBOUND' in lnlst[0].upper() and
'SAVE' in lnlst[1].upper() and
'FORMAT' in lnlst[2].upper()
):
cboufm = lnlst[3]
elif ('IBOUND' in lnlst[0].upper() and
'SAVE' in lnlst[1].upper() and
'UNIT' in lnlst[2].upper()
):
ibouun = int(lnlst[3])
elif 'COMPACT' in lnlst[0].upper():
compact = True
#dataset 2
elif 'PERIOD' in lnlst[0].upper():
if len(lines) > 0:
if iperoc > 0:
# create period step tuple
kperkstp = (iperoc-1, itsoc-1)
# save data
stress_period_data[kperkstp] = lines
# reset lines
lines = []
# turn off oc if required
if iperoc > 0:
if itsoc==nstp[iperoc-1]:
iperoc1 = iperoc + 1
itsoc1 = 1
else:
iperoc1 = iperoc
itsoc1 = itsoc + 1
else:
iperoc1, itsoc1 = iperoc, itsoc
# update iperoc and itsoc
iperoc = int(lnlst[1])
itsoc = int(lnlst[3])
# do not used data that exceeds nper
if iperoc > nper:
break
# add a empty list if necessary
iempty = False
if iperoc != iperoc1:
iempty = True
else:
if itsoc != itsoc1:
iempty = True
if iempty == True:
kperkstp = (iperoc1-1, itsoc1-1)
stress_period_data[kperkstp] = []
#dataset 3
elif 'PRINT' in lnlst[0].upper():
lines.append('{} {}'.format(lnlst[0].lower(), lnlst[1].lower()))
elif 'SAVE' in lnlst[0].upper() :
lines.append('{} {}'.format(lnlst[0].lower(), lnlst[1].lower()))
else:
print('Error encountered in OC import.')
print('Creating default OC package.')
return ModflowOc(model)
#store the last record in word
if len(lines) > 0:
# create period step tuple
kperkstp = (iperoc-1, itsoc-1)
# save data
stress_period_data[kperkstp] = lines
# add a empty list if necessary
iempty = False
if iperoc != iperoc1:
iempty = True
else:
if itsoc != itsoc1:
iempty = True
if iempty == True:
kperkstp = (iperoc1-1, itsoc1-1)
stress_period_data[kperkstp] = []
# reset unit numbers
unitnumber=[14, 51, 52, 53]
if ihedun > 0:
model.add_pop_key_list(ihedun)
if iddnun > 0:
model.add_pop_key_list(iddnun)
if ibouun > 0:
model.add_pop_key_list(ibouun)
if cboufm == None:
cboufm = True
# create instance of oc class
oc = ModflowOc(model, ihedfm=ihedfm, iddnfm=iddnfm,
chedfm=chedfm, cddnfm=cddnfm, cboufm=cboufm,
compact=compact,
stress_period_data=stress_period_data)
return oc | 39.585789 | 114 | 0.467668 | """
mfoc module. Contains the ModflowOc class. Note that the user can access
the ModflowOc class as `flopy.modflow.ModflowOc`.
Additional information for this MODFLOW package can be found at the `Online
MODFLOW Guide
<http://water.usgs.gov/ogw/modflow/MODFLOW-2005-Guide/index.html?oc.htm>`_.
"""
import sys
from flopy.mbase import Package
class ModflowOc(Package):
"""
MODFLOW Output Control Package Class.
Parameters
----------
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to which
this package will be added.
ihedfm : int
is a code for the format in which heads will be printed.
(default is 0).
iddnfm : int
is a code for the format in which heads will be printed.
(default is 0).
chedfm : string
is a character value that specifies the format for saving heads.
The format must contain 20 characters or less and must be a valid
Fortran format that is enclosed in parentheses. The format must be
enclosed in apostrophes if it contains one or more blanks or commas.
The optional word LABEL after the format is used to indicate that
each layer of output should be preceded with a line that defines the
output (simulation time, the layer being output, and so forth). If
there is no record specifying CHEDFM, then heads are written to a
binary (unformatted) file. Binary files are usually more compact than
text files, but they are not generally transportable among different
computer operating systems or different Fortran compilers.
(default is None)
cddnfm : string
is a character value that specifies the format for saving drawdown.
The format must contain 20 characters or less and must be a valid
Fortran format that is enclosed in parentheses. The format must be
enclosed in apostrophes if it contains one or more blanks or commas.
The optional word LABEL after the format is used to indicate that
each layer of output should be preceded with a line that defines the
output (simulation time, the layer being output, and so forth). If
there is no record specifying CDDNFM, then drawdowns are written to a
binary (unformatted) file. Binary files are usually more compact than
text files, but they are not generally transportable among different
computer operating systems or different Fortran compilers.
(default is None)
cboufm : string
is a character value that specifies the format for saving ibound.
The format must contain 20 characters or less and must be a valid
Fortran format that is enclosed in parentheses. The format must be
enclosed in apostrophes if it contains one or more blanks or commas.
The optional word LABEL after the format is used to indicate that
each layer of output should be preceded with a line that defines the
output (simulation time, the layer being output, and so forth). If
there is no record specifying CBOUFM, then ibounds are written to a
binary (unformatted) file. Binary files are usually more compact than
text files, but they are not generally transportable among different
computer operating systems or different Fortran compilers.
(default is None)
stress_period_data : dictionary of of lists
Dictionary key is a tuple with the zero-based period and step
(IPEROC, ITSOC) for each print/save option list.
(default is {(0,0):['save head']})
The list can have any valid MODFLOW OC print/save option:
PRINT HEAD
PRINT DRAWDOWN
PRINT BUDGET
SAVE HEAD
SAVE DRAWDOWN
SAVE BUDGET
SAVE IBOUND
The lists can also include (1) DDREFERENCE in the list to reset
drawdown reference to the period and step and (2) a list of layers
for PRINT HEAD, SAVE HEAD, PRINT DRAWDOWN, SAVE DRAWDOWN, and
SAVE IBOUND.
The list is used for every stress period and time step after the
(IPEROC, ITSOC) tuple until a (IPEROC, ITSOC) tuple is entered with
and empty list.
compact : boolean
Save results in compact budget form. (default is True).
extension : list of strings
(default is ['oc','hds','ddn','cbc']).
unitnumber : list of ints
(default is [14, 51, 52, 53]).
Attributes
----------
Methods
-------
See Also
--------
Notes
-----
The "words" method for specifying output control is the only option
available. Also, the "compact" budget should normally be used as it
produces files that are typically much smaller. The compact budget form is
also a requirement for using the MODPATH particle tracking program.
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> spd = {(0, 0): ['print head'],
... (0, 1): [],
... (0, 249): ['print head'],
... (0, 250): [],
... (0, 499): ['print head', 'save ibound'],
... (0, 500): [],
... (0, 749): ['print head', 'ddreference'],
... (0, 750): [],
... (0, 999): ['print head']}
>>> oc = flopy.modflow.ModflowOc3(m, stress_period_data=spd, cboufm='(20i5)')
"""
def __init__(self, model,\
ihedfm=0, iddnfm=0, chedfm=None, cddnfm=None,\
cboufm=None, compact=True,\
stress_period_data={(0,0):['save head']},\
extension=['oc','hds','ddn','cbc'],\
unitnumber=[14, 51, 52, 53]):
"""
Package constructor.
"""
# Call ancestor's init to set self.parent,
# extension, name and unit number
hds_fmt = 'DATA(BINARY)'
ddn_fmt = 'DATA(BINARY)'
if chedfm is not None:
hds_fmt = 'DATA'
if cddnfm is not None:
ddn_fmt = 'DATA'
ibouun = 0
ibndsav = False
for key in list(stress_period_data.keys()):
t = stress_period_data[key]
if len(t) > 0:
for option in t:
if 'ibound' in option.lower():
ibndsav = True
break
name = ['OC', hds_fmt, ddn_fmt, 'DATA(BINARY)']
extra = ['', 'REPLACE', 'REPLACE', 'REPLACE']
if ibndsav == True:
if cboufm == None:
name.append('DATA(BINARY)')
else:
name.append('DATA')
extension.append('ibo')
unitnumber.append(114)
ibouun = unitnumber[-1]
extra.append('REPLACE')
Package.__init__(self, model, extension=extension, name=name, unit_number=unitnumber,
extra=extra) # Call ancestor's init to set self.parent, extension, name and unit number
self.heading = '# Output control package file'+\
' for MODFLOW, generated by Flopy.'
self.url = 'oc.htm'
self.ihedfm = ihedfm
self.iddnfm = iddnfm
self.chedfm = chedfm
self.cddnfm = cddnfm
self.ibouun = ibouun
self.cboufm = cboufm
self.compact = compact
self.stress_period_data = stress_period_data
self.parent.add_package(self)
def __repr__( self ):
return 'Output control package class'
def write_file(self):
"""
Write the file.
"""
f_oc = open(self.fn_path, 'w')
f_oc.write('{}\n'.format(self.heading))
# write options
f_oc.write('HEAD PRINT FORMAT {0:3.0f}\n'\
.format(self.ihedfm))
if self.chedfm is not None:
f_oc.write('HEAD SAVE FORMAT {0:20s} LABEL\n'\
.format(self.chedfm))
f_oc.write('HEAD SAVE UNIT {0:5.0f}\n'\
.format(self.unit_number[1]))
f_oc.write('DRAWDOWN PRINT FORMAT {0:3.0f}\n'\
.format(self.iddnfm))
if self.cddnfm is not None:
f_oc.write('DRAWDOWN SAVE FORMAT {0:20s} LABEL\n'\
.format(self.cddnfm))
f_oc.write('DRAWDOWN SAVE UNIT {0:5.0f}\n'\
.format(self.unit_number[2]))
if self.ibouun > 0:
if self.cboufm is not None:
f_oc.write('IBOUND SAVE FORMAT {0:20s} LABEL\n'\
.format(self.cboufm))
f_oc.write('IBOUND SAVE UNIT {0:5.0f}\n'\
.format(self.unit_number[4]))
if self.compact:
f_oc.write('COMPACT BUDGET FILES\n')
# add a line separator between header and stress
# period data
f_oc.write('\n')
#write the transient sequence described by the data dict
nr, nc, nl, nper = self.parent.get_nrow_ncol_nlay_nper()
nstp = self.parent.get_package('DIS').nstp
keys = list(self.stress_period_data.keys())
keys.sort()
data = []
lines = ''
ddnref = ''
for kper in range(nper):
for kstp in range(nstp[kper]):
kperkstp = (kper, kstp)
if kperkstp in keys:
data = self.stress_period_data[kperkstp]
if not isinstance(data, list):
data = [data]
lines = ''
if len(data) > 0:
for item in data:
if 'DDREFERENCE' in item.upper():
ddnref = item.lower()
else:
lines += '{}\n'.format(item)
if len(lines) > 0:
f_oc.write('period {} step {} {}\n'.format(kper+1, kstp+1, ddnref))
f_oc.write(lines)
f_oc.write('\n')
ddnref = ''
# close oc file
f_oc.close()
@staticmethod
def load(f, model, nper=None, nstp=None, nlay=None, ext_unit_dict=None):
"""
Load an existing package.
Parameters
----------
f : filename or file handle
File to load.
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to
which this package will be added.
nper : int
The number of stress periods. If nper is None, then nper will be
obtained from the model object. (default is None).
nstp : list
List containing the number of time steps in each stress period.
If nstp is None, then nstp will be obtained from the DIS package
attached to the model object. (default is None).
nlay : int
The number of model layers. If nlay is None, then nnlay will be
obtained from the model object. nlay only needs to be specified
if an empty model object is passed in and the oc file being loaded
is defined using numeric codes. (default is None).
ext_unit_dict : dictionary, optional
If the arrays in the file are specified using EXTERNAL,
or older style array control records, then `f` should be a file
handle. In this case ext_unit_dict is required, which can be
constructed using the function
:class:`flopy.utils.mfreadnam.parsenamefile`.
Returns
-------
oc : ModflowOc object
ModflowOc object.
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> oc = flopy.modflow.ModflowOc.load('test.oc', m)
"""
if model.verbose:
sys.stdout.write('loading oc package file...\n')
if nper is None:
nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper()
if nstp is None:
nstp = model.get_package('DIS').nstp.array
#initialize
ihedfm = 0
iddnfm = 0
ihedun = 0
iddnun = 0
ibouun = 0
compact = False
chedfm = None
cddnfm = None
cboufm = None
words = []
wordrec = []
numericformat = False
ihedfm, iddnfm = 0, 0
stress_period_data = {}
#open file
if not hasattr(f, 'read'):
filename = f
f = open(filename, 'r')
# read header
ipos = f.tell()
while True:
line = f.readline()
if line[0] == '#':
continue
elif line[0] == []:
continue
else:
lnlst = line.strip().split()
try:
ihedfm, iddnfm = int(lnlst[0]), int(lnlst[1])
ihedun, iddnun = int(lnlst[2]), int(lnlst[3])
numericformat = True
except:
f.seek(ipos)
pass
# exit so the remaining data can be read
# from the file based on numericformat
break
# set pointer to current position in the OC file
ipos = f.tell()
#process each line
lines = []
if numericformat == True:
for iperoc in range(nper):
for itsoc in range(nstp[iperoc]):
line = f.readline()
lnlst = line.strip().split()
incode, ihddfl = int(lnlst[0]), int(lnlst[1])
ibudfl, icbcfl = int(lnlst[2]), int(lnlst[3])
# new print and save flags are needed if incode is not
# less than 0.
if incode >= 0:
lines = []
# use print options from the last time step
else:
if len(lines) > 0:
stress_period_data[(iperoc, itsoc)] = list(lines)
continue
# set print and save budget flags
if ibudfl != 0:
lines.append('PRINT BUDGET')
if icbcfl != 0:
lines.append('PRINT BUDGET')
if incode == 0:
line = f.readline()
lnlst = line.strip().split()
hdpr, ddpr = int(lnlst[0]), int(lnlst[1])
hdsv, ddsv = int(lnlst[2]), int(lnlst[3])
if hdpr != 0:
lines.append('PRINT HEAD')
if ddpr != 0:
lines.append('PRINT DRAWDOWN')
if hdsv != 0:
lines.append('SAVE HEAD')
if ddsv != 0:
lines.append('SAVE DRAWDOWN')
elif incode > 0:
headprint = ''
headsave = ''
ddnprint = ''
ddnsave = ''
for k in range(nlay):
line = f.readline()
lnlst = line.strip().split()
hdpr, ddpr = int(lnlst[0]), int(lnlst[1])
hdsv, ddsv = int(lnlst[2]), int(lnlst[3])
if hdpr != 0:
headprint += ' {}'.format(k+1)
if ddpr != 0:
ddnprint += ' {}'.format(k+1)
if hdsv != 0:
headsave += ' {}'.format(k+1)
if ddsv != 0:
ddnsave += ' {}'.format(k+1)
if len(headprint) > 0:
lines.append('PRINT HEAD'+headprint)
if len(ddnprint) > 0:
lines.append('PRINT DRAWDOWN'+ddnprint)
if len(headsave) > 0:
lines.append('SAVE HEAD'+headdave)
if len(ddnsave) > 0:
lines.append('SAVE DRAWDOWN'+ddnsave)
stress_period_data[(iperoc, itsoc)] = list(lines)
else:
iperoc, itsoc = 0, 0
while True:
line = f.readline()
if len(line) < 1:
break
lnlst = line.strip().split()
if line[0] == '#':
continue
# added by JJS 12/12/14 to avoid error when there is a blank line in the OC file
if lnlst == []:
continue
# end add
#dataset 1 values
elif ('HEAD' in lnlst[0].upper() and
'PRINT' in lnlst[1].upper() and
'FORMAT' in lnlst[2].upper()
):
ihedfm = int(lnlst[3])
elif ('HEAD' in lnlst[0].upper() and
'SAVE' in lnlst[1].upper() and
'FORMAT' in lnlst[2].upper()
):
chedfm = lnlst[3]
elif ('HEAD' in lnlst[0].upper() and
'SAVE' in lnlst[1].upper() and
'UNIT' in lnlst[2].upper()
):
ihedun = int(lnlst[3])
elif ('DRAWDOWN' in lnlst[0].upper() and
'PRINT' in lnlst[1].upper() and
'FORMAT' in lnlst[2].upper()
):
iddnfm = int(lnlst[3])
elif ('DRAWDOWN' in lnlst[0].upper() and
'SAVE' in lnlst[1].upper() and
'FORMAT' in lnlst[2].upper()
):
cddnfm = lnlst[3]
elif ('DRAWDOWN' in lnlst[0].upper() and
'SAVE' in lnlst[1].upper() and
'UNIT' in lnlst[2].upper()
):
iddnun = int(lnlst[3])
elif ('IBOUND' in lnlst[0].upper() and
'SAVE' in lnlst[1].upper() and
'FORMAT' in lnlst[2].upper()
):
cboufm = lnlst[3]
elif ('IBOUND' in lnlst[0].upper() and
'SAVE' in lnlst[1].upper() and
'UNIT' in lnlst[2].upper()
):
ibouun = int(lnlst[3])
elif 'COMPACT' in lnlst[0].upper():
compact = True
#dataset 2
elif 'PERIOD' in lnlst[0].upper():
if len(lines) > 0:
if iperoc > 0:
# create period step tuple
kperkstp = (iperoc-1, itsoc-1)
# save data
stress_period_data[kperkstp] = lines
# reset lines
lines = []
# turn off oc if required
if iperoc > 0:
if itsoc==nstp[iperoc-1]:
iperoc1 = iperoc + 1
itsoc1 = 1
else:
iperoc1 = iperoc
itsoc1 = itsoc + 1
else:
iperoc1, itsoc1 = iperoc, itsoc
# update iperoc and itsoc
iperoc = int(lnlst[1])
itsoc = int(lnlst[3])
# do not used data that exceeds nper
if iperoc > nper:
break
# add a empty list if necessary
iempty = False
if iperoc != iperoc1:
iempty = True
else:
if itsoc != itsoc1:
iempty = True
if iempty == True:
kperkstp = (iperoc1-1, itsoc1-1)
stress_period_data[kperkstp] = []
#dataset 3
elif 'PRINT' in lnlst[0].upper():
lines.append('{} {}'.format(lnlst[0].lower(), lnlst[1].lower()))
elif 'SAVE' in lnlst[0].upper() :
lines.append('{} {}'.format(lnlst[0].lower(), lnlst[1].lower()))
else:
print('Error encountered in OC import.')
print('Creating default OC package.')
return ModflowOc(model)
#store the last record in word
if len(lines) > 0:
# create period step tuple
kperkstp = (iperoc-1, itsoc-1)
# save data
stress_period_data[kperkstp] = lines
# add a empty list if necessary
iempty = False
if iperoc != iperoc1:
iempty = True
else:
if itsoc != itsoc1:
iempty = True
if iempty == True:
kperkstp = (iperoc1-1, itsoc1-1)
stress_period_data[kperkstp] = []
# reset unit numbers
unitnumber=[14, 51, 52, 53]
if ihedun > 0:
model.add_pop_key_list(ihedun)
if iddnun > 0:
model.add_pop_key_list(iddnun)
if ibouun > 0:
model.add_pop_key_list(ibouun)
if cboufm == None:
cboufm = True
# create instance of oc class
oc = ModflowOc(model, ihedfm=ihedfm, iddnfm=iddnfm,
chedfm=chedfm, cddnfm=cddnfm, cboufm=cboufm,
compact=compact,
stress_period_data=stress_period_data)
return oc | 47 | 0 | 29 |
8349256563b8b4c78b3b7cdafe91df9d453f14f3 | 119 | py | Python | inz/schemas.py | matbur/inz | f6be1a685761f99f8c808d8b23f58debf7e19da2 | [
"MIT"
] | null | null | null | inz/schemas.py | matbur/inz | f6be1a685761f99f8c808d8b23f58debf7e19da2 | [
"MIT"
] | 2 | 2020-03-24T16:35:39.000Z | 2020-03-31T00:33:08.000Z | inz/schemas.py | matbur/inz | f6be1a685761f99f8c808d8b23f58debf7e19da2 | [
"MIT"
] | null | null | null | from typing import List
# Type aliases describing the nested-list serialization of a network:
# a neuron is a list of floats, a layer is a list of neurons, and a
# network is a list of layers.
NeuronSchema = List[float]
LayerSchema = List[NeuronSchema]
NetworkSchema = List[LayerSchema]
| 19.833333 | 33 | 0.806723 | from typing import List
NeuronSchema = List[float]
LayerSchema = List[NeuronSchema]
NetworkSchema = List[LayerSchema]
| 0 | 0 | 0 |
0fa4c4b707a2fe6efc8f8b9d1d08a4a4a7bba8fd | 1,433 | py | Python | starlite/datastructures.py | to-ph/starlite | 8169749468c1fb76c408c9939669e89e18ca6f02 | [
"MIT"
] | 57 | 2021-12-19T08:26:00.000Z | 2022-01-06T06:02:29.000Z | starlite/datastructures.py | to-ph/starlite | 8169749468c1fb76c408c9939669e89e18ca6f02 | [
"MIT"
] | 12 | 2021-12-15T19:29:11.000Z | 2022-01-06T18:16:05.000Z | starlite/datastructures.py | to-ph/starlite | 8169749468c1fb76c408c9939669e89e18ca6f02 | [
"MIT"
] | 4 | 2021-12-30T05:30:16.000Z | 2022-01-03T20:19:58.000Z | import os
from copy import copy
from typing import Any, AsyncIterator, Dict, Iterator, Optional, Union, cast
from pydantic import BaseModel, FilePath, validator
from starlette.datastructures import State as StarletteStateClass
| 26.537037 | 78 | 0.688067 | import os
from copy import copy
from typing import Any, AsyncIterator, Dict, Iterator, Optional, Union, cast
from pydantic import BaseModel, FilePath, validator
from starlette.datastructures import State as StarletteStateClass
class State(StarletteStateClass):
    """Application state container that supports shallow copying."""

    def __copy__(self) -> "State":
        """
        Hook for the builtin ``copy.copy``: duplicate the underlying
        ``_state`` dict and wrap it in a new instance of the same class.
        """
        duplicated_state = copy(self._state)
        return type(self)(duplicated_state)

    def copy(self) -> "State":
        """Return a shallow copy of this state object."""
        return self.__copy__()
class StarliteType(BaseModel):
    """Common pydantic base model for the datastructures in this module."""
    class Config:
        # allow non-pydantic field types (e.g. os.stat_result, iterators)
        arbitrary_types_allowed = True
class File(StarliteType):
    """Datastructure describing a file on disk to be returned by a handler."""
    # path must point at an existing file (pydantic's FilePath validates this)
    path: FilePath
    filename: str
    # filled in by the validator below when not supplied by the caller
    stat_result: Optional[os.stat_result] = None
    @validator("stat_result", always=True)
    def validate_status_code( # pylint: disable=no-self-argument, no-self-use
        cls, value: Optional[os.stat_result], values: Dict[str, Any]
    ) -> os.stat_result:
        """Set the stat_result value for the given filepath.

        Runs even when no value was passed (``always=True``); note the
        ``or`` means a falsy ``value`` is also replaced by a fresh stat.
        """
        return value or os.stat(cast(str, values.get("path")))
class Redirect(StarliteType):
    """Datastructure holding the target ``path`` of a redirect."""
    path: str
class Stream(StarliteType):
    """Datastructure wrapping a synchronous or asynchronous iterator."""
    # re-declares the same flag as the base Config so the iterator types
    # below are accepted as field values
    class Config:
        arbitrary_types_allowed = True
    iterator: Union[Iterator[Any], AsyncIterator[Any]]
class Template(StarliteType):
    """Datastructure pairing a template ``name`` with an optional ``context`` mapping."""
    name: str
    context: Optional[Dict[str, Any]]
| 0 | 1,061 | 138 |
b181d0030ad8cc4abf4c2b847f5e8fba3ba43456 | 3,497 | py | Python | Programs/MP2/MP2_NUMPY/mp2.py | dgasmith/SICM2-Software-Summer-School-2014 | af97770cbade3bf4a246f21e607e8be66c9df7da | [
"MIT"
] | 2 | 2015-07-16T14:00:27.000Z | 2016-01-10T20:21:48.000Z | Programs/MP2/MP2_NUMPY/mp2.py | dgasmith/SICM2-Software-Summer-School-2014 | af97770cbade3bf4a246f21e607e8be66c9df7da | [
"MIT"
] | null | null | null | Programs/MP2/MP2_NUMPY/mp2.py | dgasmith/SICM2-Software-Summer-School-2014 | af97770cbade3bf4a246f21e607e8be66c9df7da | [
"MIT"
] | null | null | null | #
# Author: Daniel G. A. Smith
# Created: 6/15/14
# Original content from:
# http://sirius.chem.vt.edu/wiki/doku.php?id=crawdad:programming
#
import numpy as np
from scipy import linalg as SLA
#Setup a few constats for the HF computation
nuclear = 8.002367061810450
ndocc = 5
print 'Reading in integrals...'
# Read in integrals
So = np.genfromtxt('S.dat', delimiter=',')
To = np.genfromtxt('T.dat', delimiter=',')
Vo = np.genfromtxt('V.dat', delimiter=',')
Io = np.genfromtxt('eri.dat', delimiter=',')
#### Normal integrals
S = make_array(So)
T = make_array(To)
V = make_array(Vo)
### ERI
sh = []
for x in range(4):
sh.append(Io[:,x].astype(np.int) - 1)
### 8 fold symmetry
I = np.zeros(tuple(np.max(x)+1 for x in sh))
I[(sh[0], sh[1], sh[2], sh[3])] = Io[:, -1]
I[(sh[0], sh[1], sh[3], sh[2])] = Io[:, -1]
I[(sh[1], sh[0], sh[2], sh[3])] = Io[:, -1]
I[(sh[1], sh[0], sh[3], sh[2])] = Io[:, -1]
I[(sh[3], sh[2], sh[1], sh[0])] = Io[:, -1]
I[(sh[3], sh[2], sh[0], sh[1])] = Io[:, -1]
I[(sh[2], sh[3], sh[1], sh[0])] = Io[:, -1]
I[(sh[2], sh[3], sh[0], sh[1])] = Io[:, -1]
print '..Finished reading in integrals.\n'
# Compute Hcore
H = T + V
# Orthogonalizer A = S^-1/2
# Option 1
# Use built-in numpy functions
A = np.matrix(SLA.sqrtm(S)).I.real
# Option 2
# As coded from the website
# S_evals, S_evecs = SLA.eigh(S)
# S_evals = np.power(S_evals, -0.5)
# S_evals = np.diagflat(S_evals)
# S_evecs = np.matrix(S_evecs)
# A = S_evecs * S_evals * S_evecs.T
# Calculate initial core guess
# Using the matrix class
# * is equivalent to matrix multiplication
Hp = A * H * A
e,C2 = SLA.eigh(Hp)
C = A * C2
D = C[:, :ndocc] * C[:, :ndocc].T
print('\nStarting SCF iterationations\n')
Escf = 0.0
Enuc = nuclear
Eold = 0.0
maxiteration = 30
E_conv = 1E-10
for iteration in range(1, maxiteration + 1):
# Fock Build
J = np.einsum('pqrs,rs', I, D)
K = np.einsum('pqrs,qs', I, D)
F = H + J * 2 - K
Escf = np.einsum('ij,ij->', F + H, D) + Enuc
# Roothaan Update
print('@RHF Iteration %3d: Energy = %24.16f dE = %11.3E' % (iteration, Escf, Escf - Eold))
if (abs(Escf - Eold) < E_conv):
break
Eold = Escf
# New guess
Fp = A * F * A
e, C2 = SLA.eigh(Fp)
C = A * C2
D = C[:, :ndocc] * C[:, :ndocc].T
print 'SCF Final Energy %5.10f' % Escf
print '\nComputing MP2 energy...'
# Split eigenvectors and eigenvalues into o and v
Co = C[:, :ndocc]
Cv = C[:, ndocc:]
Eocc = e[:ndocc]
Evirt = e[ndocc:]
# Complete the AOpqrs -> MOiajb step
# "Noddy" N^8 algorithm
# MO = np.einsum('sB,rJ,qA,pI,pqrs->IAJB', Cv, Co, Cv, Co, I)
# N^5 algorithm
MO = np.einsum('rJ,pqrs->pqJs', Co, I)
MO = np.einsum('pI,pqJs->IqJs', Co, MO)
MO = np.einsum('sB,IqJs->IqJB', Cv, MO)
MO = np.einsum('qA,IqJB->IAJB', Cv, MO)
# Calculate energy denominators and MP2 energy
epsilon = 1/(Eocc.reshape(-1,1,1,1) - Evirt.reshape(-1,1,1) + Eocc.reshape(-1,1) - Evirt)
# Comput numerator
tmp_MP2 = 2*np.einsum('iajb,iajb->iajb', MO, MO)
tmp_MP2 -= np.einsum('iajb,ibja->ibja', MO, MO)
MP2corr = np.einsum('iajb,iajb->', tmp_MP2, epsilon)
Emp2 = MP2corr + Escf
print 'MP2 correlation energy: %.8f' % MP2corr
print 'MP2 total energy: %.8f' % Emp2
| 22.707792 | 94 | 0.592222 | #
# Author: Daniel G. A. Smith
# Created: 6/15/14
# Original content from:
# http://sirius.chem.vt.edu/wiki/doku.php?id=crawdad:programming
#
import numpy as np
from scipy import linalg as SLA
#Setup a few constats for the HF computation
nuclear = 8.002367061810450
ndocc = 5
print 'Reading in integrals...'
# Read in integrals
So = np.genfromtxt('S.dat', delimiter=',')
To = np.genfromtxt('T.dat', delimiter=',')
Vo = np.genfromtxt('V.dat', delimiter=',')
Io = np.genfromtxt('eri.dat', delimiter=',')
def make_array(arr):
I1 = arr[:, 0].astype(np.int) - 1
I2 = arr[:, 1].astype(np.int) - 1
out = np.zeros((np.max(I1) + 1, np.max(I2) + 1))
# 2 fold symmetry
# Use numpy advanced indexing
out[(I2,I1)] = arr[:, 2]
out[(I1,I2)] = arr[:, 2]
return np.matrix(out)
#### Normal integrals
S = make_array(So)
T = make_array(To)
V = make_array(Vo)
### ERI
sh = []
for x in range(4):
sh.append(Io[:,x].astype(np.int) - 1)
### 8 fold symmetry
I = np.zeros(tuple(np.max(x)+1 for x in sh))
I[(sh[0], sh[1], sh[2], sh[3])] = Io[:, -1]
I[(sh[0], sh[1], sh[3], sh[2])] = Io[:, -1]
I[(sh[1], sh[0], sh[2], sh[3])] = Io[:, -1]
I[(sh[1], sh[0], sh[3], sh[2])] = Io[:, -1]
I[(sh[3], sh[2], sh[1], sh[0])] = Io[:, -1]
I[(sh[3], sh[2], sh[0], sh[1])] = Io[:, -1]
I[(sh[2], sh[3], sh[1], sh[0])] = Io[:, -1]
I[(sh[2], sh[3], sh[0], sh[1])] = Io[:, -1]
print '..Finished reading in integrals.\n'
# Compute Hcore
H = T + V
# Orthogonalizer A = S^-1/2
# Option 1
# Use built-in numpy functions
A = np.matrix(SLA.sqrtm(S)).I.real
# Option 2
# As coded from the website
# S_evals, S_evecs = SLA.eigh(S)
# S_evals = np.power(S_evals, -0.5)
# S_evals = np.diagflat(S_evals)
# S_evecs = np.matrix(S_evecs)
# A = S_evecs * S_evals * S_evecs.T
# Calculate initial core guess
# Using the matrix class
# * is equivalent to matrix multiplication
Hp = A * H * A
e,C2 = SLA.eigh(Hp)
C = A * C2
D = C[:, :ndocc] * C[:, :ndocc].T
print('\nStarting SCF iterationations\n')
Escf = 0.0
Enuc = nuclear
Eold = 0.0
maxiteration = 30
E_conv = 1E-10
for iteration in range(1, maxiteration + 1):
# Fock Build
J = np.einsum('pqrs,rs', I, D)
K = np.einsum('pqrs,qs', I, D)
F = H + J * 2 - K
Escf = np.einsum('ij,ij->', F + H, D) + Enuc
# Roothaan Update
print('@RHF Iteration %3d: Energy = %24.16f dE = %11.3E' % (iteration, Escf, Escf - Eold))
if (abs(Escf - Eold) < E_conv):
break
Eold = Escf
# New guess
Fp = A * F * A
e, C2 = SLA.eigh(Fp)
C = A * C2
D = C[:, :ndocc] * C[:, :ndocc].T
print 'SCF Final Energy %5.10f' % Escf
print '\nComputing MP2 energy...'
# Split eigenvectors and eigenvalues into o and v
Co = C[:, :ndocc]
Cv = C[:, ndocc:]
Eocc = e[:ndocc]
Evirt = e[ndocc:]
# Complete the AOpqrs -> MOiajb step
# "Noddy" N^8 algorithm
# MO = np.einsum('sB,rJ,qA,pI,pqrs->IAJB', Cv, Co, Cv, Co, I)
# N^5 algorithm
MO = np.einsum('rJ,pqrs->pqJs', Co, I)
MO = np.einsum('pI,pqJs->IqJs', Co, MO)
MO = np.einsum('sB,IqJs->IqJB', Cv, MO)
MO = np.einsum('qA,IqJB->IAJB', Cv, MO)
# Calculate energy denominators and MP2 energy
epsilon = 1/(Eocc.reshape(-1,1,1,1) - Evirt.reshape(-1,1,1) + Eocc.reshape(-1,1) - Evirt)
# Comput numerator
tmp_MP2 = 2*np.einsum('iajb,iajb->iajb', MO, MO)
tmp_MP2 -= np.einsum('iajb,ibja->ibja', MO, MO)
MP2corr = np.einsum('iajb,iajb->', tmp_MP2, epsilon)
Emp2 = MP2corr + Escf
print 'MP2 correlation energy: %.8f' % MP2corr
print 'MP2 total energy: %.8f' % Emp2
| 269 | 0 | 23 |
3863ce9f8d10cfb3ae982a92420729f09cb0102c | 4,727 | py | Python | pyrobolearn/models/dmp/rhythmic_dmp.py | Pandinosaurus/pyrobolearn | 9cd7c060723fda7d2779fa255ac998c2c82b8436 | [
"Apache-2.0"
] | 2 | 2021-01-21T21:08:30.000Z | 2022-03-29T16:45:49.000Z | pyrobolearn/models/dmp/rhythmic_dmp.py | Pandinosaurus/pyrobolearn | 9cd7c060723fda7d2779fa255ac998c2c82b8436 | [
"Apache-2.0"
] | null | null | null | pyrobolearn/models/dmp/rhythmic_dmp.py | Pandinosaurus/pyrobolearn | 9cd7c060723fda7d2779fa255ac998c2c82b8436 | [
"Apache-2.0"
] | 1 | 2020-09-29T21:25:39.000Z | 2020-09-29T21:25:39.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Define the rhythmic dynamic movement primitive.
"""
import numpy as np
from pyrobolearn.models.dmp.canonical_systems import RhythmicCS
from pyrobolearn.models.dmp.forcing_terms import RhythmicForcingTerm
from pyrobolearn.models.dmp.dmp import DMP
__author__ = "Brian Delhaisse"
__copyright__ = "Copyright 2018, PyRoboLearn"
__credits__ = ["Brian Delhaisse"]
__license__ = "GNU GPLv3"
__version__ = "1.0.0"
__maintainer__ = "Brian Delhaisse"
__email__ = "briandelhaisse@gmail.com"
__status__ = "Development"
class RhythmicDMP(DMP):
    r"""Rhythmic Dynamic Movement Primitive

    A rhythmic DMP uses the same transformation system as general DMPs:

    .. math:: \tau^2 \ddot{y} = K (g - y) - D \tau \dot{y} + f(s)

    with time-scaling factor :math:`\tau`, stiffness :math:`K`, damping
    :math:`D`, and position/velocity/acceleration :math:`y, \dot{y},
    \ddot{y}` per DoF. The nonlinear forcing term is

    .. math:: f(s) = \frac{\sum_i \psi_i(s) w_i}{\sum_i \psi_i(s)} a

    where :math:`w_i` are learnable weights, :math:`a` is the amplitude,
    and the basis functions are the periodic bumps

    .. math:: \psi_i(s) = \exp \left( - h_i (\cos(s - c_i) - 1) \right)

    with centers :math:`c_i` and concentrations :math:`h_i`. The phase
    :math:`s` is driven by the rhythmic canonical system
    :math:`\tau \dot{s} = 1`. All differential equations are integrated
    with Euler's method.

    References:
        [1] "Dynamical movement primitives: Learning attractor models for motor behaviors", Ijspeert et al., 2013
    """

    def __init__(self, num_dmps, num_basis, dt=0.01, y0=0, goal=1,
                 forcing_terms=None, stiffness=None, damping=None):
        """Initialize the rhythmic DMP.

        Args:
            num_dmps (int): number of DMPs
            num_basis (int, list[int]): number of basis functions, either one
                shared int or one value per DMP
            dt (float): step size for Euler integration
            y0 (float, np.array): initial position(s)
            goal (float, np.array): goal(s)
            forcing_terms (list, ForcingTerm): pre-built forcing terms; when
                None they are created from `num_basis`
            stiffness (float): stiffness coefficient
            damping (float): damping coefficient
        """
        # the canonical system drives the phase of every forcing term
        cs = RhythmicCS(dt=dt)
        if forcing_terms is None:
            forcing_terms = self._make_forcing_terms(cs, num_dmps, num_basis)
        # delegate state setup to the generic DMP base class
        super(RhythmicDMP, self).__init__(canonical_system=cs, forcing_term=forcing_terms, y0=y0, goal=goal,
                                          stiffness=stiffness, damping=damping)

    @staticmethod
    def _make_forcing_terms(cs, num_dmps, num_basis):
        """Build one rhythmic forcing term per DMP from `num_basis`."""
        if isinstance(num_basis, int):
            return [RhythmicForcingTerm(cs, num_basis) for _ in range(num_dmps)]
        if not isinstance(num_basis, (np.ndarray, list, tuple, set)):
            raise TypeError("Expecting 'num_basis' to be an int, list, tuple, np.array or set.")
        if len(num_basis) != num_dmps:
            raise ValueError("The length of th list of number of basis doesn't match the number of DMPs")
        return [RhythmicForcingTerm(cs, n_basis) for n_basis in num_basis]

    def get_scaling_term(self, new_goal=None):
        """
        Return the scaling term for the forcing term. Rhythmic forcing terms
        are non-diminishing, so this is simply a vector of ones.
        """
        return np.ones(self.num_dmps)

    def _generate_goal(self, y_des):
        """Generate the goal for path imitation.

        For rhythmic DMPs the goal of each DoF is the midpoint between the
        minimum and maximum of its desired trajectory, ignoring NaNs.

        Args:
            y_des (float[M,T]): desired trajectory (shape [num_dmps, timesteps])

        Returns:
            float[M]: goal position for each DMP
        """
        goal = np.zeros(self.num_dmps)
        for idx in range(self.num_dmps):
            valid = y_des[idx][~np.isnan(y_des[idx])]
            goal[idx] = .5 * (valid.min() + valid.max())
        return goal
| 41.104348 | 118 | 0.649672 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Define the rhythmic dynamic movement primitive.
"""
import numpy as np
from pyrobolearn.models.dmp.canonical_systems import RhythmicCS
from pyrobolearn.models.dmp.forcing_terms import RhythmicForcingTerm
from pyrobolearn.models.dmp.dmp import DMP
__author__ = "Brian Delhaisse"
__copyright__ = "Copyright 2018, PyRoboLearn"
__credits__ = ["Brian Delhaisse"]
__license__ = "GNU GPLv3"
__version__ = "1.0.0"
__maintainer__ = "Brian Delhaisse"
__email__ = "briandelhaisse@gmail.com"
__status__ = "Development"
class RhythmicDMP(DMP):
r"""Rhythmic Dynamic Movement Primitive
Rhythmic DMPs have the same mathematical formulation as general DMPs, which is given by:
.. math:: \tau^2 \ddot{y} = K (g - y) - D \tau \dot{y} + f(s)
where :math:`\tau` is a scaling factor that allows to slow down or speed up the reproduced movement, :math:`K`
is the stiffness coefficient, :math:`D` is the damping coefficient, :math:`y, \dot{y}, \ddot{y}` are the position,
velocity, and acceleration of a DoF, and :math:`f(s)` is the non-linear forcing term.
However, the forcing term in the case of rhythmic DMPs is given by:
.. math:: f(s) = \frac{\sum_i \psi_i(s) w_i}{\sum_i \psi_i(s)} a
where :math:`w` are the learnable weight parameters, and :math:`\psi` are the basis functions evaluated at the
given input phase variable :math:`s`, and :math:`a` is the amplitude.
The basis functions (in the rhythmic case) are given by:
.. math:: \psi_i(s) = \exp \left( - h_i (\cos(s - c_i) - 1) \right)
where :math:`c_i` is the center of the basis, and :math:`h_i` is a measure of concentration.
Also, the canonical system associated with this transformation system is given by:
.. math:: \tau \dot{s} = 1
where :math:`\tau` is a scaling factor that allows to slow down or speed up the movement, and :math:`s` is the
phase variable that drives the DMP.
All these differential equations are solved using Euler's method.
References:
[1] "Dynamical movement primitives: Learning attractor models for motor behaviors", Ijspeert et al., 2013
"""
def __init__(self, num_dmps, num_basis, dt=0.01, y0=0, goal=1,
forcing_terms=None, stiffness=None, damping=None):
"""Initialize the rhythmic DMP
Args:
num_dmps (int): number of DMPs
num_basis (int): number of basis functions
dt (float): step integration for Euler's method
y0 (float, np.array): initial position(s)
goal (float, np.array): goal(s)
forcing_terms (list, ForcingTerm): the forcing terms (which can have different basis functions)
stiffness (float): stiffness coefficient
damping (float): damping coefficient
"""
# create rhythmic canonical system
cs = RhythmicCS(dt=dt)
# create forcing terms (each one contains the basis functions and learnable weights)
if forcing_terms is None:
if isinstance(num_basis, int):
forcing_terms = [RhythmicForcingTerm(cs, num_basis) for _ in range(num_dmps)]
else:
if not isinstance(num_basis, (np.ndarray, list, tuple, set)):
raise TypeError("Expecting 'num_basis' to be an int, list, tuple, np.array or set.")
if len(num_basis) != num_dmps:
raise ValueError("The length of th list of number of basis doesn't match the number of DMPs")
forcing_terms = [RhythmicForcingTerm(cs, n_basis) for n_basis in num_basis]
# call super class constructor
super(RhythmicDMP, self).__init__(canonical_system=cs, forcing_term=forcing_terms, y0=y0, goal=goal,
stiffness=stiffness, damping=damping)
def get_scaling_term(self, new_goal=None):
"""
Return the scaling term for the forcing term. For rhythmic DMPs it's non-diminishing, so this function just
returns 1.
"""
return np.ones(self.num_dmps)
def _generate_goal(self, y_des):
"""Generate the goal for path imitation.
For rhythmic DMPs, the goal is the average of the desired trajectory.
Args:
y_des (float[M,T]): the desired trajectory to follow (with shape [num_dmps, timesteps])
Returns:
float[M]: goal positions (one for each DMP)
"""
goal = np.zeros(self.num_dmps)
for n in range(self.num_dmps):
num_idx = ~np.isnan(y_des[n]) # ignore nan's when calculating goal
goal[n] = .5 * (y_des[n, num_idx].min() + y_des[n, num_idx].max())
return goal
| 0 | 0 | 0 |
f435c309888d9e68dc96f1aa6e903d696c5cca03 | 656 | py | Python | xblock/test/test_fragment.py | cclauss/XBlock | 3e5341015e8f8b4a203ac4a41471bac5549182b0 | [
"Apache-2.0"
] | null | null | null | xblock/test/test_fragment.py | cclauss/XBlock | 3e5341015e8f8b4a203ac4a41471bac5549182b0 | [
"Apache-2.0"
] | null | null | null | xblock/test/test_fragment.py | cclauss/XBlock | 3e5341015e8f8b4a203ac4a41471bac5549182b0 | [
"Apache-2.0"
] | null | null | null | """
Unit tests for the Fragment class.
Note: this class has been deprecated in favor of web_fragments.fragment.Fragment
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from unittest import TestCase
from xblock.fragment import Fragment
class TestFragment(TestCase):
    """Unit tests for the deprecated ``Fragment`` wrapper class."""

    def test_fragment(self):
        """Content added to a Fragment comes back from ``body_html``."""
        expected_html = u'<p>Hello, world!</p>'
        frag = Fragment()
        frag.add_content(expected_html)
        self.assertEqual(frag.body_html(), expected_html)
| 25.230769 | 82 | 0.690549 | """
Unit tests for the Fragment class.
Note: this class has been deprecated in favor of web_fragments.fragment.Fragment
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from unittest import TestCase
from xblock.fragment import Fragment
class TestFragment(TestCase):
"""
Unit tests for fragments.
"""
def test_fragment(self):
"""
Test the delegated Fragment class.
"""
TEST_HTML = u'<p>Hello, world!</p>' # pylint: disable=invalid-name
fragment = Fragment()
fragment.add_content(TEST_HTML)
self.assertEqual(fragment.body_html(), TEST_HTML)
| 0 | 0 | 0 |
c6bcb780f95fe8ae4a95bc9a59ae2ad9f8b29924 | 8,244 | py | Python | eval/cls_eval.py | EthanZhangYC/invariance-equivariance | 6e369fd6f43c6b217740f7acd9533c298c43d360 | [
"MIT"
] | 24 | 2021-04-21T09:35:23.000Z | 2022-02-28T12:44:39.000Z | eval/cls_eval.py | wct5217488/invariance-equivariance | 6dfadb39a485d0e55c1cd0c8ce0e0f6dfc602dd3 | [
"MIT"
] | 3 | 2021-05-12T19:09:13.000Z | 2021-08-23T17:17:10.000Z | eval/cls_eval.py | wct5217488/invariance-equivariance | 6dfadb39a485d0e55c1cd0c8ce0e0f6dfc602dd3 | [
"MIT"
] | 8 | 2021-06-09T02:41:37.000Z | 2022-02-27T02:14:17.000Z | from __future__ import print_function
import torch
import time
from tqdm import tqdm
from .util import AverageMeter, accuracy
import numpy as np
def validate(val_loader, model, criterion, opt):
    """One epoch validation.

    Args:
        val_loader: dataloader yielding (input, target, index) batches.
        model: network to evaluate (put into eval mode here).
        criterion: loss function applied to (output, target).
        opt: options namespace; ``opt.simclr`` signals that the loader
            yields a list of augmented views, of which the first is used.

    Returns:
        tuple: (average top-1 accuracy, average top-5 accuracy, average
        loss) over the whole validation set.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    with torch.no_grad():
        with tqdm(val_loader, total=len(val_loader)) as pbar:
            end = time.time()
            for idx, (input, target, _) in enumerate(pbar):

                if opt.simclr:
                    # simclr loaders return a list of views; evaluate view 0
                    input = input[0].float()
                else:
                    input = input.float()

                if torch.cuda.is_available():
                    input = input.cuda()
                    target = target.cuda()

                # compute output
                output = model(input)
                loss = criterion(output, target)

                # measure accuracy and record loss (weighted by batch size)
                acc1, acc5 = accuracy(output, target, topk=(1, 5))
                losses.update(loss.item(), input.size(0))
                top1.update(acc1[0], input.size(0))
                top5.update(acc5[0], input.size(0))

                # measure elapsed time
                batch_time.update(time.time() - end)
                end = time.time()

                # BUG FIX: "Acc@5" previously displayed top1.avg, so the
                # progress bar showed the top-1 value under both keys.
                pbar.set_postfix({"Acc@1": '{0:.2f}'.format(top1.avg.cpu().numpy()),
                                  "Acc@5": '{0:.2f}'.format(top5.avg.cpu().numpy()),
                                  "Loss": '{0:.2f}'.format(losses.avg),
                                  })

    print('Val_Acc@1 {top1.avg:.3f} Val_Acc@5 {top5.avg:.3f}'
          .format(top1=top1, top5=top5))

    return top1.avg, top5.avg, losses.avg
def embedding(val_loader, model, opt):
"""One epoch validation"""
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
with torch.no_grad():
with tqdm(val_loader, total=len(val_loader)) as pbar:
end = time.time()
for idx, (input, target, _) in enumerate(pbar):
if(opt.simclr):
input = input[0].float()
else:
input = input.float()
if torch.cuda.is_available():
input = input.cuda()
target = target.cuda()
batch_size = input.size()[0]
x = input
x_90 = x.transpose(2,3).flip(2)
x_180 = x.flip(2).flip(3)
x_270 = x.flip(2).transpose(2,3)
generated_data = torch.cat((x, x_90, x_180, x_270),0)
train_targets = target.repeat(4)
# compute output
# output = model(input)
(_,_,_,_, feat), (output, rot_logits) = model(generated_data, rot=True)
# loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output[:batch_size], target, topk=(1, 5))
# losses.update(loss.item(), input.size(0))
top1.update(acc1[0], input.size(0))
top5.update(acc5[0], input.size(0))
if(idx==0):
embeddings = output
classes = train_targets
else:
embeddings = torch.cat((embeddings, output),0)
classes = torch.cat((classes, train_targets),0)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
pbar.set_postfix({"Acc@1":'{0:.2f}'.format(top1.avg.cpu().numpy()),
"Acc@5":'{0:.2f}'.format(top1.avg.cpu().numpy(),2)
})
# if idx % opt.print_freq == 0:
# print('Test: [{0}/{1}]\t'
# 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
# 'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
# 'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
# 'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
# idx, len(val_loader), batch_time=batch_time, loss=losses,
# top1=top1, top5=top5))
print('Val_Acc@1 {top1.avg:.3f} Val_Acc@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
print(embeddings.size())
print(classes.size())
np.save("embeddings.npy", embeddings.detach().cpu().numpy())
np.save("classes.npy", classes.detach().cpu().numpy())
# with tqdm(val_loader, total=len(val_loader)) as pbar:
# end = time.time()
# for idx, (input, target, _) in enumerate(pbar):
# if(opt.simclr):
# input = input[0].float()
# else:
# input = input.float()
# if torch.cuda.is_available():
# input = input.cuda()
# target = target.cuda()
# generated_data = torch.cat((x, x_180),0)
# # compute output
# # output = model(input)
# (_,_,_,_, feat), (output, rot_logits) = model(input, rot=True)
# # loss = criterion(output, target)
# # measure accuracy and record loss
# acc1, acc5 = accuracy(output, target, topk=(1, 5))
# # losses.update(loss.item(), input.size(0))
# top1.update(acc1[0], input.size(0))
# top5.update(acc5[0], input.size(0))
# if(idx==0):
# embeddings = output
# classes = target
# else:
# embeddings = torch.cat((embeddings, output),0)
# classes = torch.cat((classes, target),0)
# # measure elapsed time
# batch_time.update(time.time() - end)
# end = time.time()
# pbar.set_postfix({"Acc@1":'{0:.2f}'.format(top1.avg.cpu().numpy()),
# "Acc@5":'{0:.2f}'.format(top1.avg.cpu().numpy(),2)
# })
# # if idx % opt.print_freq == 0:
# # print('Test: [{0}/{1}]\t'
# # 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
# # 'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
# # 'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
# # 'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
# # idx, len(val_loader), batch_time=batch_time, loss=losses,
# # top1=top1, top5=top5))
# print('Val_Acc@1 {top1.avg:.3f} Val_Acc@5 {top5.avg:.3f}'
# .format(top1=top1, top5=top5))
# print(embeddings.size())
# print(classes.size())
# np.save("embeddings.npy", embeddings.detach().cpu().numpy())
# np.save("classes.npy", classes.detach().cpu().numpy())
return top1.avg, top5.avg, losses.avg
| 40.610837 | 88 | 0.434983 | from __future__ import print_function
import torch
import time
from tqdm import tqdm
from .util import AverageMeter, accuracy
import numpy as np
def validate(val_loader, model, criterion, opt):
"""One epoch validation"""
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
with torch.no_grad():
with tqdm(val_loader, total=len(val_loader)) as pbar:
end = time.time()
for idx, (input, target, _) in enumerate(pbar):
if(opt.simclr):
input = input[0].float()
else:
input = input.float()
if torch.cuda.is_available():
input = input.cuda()
target = target.cuda()
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(acc1[0], input.size(0))
top5.update(acc5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
pbar.set_postfix({"Acc@1":'{0:.2f}'.format(top1.avg.cpu().numpy()),
"Acc@5":'{0:.2f}'.format(top1.avg.cpu().numpy(),2),
"Loss" :'{0:.2f}'.format(losses.avg,2),
})
# if idx % opt.print_freq == 0:
# print('Test: [{0}/{1}]\t'
# 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
# 'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
# 'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
# 'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
# idx, len(val_loader), batch_time=batch_time, loss=losses,
# top1=top1, top5=top5))
print('Val_Acc@1 {top1.avg:.3f} Val_Acc@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg, top5.avg, losses.avg
def embedding(val_loader, model, opt):
"""One epoch validation"""
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
with torch.no_grad():
with tqdm(val_loader, total=len(val_loader)) as pbar:
end = time.time()
for idx, (input, target, _) in enumerate(pbar):
if(opt.simclr):
input = input[0].float()
else:
input = input.float()
if torch.cuda.is_available():
input = input.cuda()
target = target.cuda()
batch_size = input.size()[0]
x = input
x_90 = x.transpose(2,3).flip(2)
x_180 = x.flip(2).flip(3)
x_270 = x.flip(2).transpose(2,3)
generated_data = torch.cat((x, x_90, x_180, x_270),0)
train_targets = target.repeat(4)
# compute output
# output = model(input)
(_,_,_,_, feat), (output, rot_logits) = model(generated_data, rot=True)
# loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output[:batch_size], target, topk=(1, 5))
# losses.update(loss.item(), input.size(0))
top1.update(acc1[0], input.size(0))
top5.update(acc5[0], input.size(0))
if(idx==0):
embeddings = output
classes = train_targets
else:
embeddings = torch.cat((embeddings, output),0)
classes = torch.cat((classes, train_targets),0)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
pbar.set_postfix({"Acc@1":'{0:.2f}'.format(top1.avg.cpu().numpy()),
"Acc@5":'{0:.2f}'.format(top1.avg.cpu().numpy(),2)
})
# if idx % opt.print_freq == 0:
# print('Test: [{0}/{1}]\t'
# 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
# 'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
# 'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
# 'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
# idx, len(val_loader), batch_time=batch_time, loss=losses,
# top1=top1, top5=top5))
print('Val_Acc@1 {top1.avg:.3f} Val_Acc@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
print(embeddings.size())
print(classes.size())
np.save("embeddings.npy", embeddings.detach().cpu().numpy())
np.save("classes.npy", classes.detach().cpu().numpy())
# with tqdm(val_loader, total=len(val_loader)) as pbar:
# end = time.time()
# for idx, (input, target, _) in enumerate(pbar):
# if(opt.simclr):
# input = input[0].float()
# else:
# input = input.float()
# if torch.cuda.is_available():
# input = input.cuda()
# target = target.cuda()
# generated_data = torch.cat((x, x_180),0)
# # compute output
# # output = model(input)
# (_,_,_,_, feat), (output, rot_logits) = model(input, rot=True)
# # loss = criterion(output, target)
# # measure accuracy and record loss
# acc1, acc5 = accuracy(output, target, topk=(1, 5))
# # losses.update(loss.item(), input.size(0))
# top1.update(acc1[0], input.size(0))
# top5.update(acc5[0], input.size(0))
# if(idx==0):
# embeddings = output
# classes = target
# else:
# embeddings = torch.cat((embeddings, output),0)
# classes = torch.cat((classes, target),0)
# # measure elapsed time
# batch_time.update(time.time() - end)
# end = time.time()
# pbar.set_postfix({"Acc@1":'{0:.2f}'.format(top1.avg.cpu().numpy()),
# "Acc@5":'{0:.2f}'.format(top1.avg.cpu().numpy(),2)
# })
# # if idx % opt.print_freq == 0:
# # print('Test: [{0}/{1}]\t'
# # 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
# # 'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
# # 'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
# # 'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
# # idx, len(val_loader), batch_time=batch_time, loss=losses,
# # top1=top1, top5=top5))
# print('Val_Acc@1 {top1.avg:.3f} Val_Acc@5 {top5.avg:.3f}'
# .format(top1=top1, top5=top5))
# print(embeddings.size())
# print(classes.size())
# np.save("embeddings.npy", embeddings.detach().cpu().numpy())
# np.save("classes.npy", classes.detach().cpu().numpy())
return top1.avg, top5.avg, losses.avg
| 0 | 0 | 0 |
4a0fdc481c5a936d2863ba60f31ce7a87ac8d13b | 2,576 | py | Python | exampdftomindmap.py | synsandacks/CiscoExamPDFtoMindmap | fff0a10bcf18a2a2075e770b2305b038b1375de4 | [
"MIT"
] | 1 | 2022-02-10T09:31:50.000Z | 2022-02-10T09:31:50.000Z | exampdftomindmap.py | synsandacks/CiscoExamPDFtoMindmap | fff0a10bcf18a2a2075e770b2305b038b1375de4 | [
"MIT"
] | null | null | null | exampdftomindmap.py | synsandacks/CiscoExamPDFtoMindmap | fff0a10bcf18a2a2075e770b2305b038b1375de4 | [
"MIT"
] | 1 | 2022-02-09T21:19:38.000Z | 2022-02-09T21:19:38.000Z | import PyPDF2
import re
# Function that extracts the text from the supplied PDF and return the contents as a massive string.
# Function that takes a list of text ex. ['this', 'is', 'how', 'the', 'data', 'would', 'look']
# and iterate over that list to return a new list that groups exam objectives properly.
# Function to generate the md file leveraging the provided list from objectiveBuilder.
# Takes the exam string to be used as the top level of the mind map, the list to generate the rest of the mindmap
# and a string to be used for naming the output file.
if __name__ == '__main__':
main() | 35.287671 | 113 | 0.590062 | import PyPDF2
import re
# Function that extracts the text from the supplied PDF and return the contents as a massive string.
def pdftotext(pdffile):
pdfFile = open(pdffile, 'rb')
pdfReader = PyPDF2.PdfFileReader(pdfFile)
numPages = pdfReader.numPages
pdfText = ''
for page in range(numPages):
pdfPage = pdfReader.getPage(page)
pdfText += pdfPage.extractText()
pdfFile.close()
# Performing some clean up on the provided file.
pdfText = pdfText.split('any time without notice.')[1]
pattern = r'\d\d\d\d Cisco Systems, Inc. This document is Cisco Public. Page \d'
pdfText = pdfText.replace('\n', '')
pdfText = re.sub(pattern, '', pdfText)
pdfText.strip(' ')
return pdfText
# Function that takes a list of text ex. ['this', 'is', 'how', 'the', 'data', 'would', 'look']
# and iterate over that list to return a new list that groups exam objectives properly.
def objectiveBuilder(textList):
newlist = []
while len(textList) > 1:
loopString = ''
if re.match(r'\d\d%|\d\.\d', textList[0]):
loopString += textList[0]
textList.remove(textList[0])
while len(textList) > 1 and not re.match(r'\d\d%|\d\.[1-9]', textList[0]):
loopString += f' {textList[0]}'
textList.remove(textList[0])
newlist.append(loopString)
if not re.match(r'\d\d%|\d\.\d', textList[0]):
newlist[-1] += f' {textList[0]}'
textList = []
return newlist
# Function to generate the md file leveraging the provided list from objectiveBuilder.
# Takes the exam string to be used as the top level of the mind map, the list to generate the rest of the mindmap
# and a string to be used for naming the output file.
def makemd(exam, list, outfile):
with open(outfile, 'w') as f:
f.write(f'# {exam}\n')
for objective in list:
if re.search(r'\d\.0', objective):
f.write(f'## {objective}\n')
if re.search(r'\d\.[1-9]\s', objective):
f.write(f'### {objective}\n')
if re.search(r'\d\.\d\.[a-zA-Z]', objective):
f.write(f'#### {objective}\n')
f.close()
def main():
pdf = 'pdfs\\200-301-CCNA.pdf'
outFile = '200-301-CCNA.md'
exam = 'CCNA Exam v1.0 (CCNA 200-301)'
pdfText = pdftotext(pdf)
pdfText = pdfText.split()
objectives = objectiveBuilder(pdfText)
makemd(exam, objectives, outFile)
if __name__ == '__main__':
main() | 1,879 | 0 | 89 |
5f41b87dcf6b0dbdc532c835f9547fb846c9d202 | 317 | py | Python | MoCalculator/main.py | daveh07/AS4100-1998-Mo_Moment_Calculator | 5b83044d73bc78b12943e7b2175df6baf40a634b | [
"MIT"
] | null | null | null | MoCalculator/main.py | daveh07/AS4100-1998-Mo_Moment_Calculator | 5b83044d73bc78b12943e7b2175df6baf40a634b | [
"MIT"
] | null | null | null | MoCalculator/main.py | daveh07/AS4100-1998-Mo_Moment_Calculator | 5b83044d73bc78b12943e7b2175df6baf40a634b | [
"MIT"
] | null | null | null | # MEMBER CAPACITY OF SEGMENTS WITHOUT FULL LATERAL RESTRAINT
# IN ACCORDANCE WITH AS4100-1998 - SECTION 5.6
#
# V1.0 - 04/12/2020 by D.HILL
from userInputs import *
from alpha_m import *
from alpha_s import *
# Calculate Alpha M
print("Alpha_m = " + str((alpha_m_moment())))
# Calculate Alpha S
buckling_moment()
| 19.8125 | 60 | 0.731861 | # MEMBER CAPACITY OF SEGMENTS WITHOUT FULL LATERAL RESTRAINT
# IN ACCORDANCE WITH AS4100-1998 - SECTION 5.6
#
# V1.0 - 04/12/2020 by D.HILL
from userInputs import *
from alpha_m import *
from alpha_s import *
# Calculate Alpha M
print("Alpha_m = " + str((alpha_m_moment())))
# Calculate Alpha S
buckling_moment()
| 0 | 0 | 0 |
9a5198bc7713ce99f07d59ad097f76cd84eb5861 | 7,732 | py | Python | webProject/sport/models.py | mohammadrezasalehi95/webproject-backend | e7b697d6e1197d3781446f44904eabd3d5137c5a | [
"MIT"
] | null | null | null | webProject/sport/models.py | mohammadrezasalehi95/webproject-backend | e7b697d6e1197d3781446f44904eabd3d5137c5a | [
"MIT"
] | 7 | 2020-02-11T23:40:45.000Z | 2022-03-11T23:39:40.000Z | webProject/sport/models.py | mohammadrezasalehi95/webproject-backend | e7b697d6e1197d3781446f44904eabd3d5137c5a | [
"MIT"
] | null | null | null | from django.contrib.auth.models import User
from django.utils import timezone
from model_utils import Choices
from pygments.lexers import get_all_lexers
from pygments.styles import get_all_styles
from django.contrib.auth.models import AbstractUser
from django.db import models
LEXERS = [item for item in get_all_lexers() if item[1]]
LANGUAGE_CHOICES = sorted([(item[1][0], item[0]) for item in LEXERS])
STYLE_CHOICES = sorted((item, item) for item in get_all_styles())
#
# class TeamGame(models.Model):
# team = models.ForeignKey(Team, on_delete=models.CASCADE)
# game = models.ForeignKey("Game", on_delete=models.CASCADE)
# against = models.CharField(max_length=20)
# date = models.DateField(blank=True)
# status = models.IntegerField(blank=True)
# score = models.IntegerField(blank=True)
# point = models.IntegerField(default=0, blank=True)
| 42.718232 | 105 | 0.725297 | from django.contrib.auth.models import User
from django.utils import timezone
from model_utils import Choices
from pygments.lexers import get_all_lexers
from pygments.styles import get_all_styles
from django.contrib.auth.models import AbstractUser
from django.db import models
LEXERS = [item for item in get_all_lexers() if item[1]]
LANGUAGE_CHOICES = sorted([(item[1][0], item[0]) for item in LEXERS])
STYLE_CHOICES = sorted((item, item) for item in get_all_styles())
class Team(models.Model):
name = models.CharField(max_length=20, primary_key=True)
bio = models.TextField(max_length=500,null=True,blank=True)
image = models.ImageField(upload_to='assets/sport/team', null=True, default='default_team.jpg')
#
# class TeamGame(models.Model):
# team = models.ForeignKey(Team, on_delete=models.CASCADE)
# game = models.ForeignKey("Game", on_delete=models.CASCADE)
# against = models.CharField(max_length=20)
# date = models.DateField(blank=True)
# status = models.IntegerField(blank=True)
# score = models.IntegerField(blank=True)
# point = models.IntegerField(default=0, blank=True)
class SiteUser(AbstractUser):
age = models.IntegerField(blank=True, null=True)
bio = models.TextField(max_length=2000, blank=True, null=True)
image = models.ImageField(upload_to='assets/sport/users', null=True, blank=True)
favoriteNews = models.ManyToManyField("New", blank=True)
favoriteGames = models.ManyToManyField("Game", blank=True)
def __str__(self):
return self.email
class Meta(AbstractUser.Meta):
swappable = 'AUTH_USER_MODEL'
class New(models.Model):
title = models.TextField(max_length=500)
subtitle = models.TextField(max_length=500)
content = models.TextField(max_length=2000)
releaseTime = models.DateTimeField(auto_now_add=True, null=True)
image = models.ImageField(upload_to='assets/sport/news', null=True)
source = models.CharField(max_length=20, null=True)
relateds = models.ManyToManyField("New", blank=True)
media = models.FileField(upload_to='assets/sport/news', null=True, blank=True)
likes = models.IntegerField(blank=True, default=0)
class Comment(models.Model):
user = models.ForeignKey(SiteUser, on_delete=models.CASCADE, null=True)
new = models.ForeignKey(New, on_delete=models.CASCADE)
time = models.DateTimeField(auto_now_add=True)
text = models.TextField(max_length=500)
class Profile(models.Model):
pid = models.IntegerField()
name = models.CharField(max_length=20)
bio = models.TextField(max_length=500)
gender = models.CharField(max_length=5)
image = models.ImageField(upload_to='assets/sport/players', null=True)
born = models.DateField(blank=True, null=True)
age = models.IntegerField(blank=True, null=True)
height = models.IntegerField(blank=True, null=True)
weight = models.IntegerField(blank=True, null=True)
currentTeam = models.ForeignKey(Team, on_delete=models.CASCADE, null=True)
national = models.CharField(max_length=20, null=True)
rule = models.CharField(max_length=20, null=True)
previousClub = models.CharField(max_length=20, null=True)
squad = models.CharField(max_length=20, null=True)
type = models.CharField(max_length=20, null=True, choices=(('F', 'FootBall'), ('B', 'BasketBall')))
class Game(models.Model):
team1 = models.ForeignKey(Team, related_name='home', on_delete=models.SET_NULL, null=True)
team2 = models.ForeignKey(Team, related_name='guest', on_delete=models.SET_NULL, null=True)
date = models.DateField(blank=True, default=timezone.now, null=True)
status = models.IntegerField(blank=True, default=2, null=True)
team1_score = models.IntegerField(blank=True, default=1, null=True)
team2_score = models.IntegerField(blank=True, default=1, null=True)
team1_point = models.IntegerField(default=0, blank=True, null=True)
team2_point = models.IntegerField(default=0, blank=True, null=True)
type = models.CharField(max_length=20, null=True, choices=(('F', 'FootBall'), ('B', 'BasketBall')))
bestPlayer = models.ForeignKey(Profile, on_delete=models.CASCADE, null=True,blank=True)
news = models.ManyToManyField(to=New,null=True,blank=True)
media1 = models.FileField(upload_to='assets/sport/games', null=True, blank=True)
media2 = models.FileField(upload_to='assets/sport/games', null=True, blank=True)
likes = models.IntegerField(blank=True, default=0)
competition=models.ForeignKey('Competition',on_delete=models.CASCADE)
class GameSpecialDetail(models.Model):
game = models.ForeignKey(Game, on_delete=models.CASCADE)
team1 = models.IntegerField(blank=True)
team2 = models.IntegerField(blank=True)
title = models.CharField(max_length=20, null=True)
class Game_Player(models.Model):
game = models.ForeignKey(Game, on_delete=models.CASCADE)
pid = models.ForeignKey(Profile, on_delete=models.CASCADE, null=True)
name = models.CharField(max_length=20)
post = models.CharField(max_length=20, blank=True)
changingTime = models.CharField(max_length=20, blank=True)
playTime = models.IntegerField(blank=True)
class Game_Report(models.Model):
game = models.OneToOneField(Game, on_delete=models.CASCADE, primary_key=True)
last_report = models.TextField(max_length=500)
class Game_Event(models.Model):
game = models.ForeignKey(Game, on_delete=models.CASCADE)
time = models.DateTimeField(blank=True)
text = models.TextField(blank=True)
class Competition(models.Model):
name = models.CharField(max_length=20,primary_key=True )
type = Choices('League', 'Cup')
field = models.CharField(max_length=1, choices=(('F', 'FootBall'), ('B', 'BasketBall')))
current = models.BooleanField(default=True)
image = models.ImageField(upload_to='assets/sport/competition', blank=True, null=True)
class Cup(Competition):
team_number = models.IntegerField(choices=((4, 4), (8, 8), (16, 16), (32, 32), (64, 64), (128, 128)),
default=16,
null=True,
blank=True)
class League(Competition):
team_number = models.IntegerField(null=True, blank=True)
class LeagueRow(models.Model):
league = models.ForeignKey(League, on_delete=models.CASCADE, blank=True, null=True)
team = models.ForeignKey(Team, on_delete=models.SET_NULL, null=True)
finished_game = models.IntegerField(blank=True)
win = models.IntegerField(blank=True)
lose = models.IntegerField(blank=True)
equal = models.IntegerField(blank=True)
point = models.IntegerField(blank=True)
gf = models.IntegerField(blank=True)
ga = models.IntegerField(blank=True) # recieved goal
def different_goal(self):
return self.gf - self.ga
class FootBallSeasonDetail(models.Model):
profile = models.ForeignKey(Profile, on_delete=models.CASCADE)
season = models.CharField(max_length=20, blank=True, null=True)
goals = models.IntegerField(null=True, blank=True)
goalPass = models.IntegerField(null=True, blank=True)
cards = models.IntegerField(null=True, blank=True)
class BasketSeasonDetail(models.Model):
profile = models.ForeignKey(Profile, on_delete=models.CASCADE)
season = models.CharField(max_length=20)
twoscoreGoals = models.IntegerField(null=True, blank=True)
threescoreGoals = models.IntegerField(null=True, blank=True)
fault = models.IntegerField(null=True, blank=True)
ribsndhs = models.IntegerField(null=True, blank=True)
playTime = models.IntegerField(null=True, blank=True)
class CupRow(models.Model):
cup=models.ForeignKey(to=Cup)
place=models.IntegerField(unique=True)
pass
| 60 | 6,392 | 391 |
4215a859c4f0ee1fa3f5c5043ebaff2722426e5a | 277 | py | Python | src/button.py | 3dani33/ePaper_polaroid | 54d98ac3492ecf5974254ac6651295affb23cb88 | [
"MIT"
] | null | null | null | src/button.py | 3dani33/ePaper_polaroid | 54d98ac3492ecf5974254ac6651295affb23cb88 | [
"MIT"
] | null | null | null | src/button.py | 3dani33/ePaper_polaroid | 54d98ac3492ecf5974254ac6651295affb23cb88 | [
"MIT"
] | null | null | null | import RPi.GPIO as GPIO
BUTTON_PIN = 4
if __name__ == '__main__':
setup()
while True:
GPIO.wait_for_edge(BUTTON_PIN, GPIO.RISING)
print('button!') | 21.307692 | 63 | 0.66787 | import RPi.GPIO as GPIO
BUTTON_PIN = 4
def setup():
GPIO.setmode(GPIO.BCM)
GPIO.setup(BUTTON_PIN, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
if __name__ == '__main__':
setup()
while True:
GPIO.wait_for_edge(BUTTON_PIN, GPIO.RISING)
print('button!') | 82 | 0 | 23 |
6da0e91d33eed83fe5c7b0ac6a350c323b69f234 | 1,848 | py | Python | lustre/precomputation.py | half-cambodian-hacker-man/lustre | 93e2196a962cafcfd7fa0be93a6b0d563c46ba75 | [
"MIT"
] | 3 | 2020-09-06T02:21:09.000Z | 2020-09-30T00:05:54.000Z | lustre/precomputation.py | videogame-hacker/lustre | 93e2196a962cafcfd7fa0be93a6b0d563c46ba75 | [
"MIT"
] | null | null | null | lustre/precomputation.py | videogame-hacker/lustre | 93e2196a962cafcfd7fa0be93a6b0d563c46ba75 | [
"MIT"
] | null | null | null | import typing
import importlib, importlib.resources
from markupsafe import Markup
from .templating import set_template_global
| 28.875 | 83 | 0.621753 | import typing
import importlib, importlib.resources
from markupsafe import Markup
from .templating import set_template_global
class Precomputation: # TODO: Can we think of a better name for this?
def __init__(self, precomputation_package: str):
self.precomputation_package = precomputation_package
self.cache = {}
def text_file(self, path: str) -> typing.TextIO:
directories, slash, resource = path.rpartition("/")
package = self.precomputation_package
if slash:
package += "." + ".".join(directories.split("/"))
return importlib.resources.open_text(package, resource)
def __call__(self, *args, **kwargs) -> Markup:
return Markup(self.get(*args, **kwargs))
def _generate_identifier(self, name: str, *args, **kwargs):
yield name
yield "("
if args:
yield repr(args)[1:-2]
if args and kwargs:
yield ", "
if kwargs:
yield "**"
yield repr(kwargs)
yield ")"
def get(self, processor: str, *args, **kwargs) -> typing.Any:
identifier = "".join(self._generate_identifier(processor, *args, **kwargs))
if identifier in self.cache:
return self.cache.get(identifier)
try:
result = importlib.import_module(
f"{self.precomputation_package}.{processor}"
).process(self, *args, **kwargs)
self.cache[identifier] = result
return result
except ModuleNotFoundError:
return None
class PrecomputationAppMixin:
def __init__(self):
self.precomputation = None
def setup_precomputation(self, precomp_package: str):
self.precomputation = Precomputation(precomp_package)
set_template_global("precomp", self.precomputation)
| 1,428 | 57 | 233 |
2adffddfdd68ffd15f238992beb73b2d8929d348 | 2,847 | py | Python | tests/junk/recall/train_keras.py | imandr/gradnet | 72b9b140cb3f43224a11310b115480fb42820546 | [
"BSD-3-Clause"
] | null | null | null | tests/junk/recall/train_keras.py | imandr/gradnet | 72b9b140cb3f43224a11310b115480fb42820546 | [
"BSD-3-Clause"
] | null | null | null | tests/junk/recall/train_keras.py | imandr/gradnet | 72b9b140cb3f43224a11310b115480fb42820546 | [
"BSD-3-Clause"
] | null | null | null | from generator import Generator
import numpy as np, random
np.set_printoptions(precision=4, suppress=True, linewidth=132)
from tensorflow import keras
from tensorflow.keras.layers import LSTM, Dense, Input
from tensorflow.keras import Model
from tensorflow.keras.optimizers import Adagrad
if __name__ == '__main__':
nwords = 10
length = 50
distance = 5
r = 2
batch_size = 5
g = Generator(nwords, distance, r)
model = create_net(nwords, batch_size)
train(model, g, length, batch_size)
| 33.494118 | 118 | 0.60274 | from generator import Generator
import numpy as np, random
np.set_printoptions(precision=4, suppress=True, linewidth=132)
from tensorflow import keras
from tensorflow.keras.layers import LSTM, Dense, Input
from tensorflow.keras import Model
from tensorflow.keras.optimizers import Adagrad
def create_net(nwords, batch_size, hidden=100):
inp = Input((None, nwords), batch_size=batch_size)
r1 = LSTM(hidden, return_sequences=True, stateful=True)(inp)
#r2 = LSTM(hidden, return_sequences=True)(r1)
probs = Dense(nwords, activation="softmax")(r1)
model = Model(inp, probs)
model.compile(optimizer=Adagrad(learning_rate=0.01), loss="categorical_crossentropy")
return model
def generate_from_model(model, g, length, batch_size):
#print("------- generate ----------")
model.reset_states()
nwords = g.NWords
rows = []
row = [random.randint(0, nwords-1) for _ in range(batch_size)] # [w]
rows.append(row)
for t in range(length-1):
x = np.array([g.vectorize(xi) for xi in row])
y = model.predict(x[:,None,:])[:,0,:] # y: [mb, w], t=0
pvec = y**3
pvec = pvec/np.sum(pvec, axis=-1, keepdims=True) # -> [mb, w]
row = [np.random.choice(nwords, p=p) for p in pvec]
rows.append(row)
rows = np.array(rows) # [t,mb]
return rows.transpose((1,0))
def generate_batch(g, length, batch_size):
#print("generate_batch(%s, %s)..." % (length, batch_size))
sequences = np.array([g.generate(length+1, as_vectors=True) for _ in range(batch_size)])
#print("sequences:", sequences.shape)
x = sequences[:,:-1,:]
y_ = sequences[:,1:,:]
return x, y_
def train(model, g, length, batch_size):
valid_ma = 0.0
steps = 0
for iteration in range(100000):
#print
x, y_ = generate_batch(g, length, batch_size)
loss = model.train_on_batch(x, y_)
if iteration and iteration % 50 == 0:
generated = generate_from_model(model, g, length, batch_size)[0]
#print(type(generated), generated.shape, generated)
valid_length = g.validate(generated)
valid_ma += 0.1*(valid_length-valid_ma)
if iteration % 100 == 0:
print(generated[:valid_length], "*", generated[valid_length:], " valid length:", valid_length)
print("Batches:", iteration, " steps:", iteration*length*batch_size, " loss/step:", loss/x.shape[1],
" moving average:", valid_ma)
if __name__ == '__main__':
nwords = 10
length = 50
distance = 5
r = 2
batch_size = 5
g = Generator(nwords, distance, r)
model = create_net(nwords, batch_size)
train(model, g, length, batch_size)
| 2,227 | 0 | 96 |
6500f03260a475dd256be21b2448a300479360eb | 29,087 | py | Python | resolwe_bio/processes/alignment/star.py | plojyon/resolwe-bio | 45d001a78fcc387b5e3239a34c9da7f40d789022 | [
"Apache-2.0"
] | 12 | 2015-12-07T18:29:27.000Z | 2022-03-16T08:00:18.000Z | resolwe_bio/processes/alignment/star.py | plojyon/resolwe-bio | 45d001a78fcc387b5e3239a34c9da7f40d789022 | [
"Apache-2.0"
] | 480 | 2015-11-20T21:46:43.000Z | 2022-03-28T12:40:57.000Z | resolwe_bio/processes/alignment/star.py | plojyon/resolwe-bio | 45d001a78fcc387b5e3239a34c9da7f40d789022 | [
"Apache-2.0"
] | 45 | 2015-11-19T14:54:07.000Z | 2022-02-13T21:36:50.000Z | """Align reads with STAR aligner."""
import gzip
import shutil
from pathlib import Path
from plumbum import TEE
from resolwe.process import (
BooleanField,
Cmd,
DataField,
FileField,
FloatField,
GroupField,
IntegerField,
Process,
SchedulingClass,
StringField,
)
SPECIES = [
"Caenorhabditis elegans",
"Cricetulus griseus",
"Dictyostelium discoideum",
"Dictyostelium purpureum",
"Drosophila melanogaster",
"Homo sapiens",
"Macaca mulatta",
"Mus musculus",
"Odocoileus virginianus texanus",
"Rattus norvegicus",
"Solanum tuberosum",
]
def get_fastq_name(fastq_path):
"""Get the name of the FASTQ file."""
fastq_file = fastq_path.name
assert fastq_file.endswith(".fastq.gz")
return fastq_file[:-9]
def concatenate_files(filenames, out_fname):
"""Concatenate and decompress files."""
with open(out_fname, "w") as outfile:
for fname in filenames:
assert Path(fname).suffix == ".gz"
with gzip.open(fname, "rt") as infile:
# Speed up file copy by increasing the buffersize [length].
# https://blogs.blumetech.com/blumetechs-tech-blog/2011/05/faster-python-file-copy.html
shutil.copyfileobj(fsrc=infile, fdst=outfile, length=10485760)
class AlignmentStar(Process):
"""Align reads with STAR aligner.
Spliced Transcripts Alignment to a Reference (STAR) software is
based on an alignment algorithm that uses sequential maximum
mappable seed search in uncompressed suffix arrays followed by seed
clustering and stitching procedure. In addition to unbiased de novo
detection of canonical junctions, STAR can discover non-canonical
splices and chimeric (fusion) transcripts, and is also capable of
mapping full-length RNA sequences. More information can be found in
the [STAR manual](https://github.com/alexdobin/STAR/blob/master/doc/STARmanual.pdf)
and in the [original paper](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3530905/).
"""
slug = "alignment-star"
name = "STAR"
process_type = "data:alignment:bam:star"
version = "3.0.2"
category = "Align"
scheduling_class = SchedulingClass.BATCH
entity = {"type": "sample"}
requirements = {
"expression-engine": "jinja",
"executor": {
"docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/rnaseq:5.11.0"}
},
"resources": {
"cores": 10,
"memory": 36864,
},
}
data_name = "{{ reads|sample_name|default('?') }}"
class Input:
"""Input fields to process AlignmentStar."""
reads = DataField("reads:fastq", label="Input reads (FASTQ)")
genome = DataField(
"index:star",
label="Indexed reference genome",
description="Genome index prepared by STAR aligner indexing tool.",
)
annotation = DataField(
"annotation",
label="Annotation file (GTF/GFF3)",
required=False,
description="Insert known annotations into genome indices at the mapping stage.",
)
unstranded = BooleanField(
label="The data is unstranded [--outSAMstrandField intronMotif]",
default=False,
description="For unstranded RNA-seq data, Cufflinks/Cuffdiff require spliced "
"alignments with XS strand attribute, which STAR will generate with "
"--outSAMstrandField intronMotif option. As required, the XS strand attribute will be "
"generated for all alignments that contain splice junctions. The spliced alignments "
"that have undefined strand (i.e. containing only non-canonical unannotated "
"junctions) will be suppressed. If you have stranded RNA-seq data, you do not need to "
"use any specific STAR options. Instead, you need to run Cufflinks with the library "
"option --library-type options. For example, cufflinks --library-type fr-firststrand "
"should be used for the standard dUTP protocol, including Illumina's stranded "
"Tru-Seq. This option has to be used only for Cufflinks runs and not for STAR runs.",
)
noncannonical = BooleanField(
label="Remove non-cannonical junctions (Cufflinks compatibility)",
default=False,
description="It is recommended to remove the non-canonical junctions for Cufflinks "
"runs using --outFilterIntronMotifs RemoveNoncanonical.",
)
class AnnotationOptions:
"""Annotation file options."""
feature_exon = StringField(
label="Feature type [--sjdbGTFfeatureExon]",
default="exon",
description="Feature type in GTF file to be used as exons for building "
"transcripts.",
)
sjdb_overhang = IntegerField(
label="Junction length [--sjdbOverhang]",
default=100,
description="This parameter specifies the length of the genomic sequence around "
"the annotated junction to be used in constructing the splice junction database. "
"Ideally, this length should be equal to the ReadLength-1, where ReadLength is "
"the length of the reads. For instance, for Illumina 2x100b paired-end reads, the "
"ideal value is 100-1=99. In the case of reads of varying length, the ideal value "
"is max(ReadLength)-1. In most cases, the default value of 100 will work as well "
"as the ideal value.",
)
class ChimericReadsOptions:
"""Chimeric reads options."""
chimeric = BooleanField(
label="Detect chimeric and circular alignments [--chimOutType SeparateSAMold]",
default=False,
description="To switch on detection of chimeric (fusion) alignments (in addition "
"to normal mapping), --chimSegmentMin should be set to a positive value. Each "
"chimeric alignment consists of two segments.Each segment is non-chimeric on "
"its own, but the segments are chimeric to each other (i.e. the segments belong "
"to different chromosomes, or different strands, or are far from each other). "
"Both segments may contain splice junctions, and one of the segments may contain "
"portions of both mates. --chimSegmentMin parameter controls the minimum mapped "
"length of the two segments that is allowed. For example, if you have 2x75 reads "
"and used --chimSegmentMin 20, a chimeric alignment with 130b on one chromosome "
"and 20b on the other will be output, while 135 + 15 won't be.",
)
chim_segment_min = IntegerField(
label="Minimum length of chimeric segment [--chimSegmentMin]",
default=20,
disabled="!detect_chimeric.chimeric",
)
class TranscriptOutputOptions:
"""Transcript coordinate output options."""
quant_mode = BooleanField(
label="Output in transcript coordinates [--quantMode]",
default=False,
description="With --quantMode TranscriptomeSAM option STAR will output alignments "
"translated into transcript coordinates in the Aligned.toTranscriptome.out.bam "
"file (in addition to alignments in genomic coordinates in Aligned.*.sam/bam "
"files). These transcriptomic alignments can be used with various transcript "
"quantification software that require reads to be mapped to transcriptome, such "
"as RSEM or eXpress.",
)
single_end = BooleanField(
label="Allow soft-clipping and indels [--quantTranscriptomeBan Singleend]",
default=False,
disabled="!t_coordinates.quant_mode",
description="By default, the output satisfies RSEM requirements: soft-clipping or "
"indels are not allowed. Use --quantTranscriptomeBan Singleend to allow "
"insertions, deletions and soft-clips in the transcriptomic alignments, which "
"can be used by some expression quantification softwares (e.g. eXpress).",
)
class FilteringOptions:
"""Output filtering options."""
out_filter_type = StringField(
label="Type of filtering [--outFilterType]",
default="Normal",
choices=[
("Normal", "Normal"),
("BySJout", "BySJout"),
],
description="Normal: standard filtering using only current alignment; BySJout: "
"keep only those reads that contain junctions that passed filtering into "
"SJ.out.tab.",
)
out_multimap_max = IntegerField(
label="Maximum number of loci [--outFilterMultimapNmax]",
required=False,
description="Maximum number of loci the read is allowed to map to. Alignments "
"(all of them) will be output only if the read maps to no more loci than this "
"value. Otherwise no alignments will be output, and the read will be counted as "
"'mapped to too many loci' (default: 10).",
)
out_mismatch_max = IntegerField(
label="Maximum number of mismatches [--outFilterMismatchNmax]",
required=False,
description="Alignment will be output only if it has fewer mismatches than this "
"value (default: 10). Large number (e.g. 999) switches off this filter.",
)
out_mismatch_nl_max = FloatField(
label="Maximum no. of mismatches (map length) [--outFilterMismatchNoverLmax]",
required=False,
range=[0.0, 1.0],
description="Alignment will be output only if its ratio of mismatches to *mapped* "
"length is less than or equal to this value (default: 0.3). The value should be "
"between 0.0 and 1.0.",
)
out_score_min = IntegerField(
label="Minumum alignment score [--outFilterScoreMin]",
required=False,
description="Alignment will be output only if its score is higher than or equal "
"to this value (default: 0).",
)
out_mismatch_nrl_max = FloatField(
label="Maximum no. of mismatches (read length) [--outFilterMismatchNoverReadLmax]",
required=False,
range=[0.0, 1.0],
description="Alignment will be output only if its ratio of mismatches to *read* "
"length is less than or equal to this value (default: 1.0). Using 0.04 for "
"2x100bp, the max number of mismatches is calculated as 0.04*200=8 for the paired "
"read. The value should be between 0.0 and 1.0.",
)
class AlignmentOptions:
"""Alignment options."""
align_overhang_min = IntegerField(
label="Minimum overhang [--alignSJoverhangMin]",
required=False,
description="Minimum overhang (i.e. block size) for spliced alignments "
"(default: 5).",
)
align_sjdb_overhang_min = IntegerField(
label="Minimum overhang (sjdb) [--alignSJDBoverhangMin]",
required=False,
description="Minimum overhang (i.e. block size) for annotated (sjdb) spliced "
"alignments (default: 3).",
)
align_intron_size_min = IntegerField(
label="Minimum intron size [--alignIntronMin]",
required=False,
description="Minimum intron size: the genomic gap is considered an intron if its "
"length >= alignIntronMin, otherwise it is considered Deletion (default: 21).",
)
align_intron_size_max = IntegerField(
label="Maximum intron size [--alignIntronMax]",
required=False,
description="Maximum intron size, if 0, max intron size will be determined by "
"(2pow(winBinNbits)*winAnchorDistNbins)(default: 0).",
)
align_gap_max = IntegerField(
label="Minimum gap between mates [--alignMatesGapMax]",
required=False,
description="Maximum gap between two mates, if 0, max intron gap will be "
"determined by (2pow(winBinNbits)*winAnchorDistNbins) (default: 0).",
)
align_end_alignment = StringField(
label="Read ends alignment [--alignEndsType]",
required=False,
choices=[
("Local", "Local"),
("EndToEnd", "EndToEnd"),
("Extend5pOfRead1", "Extend5pOfRead1"),
("Extend5pOfReads12", "Extend5pOfReads12"),
],
description="Type of read ends alignment (default: Local). Local: standard local "
"alignment with soft-clipping allowed. EndToEnd: force end-to-end read alignment, "
"do not soft-clip. Extend5pOfRead1: fully extend only the 5p of the read1, all "
"other ends: local alignment. Extend5pOfReads12: fully extend only the 5' of the "
"both read1 and read2, all other ends use local alignment.",
)
class TwoPassOptions:
"""Two-pass mapping options."""
two_pass_mode = BooleanField(
label="Use two pass mode [--twopassMode]",
default=False,
description="Use two-pass maping instead of first-pass only. In two-pass mode we "
"first perform first-pass mapping, extract junctions, insert them into genome "
"index, and re-map all reads in the second mapping pass.",
)
class OutputOptions:
"""Output options."""
out_unmapped = BooleanField(
label="Output unmapped reads (SAM) [--outSAMunmapped Within]",
default=False,
description="Output of unmapped reads in the SAM format.",
)
out_sam_attributes = StringField(
label="Desired SAM attributes [--outSAMattributes]",
default="Standard",
choices=[
("Standard", "Standard"),
("All", "All"),
("NH HI NM MD", "NH HI NM MD"),
("None", "None"),
],
description="A string of desired SAM attributes, in the order desired for the "
"output SAM.",
)
out_rg_line = StringField(
label="SAM/BAM read group line [--outSAMattrRGline]",
required=False,
description="The first word contains the read group identifier and must start "
"with ID:, e.g. --outSAMattrRGline ID:xxx CN:yy ”DS:z z z” xxx will be added as "
"RG tag to each output alignment. Any spaces in the tag values have to be double "
"quoted. Comma separated RG lines correspons to different (comma separated) input "
"files in –readFilesIn. Commas have to be surrounded by spaces, e.g. "
"–outSAMattrRGline ID:xxx , ID:zzz ”DS:z z” , ID:yyy DS:yyyy.",
)
class Limits:
"""Limits."""
limit_buffer_size = IntegerField(
label="Buffer size [--limitIObufferSize]",
default=150000000,
description="Maximum available buffers size (bytes) for input/output, per thread.",
)
limit_sam_records = IntegerField(
label="Maximum size of the SAM record [--limitOutSAMoneReadBytes]",
default=100000,
description="Maximum size of the SAM record (bytes) for one read. Recommended "
"value: >(2*(LengthMate1+LengthMate2+100)*outFilterMultimapNmax.",
)
limit_junction_reads = IntegerField(
label="Maximum number of junctions [--limitOutSJoneRead]",
default=1000,
description="Maximum number of junctions for one read (including all "
"multi-mappers).",
)
limit_collapsed_junctions = IntegerField(
label="Maximum number of collapsed junctions [--limitOutSJcollapsed]",
default=1000000,
)
limit_inserted_junctions = IntegerField(
label="Maximum number of junction to be inserted [--limitSjdbInsertNsj]",
default=1000000,
description="Maximum number of junction to be inserted to the genome on the fly "
"at the mapping stage, including those from annotations and those detected in the "
"1st step of the 2-pass run.",
)
annotation_options = GroupField(
AnnotationOptions, label="Annotation file options", hidden="!annotation"
)
detect_chimeric = GroupField(
ChimericReadsOptions, label="Chimeric and circular alignments"
)
t_coordinates = GroupField(
TranscriptOutputOptions, label="Transcript coordinates output"
)
filtering = GroupField(FilteringOptions, label="Output Filtering")
alignment = GroupField(AlignmentOptions, label="Alignment and Seeding")
two_pass_mapping = GroupField(TwoPassOptions, label="Two-pass mapping")
output_options = GroupField(OutputOptions, label="Output options")
limits = GroupField(Limits, label="Limits")
    class Output:
        """Output fields to process AlignmentStar."""
        # Sorted alignment and its index.
        bam = FileField(label="Alignment file")
        bai = FileField(label="BAM file index")
        # Set only when STAR produced the corresponding Unmapped.out.mate* file.
        unmapped_1 = FileField(label="Unmapped reads (mate 1)", required=False)
        unmapped_2 = FileField(label="Unmapped reads (mate 2)", required=False)
        sj = FileField(label="Splice junctions")
        # Set only when chimeric detection is enabled in the inputs.
        chimeric = FileField(label="Chimeric alignments", required=False)
        # Set only when --quantMode TranscriptomeSAM output is requested.
        alignment_transcriptome = FileField(
            label="Alignment (trancriptome coordinates)", required=False
        )
        stats = FileField(label="Statistics")
        species = StringField(label="Species")
        build = StringField(label="Build")
def run(self, inputs, outputs):
"""Run analysis."""
try:
if (
inputs.reads.entity.descriptor["general"]["species"]
!= inputs.genome.output.species
):
self.warning(
f"Species of reads ({inputs.reads.entity.descriptor['general']['species']}) "
f"and genome ({inputs.genome.output.species}) do not match."
)
except KeyError:
if inputs.genome.output.species in SPECIES:
self.update_entity_descriptor(
{"general.species": inputs.genome.output.species}
)
self.info(
"Sample species was automatically annotated to match the genome."
)
mate1_name = get_fastq_name(Path(inputs.reads.output.fastq[0].path))
mate_1 = [fastq.path for fastq in inputs.reads.output.fastq]
concatenated_r1 = "mate_1.fastq"
try:
concatenate_files(filenames=mate_1, out_fname=concatenated_r1)
except Exception as error:
self.error(
f"Failed to concatenate FASTQ files (mate 1). The error was: {error}"
)
if inputs.reads.type.startswith("data:reads:fastq:paired:"):
mate2_name = get_fastq_name(Path(inputs.reads.output.fastq2[0].path))
mate_2 = [fastq.path for fastq in inputs.reads.output.fastq2]
concatenated_r2 = "mate_2.fastq"
try:
concatenate_files(filenames=mate_2, out_fname=concatenated_r2)
except Exception as error:
self.error(
f"Failed to concatenate FASTQ files (mate 2). The error was: {error}"
)
self.progress(0.05)
star_params = [
"--runThreadN",
self.requirements.resources.cores,
"--genomeDir",
inputs.genome.output.index.path,
"--outReadsUnmapped",
"Fastx",
"--limitIObufferSize",
inputs.limits.limit_buffer_size,
"--limitOutSAMoneReadBytes",
inputs.limits.limit_sam_records,
"--limitOutSJoneRead",
inputs.limits.limit_junction_reads,
"--limitOutSJcollapsed",
inputs.limits.limit_collapsed_junctions,
"--limitSjdbInsertNsj",
inputs.limits.limit_inserted_junctions,
"--outFilterType",
inputs.filtering.out_filter_type,
"--outSAMtype",
"BAM",
"Unsorted",
]
if inputs.reads.type.startswith("data:reads:fastq:single:"):
star_params.extend(["--readFilesIn", concatenated_r1])
else:
star_params.extend(["--readFilesIn", concatenated_r1, concatenated_r2])
if inputs.annotation:
star_params.extend(
[
"--sjdbGTFfile",
inputs.annotation.output.annot.path,
"--sjdbOverhang",
inputs.annotation_options.sjdb_overhang,
"--sjdbGTFfeatureExon",
inputs.annotation_options.feature_exon,
]
)
if inputs.annotation.type.startswith("data:annotation:gff3:"):
star_params.extend(["--sjdbGTFtagExonParentTranscript", "Parent"])
if inputs.unstranded:
star_params.extend(["--outSAMstrandField", "intronMotif"])
if inputs.noncannonical:
star_params.extend(["--outFilterIntronMotifs", "RemoveNoncanonical"])
if inputs.detect_chimeric.chimeric:
star_params.extend(
[
"--chimOutType",
"SeparateSAMold",
"--chimSegmentMin",
inputs.detect_chimeric.chim_segment_min,
]
)
if inputs.t_coordinates.quant_mode:
gene_segments = Path(inputs.genome.output.index.path) / "geneInfo.tab"
if not gene_segments.is_file() and not inputs.annotation:
self.error(
"Output in transcript coordinates requires genome annotation file."
)
star_params.extend(["--quantMode", "TranscriptomeSAM"])
if inputs.t_coordinates.single_end:
star_params.extend(["--quantTranscriptomeBan", "Singleend"])
if inputs.filtering.out_multimap_max:
star_params.extend(
["--outFilterMultimapNmax", inputs.filtering.out_multimap_max]
)
if inputs.filtering.out_mismatch_max:
star_params.extend(
["--outFilterMismatchNmax", inputs.filtering.out_mismatch_max]
)
if inputs.filtering.out_mismatch_nl_max:
star_params.extend(
["--outFilterMismatchNoverLmax", inputs.filtering.out_mismatch_nl_max]
)
if inputs.filtering.out_score_min:
star_params.extend(["--outFilterScoreMin", inputs.filtering.out_score_min])
if inputs.filtering.out_mismatch_nrl_max:
star_params.extend(
[
"--outFilterMismatchNoverReadLmax",
inputs.filtering.out_mismatch_nrl_max,
]
)
if inputs.alignment.align_overhang_min:
star_params.extend(
["--alignSJoverhangMin", inputs.alignment.align_overhang_min]
)
if inputs.alignment.align_sjdb_overhang_min:
star_params.extend(
["--alignSJDBoverhangMin", inputs.alignment.align_sjdb_overhang_min]
)
if inputs.alignment.align_intron_size_min:
star_params.extend(
["--alignIntronMin", inputs.alignment.align_intron_size_min]
)
if inputs.alignment.align_intron_size_max:
star_params.extend(
["--alignIntronMax", inputs.alignment.align_intron_size_max]
)
if inputs.alignment.align_gap_max:
star_params.extend(["--alignMatesGapMax", inputs.alignment.align_gap_max])
if inputs.alignment.align_end_alignment:
star_params.extend(
["--alignMatesGapMax", inputs.alignment.align_end_alignment]
)
if inputs.two_pass_mapping.two_pass_mode:
star_params.extend(["--twopassMode", "Basic"])
if inputs.output_options.out_unmapped:
star_params.extend(["--outSAMunmapped", "Within"])
if inputs.output_options.out_sam_attributes:
# Create a list from string of out_sam_attributes to avoid unknown/unimplemented
# SAM attrribute error due to Plumbum command passing problems.
attributes = inputs.output_options.out_sam_attributes.split(" ")
star_params.extend(["--outSAMattributes", attributes])
if inputs.output_options.out_rg_line:
star_params.extend(
["--outSAMattrRGline", inputs.output_options.out_rg_line]
)
self.progress(0.1)
return_code, _, _ = Cmd["STAR"][star_params] & TEE(retcode=None)
log_file = Path("Log.out")
# Log contains useful information for debugging.
if log_file.is_file():
with open(log_file, "r") as log:
print(log.read())
if return_code:
self.error("Reads alignment failed.")
self.progress(0.7)
star_unmapped_r1 = Path("Unmapped.out.mate1")
if star_unmapped_r1.is_file():
unmapped_out_1 = f"{mate1_name}_unmapped.out.mate1.fastq"
star_unmapped_r1.rename(unmapped_out_1)
return_code, _, _ = Cmd["pigz"][unmapped_out_1] & TEE(retcode=None)
if return_code:
self.error("Compression of unmapped mate 1 reads failed.")
outputs.unmapped_1 = f"{unmapped_out_1}.gz"
star_unmapped_r2 = Path("Unmapped.out.mate2")
if (
inputs.reads.type.startswith("data:reads:fastq:paired:")
and star_unmapped_r2.is_file()
):
unmapped_out_2 = f"{mate2_name}_unmapped.out.mate2.fastq"
star_unmapped_r2.rename(unmapped_out_2)
return_code, _, _ = Cmd["pigz"][unmapped_out_2] & TEE(retcode=None)
if return_code:
self.error("Compression of unmapped mate 2 reads failed.")
outputs.unmapped_2 = f"{unmapped_out_2}.gz"
self.progress(0.8)
out_bam = f"{mate1_name}.bam"
out_bai = f"{out_bam}.bai"
sort_params = [
"Aligned.out.bam",
"-o",
out_bam,
"-@",
self.requirements.resources.cores,
]
return_code, _, _ = Cmd["samtools"]["sort"][sort_params] & TEE(retcode=None)
if return_code:
self.error("Samtools sort command failed.")
outputs.bam = out_bam
return_code, _, _ = Cmd["samtools"]["index"][out_bam, out_bai] & TEE(
retcode=None
)
if return_code:
self.error("Samtools index command failed.")
outputs.bai = out_bai
self.progress(0.9)
if inputs.detect_chimeric.chimeric:
out_chimeric = f"{mate1_name}_chimeric.out.sam"
Path("Chimeric.out.sam").rename(out_chimeric)
outputs.chimeric = out_chimeric
if inputs.t_coordinates.quant_mode:
out_transcriptome = f"{mate1_name}_aligned.toTranscriptome.out.bam"
Path("Aligned.toTranscriptome.out.bam").rename(out_transcriptome)
outputs.alignment_transcriptome = out_transcriptome
out_stats = f"{mate1_name}_stats.txt"
Path("Log.final.out").rename(out_stats)
outputs.stats = out_stats
out_sj = f"{mate1_name}_SJ.out.tab"
Path("SJ.out.tab").rename(out_sj)
outputs.sj = out_sj
outputs.species = inputs.genome.output.species
outputs.build = inputs.genome.output.build
| 42.033237 | 103 | 0.587685 | """Align reads with STAR aligner."""
import gzip
import shutil
from pathlib import Path
from plumbum import TEE
from resolwe.process import (
BooleanField,
Cmd,
DataField,
FileField,
FloatField,
GroupField,
IntegerField,
Process,
SchedulingClass,
StringField,
)
# Species for which the sample descriptor may be auto-annotated from the
# genome when the reads' sample has no species entry (see AlignmentStar.run).
SPECIES = [
    "Caenorhabditis elegans",
    "Cricetulus griseus",
    "Dictyostelium discoideum",
    "Dictyostelium purpureum",
    "Drosophila melanogaster",
    "Homo sapiens",
    "Macaca mulatta",
    "Mus musculus",
    "Odocoileus virginianus texanus",
    "Rattus norvegicus",
    "Solanum tuberosum",
]
def get_fastq_name(fastq_path):
    """Return the sample name of a FASTQ file.

    Strips the mandatory ``.fastq.gz`` suffix from the file name.

    :param fastq_path: ``pathlib.Path`` pointing to a ``*.fastq.gz`` file.
    :raises ValueError: if the file name does not end with ``.fastq.gz``.
    """
    suffix = ".fastq.gz"
    fastq_file = fastq_path.name
    if not fastq_file.endswith(suffix):
        # Raise explicitly instead of ``assert``, which is stripped under -O.
        raise ValueError(f"Expected a {suffix} file, got {fastq_file}.")
    return fastq_file[: -len(suffix)]
def concatenate_files(filenames, out_fname):
    """Concatenate gzip-compressed files into a single decompressed file.

    :param filenames: iterable of ``*.gz`` file paths, concatenated in order.
    :param out_fname: path of the decompressed output file.
    :raises ValueError: if any input file does not have a ``.gz`` suffix.
    """
    with open(out_fname, "w") as outfile:
        for fname in filenames:
            if Path(fname).suffix != ".gz":
                # Raise explicitly instead of ``assert`` (stripped under -O).
                raise ValueError(f"Expected a gzipped (.gz) file, got {fname}.")
            with gzip.open(fname, "rt") as infile:
                # Speed up file copy by increasing the buffersize [length].
                # https://blogs.blumetech.com/blumetechs-tech-blog/2011/05/faster-python-file-copy.html
                shutil.copyfileobj(fsrc=infile, fdst=outfile, length=10485760)
class AlignmentStar(Process):
"""Align reads with STAR aligner.
Spliced Transcripts Alignment to a Reference (STAR) software is
based on an alignment algorithm that uses sequential maximum
mappable seed search in uncompressed suffix arrays followed by seed
clustering and stitching procedure. In addition to unbiased de novo
detection of canonical junctions, STAR can discover non-canonical
splices and chimeric (fusion) transcripts, and is also capable of
mapping full-length RNA sequences. More information can be found in
the [STAR manual](https://github.com/alexdobin/STAR/blob/master/doc/STARmanual.pdf)
and in the [original paper](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3530905/).
"""
slug = "alignment-star"
name = "STAR"
process_type = "data:alignment:bam:star"
version = "3.0.2"
category = "Align"
scheduling_class = SchedulingClass.BATCH
entity = {"type": "sample"}
requirements = {
"expression-engine": "jinja",
"executor": {
"docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/rnaseq:5.11.0"}
},
"resources": {
"cores": 10,
"memory": 36864,
},
}
data_name = "{{ reads|sample_name|default('?') }}"
class Input:
"""Input fields to process AlignmentStar."""
reads = DataField("reads:fastq", label="Input reads (FASTQ)")
genome = DataField(
"index:star",
label="Indexed reference genome",
description="Genome index prepared by STAR aligner indexing tool.",
)
annotation = DataField(
"annotation",
label="Annotation file (GTF/GFF3)",
required=False,
description="Insert known annotations into genome indices at the mapping stage.",
)
unstranded = BooleanField(
label="The data is unstranded [--outSAMstrandField intronMotif]",
default=False,
description="For unstranded RNA-seq data, Cufflinks/Cuffdiff require spliced "
"alignments with XS strand attribute, which STAR will generate with "
"--outSAMstrandField intronMotif option. As required, the XS strand attribute will be "
"generated for all alignments that contain splice junctions. The spliced alignments "
"that have undefined strand (i.e. containing only non-canonical unannotated "
"junctions) will be suppressed. If you have stranded RNA-seq data, you do not need to "
"use any specific STAR options. Instead, you need to run Cufflinks with the library "
"option --library-type options. For example, cufflinks --library-type fr-firststrand "
"should be used for the standard dUTP protocol, including Illumina's stranded "
"Tru-Seq. This option has to be used only for Cufflinks runs and not for STAR runs.",
)
noncannonical = BooleanField(
label="Remove non-cannonical junctions (Cufflinks compatibility)",
default=False,
description="It is recommended to remove the non-canonical junctions for Cufflinks "
"runs using --outFilterIntronMotifs RemoveNoncanonical.",
)
class AnnotationOptions:
"""Annotation file options."""
feature_exon = StringField(
label="Feature type [--sjdbGTFfeatureExon]",
default="exon",
description="Feature type in GTF file to be used as exons for building "
"transcripts.",
)
sjdb_overhang = IntegerField(
label="Junction length [--sjdbOverhang]",
default=100,
description="This parameter specifies the length of the genomic sequence around "
"the annotated junction to be used in constructing the splice junction database. "
"Ideally, this length should be equal to the ReadLength-1, where ReadLength is "
"the length of the reads. For instance, for Illumina 2x100b paired-end reads, the "
"ideal value is 100-1=99. In the case of reads of varying length, the ideal value "
"is max(ReadLength)-1. In most cases, the default value of 100 will work as well "
"as the ideal value.",
)
class ChimericReadsOptions:
"""Chimeric reads options."""
chimeric = BooleanField(
label="Detect chimeric and circular alignments [--chimOutType SeparateSAMold]",
default=False,
description="To switch on detection of chimeric (fusion) alignments (in addition "
"to normal mapping), --chimSegmentMin should be set to a positive value. Each "
"chimeric alignment consists of two segments.Each segment is non-chimeric on "
"its own, but the segments are chimeric to each other (i.e. the segments belong "
"to different chromosomes, or different strands, or are far from each other). "
"Both segments may contain splice junctions, and one of the segments may contain "
"portions of both mates. --chimSegmentMin parameter controls the minimum mapped "
"length of the two segments that is allowed. For example, if you have 2x75 reads "
"and used --chimSegmentMin 20, a chimeric alignment with 130b on one chromosome "
"and 20b on the other will be output, while 135 + 15 won't be.",
)
chim_segment_min = IntegerField(
label="Minimum length of chimeric segment [--chimSegmentMin]",
default=20,
disabled="!detect_chimeric.chimeric",
)
class TranscriptOutputOptions:
"""Transcript coordinate output options."""
quant_mode = BooleanField(
label="Output in transcript coordinates [--quantMode]",
default=False,
description="With --quantMode TranscriptomeSAM option STAR will output alignments "
"translated into transcript coordinates in the Aligned.toTranscriptome.out.bam "
"file (in addition to alignments in genomic coordinates in Aligned.*.sam/bam "
"files). These transcriptomic alignments can be used with various transcript "
"quantification software that require reads to be mapped to transcriptome, such "
"as RSEM or eXpress.",
)
single_end = BooleanField(
label="Allow soft-clipping and indels [--quantTranscriptomeBan Singleend]",
default=False,
disabled="!t_coordinates.quant_mode",
description="By default, the output satisfies RSEM requirements: soft-clipping or "
"indels are not allowed. Use --quantTranscriptomeBan Singleend to allow "
"insertions, deletions and soft-clips in the transcriptomic alignments, which "
"can be used by some expression quantification softwares (e.g. eXpress).",
)
class FilteringOptions:
"""Output filtering options."""
out_filter_type = StringField(
label="Type of filtering [--outFilterType]",
default="Normal",
choices=[
("Normal", "Normal"),
("BySJout", "BySJout"),
],
description="Normal: standard filtering using only current alignment; BySJout: "
"keep only those reads that contain junctions that passed filtering into "
"SJ.out.tab.",
)
out_multimap_max = IntegerField(
label="Maximum number of loci [--outFilterMultimapNmax]",
required=False,
description="Maximum number of loci the read is allowed to map to. Alignments "
"(all of them) will be output only if the read maps to no more loci than this "
"value. Otherwise no alignments will be output, and the read will be counted as "
"'mapped to too many loci' (default: 10).",
)
out_mismatch_max = IntegerField(
label="Maximum number of mismatches [--outFilterMismatchNmax]",
required=False,
description="Alignment will be output only if it has fewer mismatches than this "
"value (default: 10). Large number (e.g. 999) switches off this filter.",
)
out_mismatch_nl_max = FloatField(
label="Maximum no. of mismatches (map length) [--outFilterMismatchNoverLmax]",
required=False,
range=[0.0, 1.0],
description="Alignment will be output only if its ratio of mismatches to *mapped* "
"length is less than or equal to this value (default: 0.3). The value should be "
"between 0.0 and 1.0.",
)
out_score_min = IntegerField(
label="Minumum alignment score [--outFilterScoreMin]",
required=False,
description="Alignment will be output only if its score is higher than or equal "
"to this value (default: 0).",
)
out_mismatch_nrl_max = FloatField(
label="Maximum no. of mismatches (read length) [--outFilterMismatchNoverReadLmax]",
required=False,
range=[0.0, 1.0],
description="Alignment will be output only if its ratio of mismatches to *read* "
"length is less than or equal to this value (default: 1.0). Using 0.04 for "
"2x100bp, the max number of mismatches is calculated as 0.04*200=8 for the paired "
"read. The value should be between 0.0 and 1.0.",
)
class AlignmentOptions:
"""Alignment options."""
align_overhang_min = IntegerField(
label="Minimum overhang [--alignSJoverhangMin]",
required=False,
description="Minimum overhang (i.e. block size) for spliced alignments "
"(default: 5).",
)
align_sjdb_overhang_min = IntegerField(
label="Minimum overhang (sjdb) [--alignSJDBoverhangMin]",
required=False,
description="Minimum overhang (i.e. block size) for annotated (sjdb) spliced "
"alignments (default: 3).",
)
align_intron_size_min = IntegerField(
label="Minimum intron size [--alignIntronMin]",
required=False,
description="Minimum intron size: the genomic gap is considered an intron if its "
"length >= alignIntronMin, otherwise it is considered Deletion (default: 21).",
)
align_intron_size_max = IntegerField(
label="Maximum intron size [--alignIntronMax]",
required=False,
description="Maximum intron size, if 0, max intron size will be determined by "
"(2pow(winBinNbits)*winAnchorDistNbins)(default: 0).",
)
align_gap_max = IntegerField(
label="Minimum gap between mates [--alignMatesGapMax]",
required=False,
description="Maximum gap between two mates, if 0, max intron gap will be "
"determined by (2pow(winBinNbits)*winAnchorDistNbins) (default: 0).",
)
align_end_alignment = StringField(
label="Read ends alignment [--alignEndsType]",
required=False,
choices=[
("Local", "Local"),
("EndToEnd", "EndToEnd"),
("Extend5pOfRead1", "Extend5pOfRead1"),
("Extend5pOfReads12", "Extend5pOfReads12"),
],
description="Type of read ends alignment (default: Local). Local: standard local "
"alignment with soft-clipping allowed. EndToEnd: force end-to-end read alignment, "
"do not soft-clip. Extend5pOfRead1: fully extend only the 5p of the read1, all "
"other ends: local alignment. Extend5pOfReads12: fully extend only the 5' of the "
"both read1 and read2, all other ends use local alignment.",
)
class TwoPassOptions:
"""Two-pass mapping options."""
two_pass_mode = BooleanField(
label="Use two pass mode [--twopassMode]",
default=False,
description="Use two-pass maping instead of first-pass only. In two-pass mode we "
"first perform first-pass mapping, extract junctions, insert them into genome "
"index, and re-map all reads in the second mapping pass.",
)
class OutputOptions:
"""Output options."""
out_unmapped = BooleanField(
label="Output unmapped reads (SAM) [--outSAMunmapped Within]",
default=False,
description="Output of unmapped reads in the SAM format.",
)
out_sam_attributes = StringField(
label="Desired SAM attributes [--outSAMattributes]",
default="Standard",
choices=[
("Standard", "Standard"),
("All", "All"),
("NH HI NM MD", "NH HI NM MD"),
("None", "None"),
],
description="A string of desired SAM attributes, in the order desired for the "
"output SAM.",
)
out_rg_line = StringField(
label="SAM/BAM read group line [--outSAMattrRGline]",
required=False,
description="The first word contains the read group identifier and must start "
"with ID:, e.g. --outSAMattrRGline ID:xxx CN:yy ”DS:z z z” xxx will be added as "
"RG tag to each output alignment. Any spaces in the tag values have to be double "
"quoted. Comma separated RG lines correspons to different (comma separated) input "
"files in –readFilesIn. Commas have to be surrounded by spaces, e.g. "
"–outSAMattrRGline ID:xxx , ID:zzz ”DS:z z” , ID:yyy DS:yyyy.",
)
class Limits:
"""Limits."""
limit_buffer_size = IntegerField(
label="Buffer size [--limitIObufferSize]",
default=150000000,
description="Maximum available buffers size (bytes) for input/output, per thread.",
)
limit_sam_records = IntegerField(
label="Maximum size of the SAM record [--limitOutSAMoneReadBytes]",
default=100000,
description="Maximum size of the SAM record (bytes) for one read. Recommended "
"value: >(2*(LengthMate1+LengthMate2+100)*outFilterMultimapNmax.",
)
limit_junction_reads = IntegerField(
label="Maximum number of junctions [--limitOutSJoneRead]",
default=1000,
description="Maximum number of junctions for one read (including all "
"multi-mappers).",
)
limit_collapsed_junctions = IntegerField(
label="Maximum number of collapsed junctions [--limitOutSJcollapsed]",
default=1000000,
)
limit_inserted_junctions = IntegerField(
label="Maximum number of junction to be inserted [--limitSjdbInsertNsj]",
default=1000000,
description="Maximum number of junction to be inserted to the genome on the fly "
"at the mapping stage, including those from annotations and those detected in the "
"1st step of the 2-pass run.",
)
annotation_options = GroupField(
AnnotationOptions, label="Annotation file options", hidden="!annotation"
)
detect_chimeric = GroupField(
ChimericReadsOptions, label="Chimeric and circular alignments"
)
t_coordinates = GroupField(
TranscriptOutputOptions, label="Transcript coordinates output"
)
filtering = GroupField(FilteringOptions, label="Output Filtering")
alignment = GroupField(AlignmentOptions, label="Alignment and Seeding")
two_pass_mapping = GroupField(TwoPassOptions, label="Two-pass mapping")
output_options = GroupField(OutputOptions, label="Output options")
limits = GroupField(Limits, label="Limits")
    class Output:
        """Output fields to process AlignmentStar."""

        bam = FileField(label="Alignment file")
        bai = FileField(label="BAM file index")
        # Unmapped reads are only produced when STAR emits Unmapped.out.mate*.
        unmapped_1 = FileField(label="Unmapped reads (mate 1)", required=False)
        unmapped_2 = FileField(label="Unmapped reads (mate 2)", required=False)
        sj = FileField(label="Splice junctions")
        # Present only when chimeric detection is enabled.
        chimeric = FileField(label="Chimeric alignments", required=False)
        # Present only when transcriptome-coordinate output is requested.
        alignment_transcriptome = FileField(
            label="Alignment (trancriptome coordinates)", required=False
        )
        stats = FileField(label="Statistics")
        species = StringField(label="Species")
        build = StringField(label="Build")
def run(self, inputs, outputs):
"""Run analysis."""
try:
if (
inputs.reads.entity.descriptor["general"]["species"]
!= inputs.genome.output.species
):
self.warning(
f"Species of reads ({inputs.reads.entity.descriptor['general']['species']}) "
f"and genome ({inputs.genome.output.species}) do not match."
)
except KeyError:
if inputs.genome.output.species in SPECIES:
self.update_entity_descriptor(
{"general.species": inputs.genome.output.species}
)
self.info(
"Sample species was automatically annotated to match the genome."
)
mate1_name = get_fastq_name(Path(inputs.reads.output.fastq[0].path))
mate_1 = [fastq.path for fastq in inputs.reads.output.fastq]
concatenated_r1 = "mate_1.fastq"
try:
concatenate_files(filenames=mate_1, out_fname=concatenated_r1)
except Exception as error:
self.error(
f"Failed to concatenate FASTQ files (mate 1). The error was: {error}"
)
if inputs.reads.type.startswith("data:reads:fastq:paired:"):
mate2_name = get_fastq_name(Path(inputs.reads.output.fastq2[0].path))
mate_2 = [fastq.path for fastq in inputs.reads.output.fastq2]
concatenated_r2 = "mate_2.fastq"
try:
concatenate_files(filenames=mate_2, out_fname=concatenated_r2)
except Exception as error:
self.error(
f"Failed to concatenate FASTQ files (mate 2). The error was: {error}"
)
self.progress(0.05)
star_params = [
"--runThreadN",
self.requirements.resources.cores,
"--genomeDir",
inputs.genome.output.index.path,
"--outReadsUnmapped",
"Fastx",
"--limitIObufferSize",
inputs.limits.limit_buffer_size,
"--limitOutSAMoneReadBytes",
inputs.limits.limit_sam_records,
"--limitOutSJoneRead",
inputs.limits.limit_junction_reads,
"--limitOutSJcollapsed",
inputs.limits.limit_collapsed_junctions,
"--limitSjdbInsertNsj",
inputs.limits.limit_inserted_junctions,
"--outFilterType",
inputs.filtering.out_filter_type,
"--outSAMtype",
"BAM",
"Unsorted",
]
if inputs.reads.type.startswith("data:reads:fastq:single:"):
star_params.extend(["--readFilesIn", concatenated_r1])
else:
star_params.extend(["--readFilesIn", concatenated_r1, concatenated_r2])
if inputs.annotation:
star_params.extend(
[
"--sjdbGTFfile",
inputs.annotation.output.annot.path,
"--sjdbOverhang",
inputs.annotation_options.sjdb_overhang,
"--sjdbGTFfeatureExon",
inputs.annotation_options.feature_exon,
]
)
if inputs.annotation.type.startswith("data:annotation:gff3:"):
star_params.extend(["--sjdbGTFtagExonParentTranscript", "Parent"])
if inputs.unstranded:
star_params.extend(["--outSAMstrandField", "intronMotif"])
if inputs.noncannonical:
star_params.extend(["--outFilterIntronMotifs", "RemoveNoncanonical"])
if inputs.detect_chimeric.chimeric:
star_params.extend(
[
"--chimOutType",
"SeparateSAMold",
"--chimSegmentMin",
inputs.detect_chimeric.chim_segment_min,
]
)
if inputs.t_coordinates.quant_mode:
gene_segments = Path(inputs.genome.output.index.path) / "geneInfo.tab"
if not gene_segments.is_file() and not inputs.annotation:
self.error(
"Output in transcript coordinates requires genome annotation file."
)
star_params.extend(["--quantMode", "TranscriptomeSAM"])
if inputs.t_coordinates.single_end:
star_params.extend(["--quantTranscriptomeBan", "Singleend"])
if inputs.filtering.out_multimap_max:
star_params.extend(
["--outFilterMultimapNmax", inputs.filtering.out_multimap_max]
)
if inputs.filtering.out_mismatch_max:
star_params.extend(
["--outFilterMismatchNmax", inputs.filtering.out_mismatch_max]
)
if inputs.filtering.out_mismatch_nl_max:
star_params.extend(
["--outFilterMismatchNoverLmax", inputs.filtering.out_mismatch_nl_max]
)
if inputs.filtering.out_score_min:
star_params.extend(["--outFilterScoreMin", inputs.filtering.out_score_min])
if inputs.filtering.out_mismatch_nrl_max:
star_params.extend(
[
"--outFilterMismatchNoverReadLmax",
inputs.filtering.out_mismatch_nrl_max,
]
)
if inputs.alignment.align_overhang_min:
star_params.extend(
["--alignSJoverhangMin", inputs.alignment.align_overhang_min]
)
if inputs.alignment.align_sjdb_overhang_min:
star_params.extend(
["--alignSJDBoverhangMin", inputs.alignment.align_sjdb_overhang_min]
)
if inputs.alignment.align_intron_size_min:
star_params.extend(
["--alignIntronMin", inputs.alignment.align_intron_size_min]
)
if inputs.alignment.align_intron_size_max:
star_params.extend(
["--alignIntronMax", inputs.alignment.align_intron_size_max]
)
if inputs.alignment.align_gap_max:
star_params.extend(["--alignMatesGapMax", inputs.alignment.align_gap_max])
if inputs.alignment.align_end_alignment:
star_params.extend(
["--alignMatesGapMax", inputs.alignment.align_end_alignment]
)
if inputs.two_pass_mapping.two_pass_mode:
star_params.extend(["--twopassMode", "Basic"])
if inputs.output_options.out_unmapped:
star_params.extend(["--outSAMunmapped", "Within"])
if inputs.output_options.out_sam_attributes:
# Create a list from string of out_sam_attributes to avoid unknown/unimplemented
# SAM attrribute error due to Plumbum command passing problems.
attributes = inputs.output_options.out_sam_attributes.split(" ")
star_params.extend(["--outSAMattributes", attributes])
if inputs.output_options.out_rg_line:
star_params.extend(
["--outSAMattrRGline", inputs.output_options.out_rg_line]
)
self.progress(0.1)
return_code, _, _ = Cmd["STAR"][star_params] & TEE(retcode=None)
log_file = Path("Log.out")
# Log contains useful information for debugging.
if log_file.is_file():
with open(log_file, "r") as log:
print(log.read())
if return_code:
self.error("Reads alignment failed.")
self.progress(0.7)
star_unmapped_r1 = Path("Unmapped.out.mate1")
if star_unmapped_r1.is_file():
unmapped_out_1 = f"{mate1_name}_unmapped.out.mate1.fastq"
star_unmapped_r1.rename(unmapped_out_1)
return_code, _, _ = Cmd["pigz"][unmapped_out_1] & TEE(retcode=None)
if return_code:
self.error("Compression of unmapped mate 1 reads failed.")
outputs.unmapped_1 = f"{unmapped_out_1}.gz"
star_unmapped_r2 = Path("Unmapped.out.mate2")
if (
inputs.reads.type.startswith("data:reads:fastq:paired:")
and star_unmapped_r2.is_file()
):
unmapped_out_2 = f"{mate2_name}_unmapped.out.mate2.fastq"
star_unmapped_r2.rename(unmapped_out_2)
return_code, _, _ = Cmd["pigz"][unmapped_out_2] & TEE(retcode=None)
if return_code:
self.error("Compression of unmapped mate 2 reads failed.")
outputs.unmapped_2 = f"{unmapped_out_2}.gz"
self.progress(0.8)
out_bam = f"{mate1_name}.bam"
out_bai = f"{out_bam}.bai"
sort_params = [
"Aligned.out.bam",
"-o",
out_bam,
"-@",
self.requirements.resources.cores,
]
return_code, _, _ = Cmd["samtools"]["sort"][sort_params] & TEE(retcode=None)
if return_code:
self.error("Samtools sort command failed.")
outputs.bam = out_bam
return_code, _, _ = Cmd["samtools"]["index"][out_bam, out_bai] & TEE(
retcode=None
)
if return_code:
self.error("Samtools index command failed.")
outputs.bai = out_bai
self.progress(0.9)
if inputs.detect_chimeric.chimeric:
out_chimeric = f"{mate1_name}_chimeric.out.sam"
Path("Chimeric.out.sam").rename(out_chimeric)
outputs.chimeric = out_chimeric
if inputs.t_coordinates.quant_mode:
out_transcriptome = f"{mate1_name}_aligned.toTranscriptome.out.bam"
Path("Aligned.toTranscriptome.out.bam").rename(out_transcriptome)
outputs.alignment_transcriptome = out_transcriptome
out_stats = f"{mate1_name}_stats.txt"
Path("Log.final.out").rename(out_stats)
outputs.stats = out_stats
out_sj = f"{mate1_name}_SJ.out.tab"
Path("SJ.out.tab").rename(out_sj)
outputs.sj = out_sj
outputs.species = inputs.genome.output.species
outputs.build = inputs.genome.output.build
| 0 | 0 | 0 |
3568b320c9b54a136b9f6bb0bf61aa5462c0e752 | 5,759 | py | Python | lywsd02/client.py | andras-tim/lywsd02 | a5d7fb41094a7bf66b0e3bd943f922b3c529d1ca | [
"MIT"
] | null | null | null | lywsd02/client.py | andras-tim/lywsd02 | a5d7fb41094a7bf66b0e3bd943f922b3c529d1ca | [
"MIT"
] | null | null | null | lywsd02/client.py | andras-tim/lywsd02 | a5d7fb41094a7bf66b0e3bd943f922b3c529d1ca | [
"MIT"
] | null | null | null | import collections
import contextlib
import logging
import struct
import time
from datetime import datetime
from bluepy import btle
_LOGGER = logging.getLogger(__name__)
UUID_UNITS = 'EBE0CCBE-7A0A-4B0C-8A1A-6FF2997DA3A6' # 0x00 - F, 0x01 - C READ WRITE
UUID_HISTORY = 'EBE0CCBC-7A0A-4B0C-8A1A-6FF2997DA3A6' # Last idx 152 READ NOTIFY
UUID_TIME = 'EBE0CCB7-7A0A-4B0C-8A1A-6FF2997DA3A6' # 5 or 4 bytes READ WRITE
UUID_DATA = 'EBE0CCC1-7A0A-4B0C-8A1A-6FF2997DA3A6' # 3 bytes READ NOTIFY
UUID_BATTERY = 'EBE0CCC4-7A0A-4B0C-8A1A-6FF2997DA3A6'
| 31.12973 | 91 | 0.610523 | import collections
import contextlib
import logging
import struct
import time
from datetime import datetime
from bluepy import btle
_LOGGER = logging.getLogger(__name__)
UUID_UNITS = 'EBE0CCBE-7A0A-4B0C-8A1A-6FF2997DA3A6' # 0x00 - F, 0x01 - C READ WRITE
UUID_HISTORY = 'EBE0CCBC-7A0A-4B0C-8A1A-6FF2997DA3A6' # Last idx 152 READ NOTIFY
UUID_TIME = 'EBE0CCB7-7A0A-4B0C-8A1A-6FF2997DA3A6' # 5 or 4 bytes READ WRITE
UUID_DATA = 'EBE0CCC1-7A0A-4B0C-8A1A-6FF2997DA3A6' # 3 bytes READ NOTIFY
UUID_BATTERY = 'EBE0CCC4-7A0A-4B0C-8A1A-6FF2997DA3A6'
_SensorDataBase = collections.namedtuple(
    'SensorDataBase', ['temperature', 'humidity'])


class SensorData(_SensorDataBase):
    """Immutable (temperature, humidity) reading from the sensor."""
    __slots__ = ()
class Lywsd02Client:
    """BLE client for the Xiaomi LYWSD02 temperature/humidity clock.

    All reads and writes go through GATT characteristics; wrap several
    operations in ``with client.connect():`` to reuse one connection.
    """

    # Raw characteristic byte -> display unit.
    # NOTE(review): the UUID_UNITS comment above documents 0x00=F / 0x01=C,
    # while this table maps 0x01=F / 0xff=C -- confirm against device firmware.
    UNITS = {
        b'\x01': 'F',
        b'\xff': 'C',
    }
    # Display unit -> raw byte written to the device.
    UNITS_CODES = {
        'C': b'\xff',
        'F': b'\x01',
    }

    def __init__(self, mac, notification_timeout=5.0):
        """Create a client for the device at BLE address *mac*.

        Args:
            mac: BLE MAC address of the device.
            notification_timeout: seconds to wait for a GATT notification.
        """
        self._mac = mac
        self._peripheral = btle.Peripheral()
        self._notification_timeout = notification_timeout
        self._handles = {}  # GATT handle -> notification callback
        self._tz_offset = None  # explicit override for the clock's TZ offset
        self._data = SensorData(None, None)
        self._history_data = collections.OrderedDict()
        self._connected = False

    @contextlib.contextmanager
    def connect(self):
        """Context manager that opens (and later closes) the BLE connection.

        Nested use is safe: only the outermost context disconnects.
        """
        already_connected = self._connected
        if not self._connected:
            _LOGGER.debug('Connecting to %s', self._mac)
            self._peripheral.connect(self._mac)
            self._connected = True
        try:
            yield self
        finally:
            if not already_connected and self._connected:
                _LOGGER.debug('Disconnecting from %s', self._mac)
                self._peripheral.disconnect()
                self._connected = False

    @property
    def temperature(self):
        """Latest temperature reading (triggers a fresh sensor read)."""
        return self.data.temperature

    @property
    def humidity(self):
        """Latest humidity reading (triggers a fresh sensor read)."""
        return self.data.humidity

    @property
    def data(self):
        """Fresh `SensorData`, obtained via a GATT notification."""
        self._get_sensor_data()
        return self._data

    @property
    def units(self):
        """Display unit currently configured on the device ('C' or 'F')."""
        with self.connect():
            ch = self._peripheral.getCharacteristics(uuid=UUID_UNITS)[0]
            value = ch.read()
        return self.UNITS[value]

    @units.setter
    def units(self, value):
        """Set the display unit; accepts 'C'/'F' in any case."""
        if value.upper() not in self.UNITS_CODES.keys():
            raise ValueError(
                'Units value must be one of %s' % self.UNITS_CODES.keys())
        with self.connect():
            ch = self._peripheral.getCharacteristics(uuid=UUID_UNITS)[0]
            ch.write(self.UNITS_CODES[value.upper()], withResponse=True)

    @property
    def battery(self):
        """Battery level as reported by the device (single byte value)."""
        with self.connect():
            ch = self._peripheral.getCharacteristics(uuid=UUID_BATTERY)[0]
            value = ch.read()
        return ord(value)

    @property
    def time(self):
        """Device clock as ``(datetime, tz_offset_hours)``.

        The characteristic is 5 bytes (timestamp + tz offset) on some
        firmware, 4 bytes (timestamp only) otherwise.
        """
        with self.connect():
            ch = self._peripheral.getCharacteristics(uuid=UUID_TIME)[0]
            value = ch.read()
        if len(value) == 5:
            ts, tz_offset = struct.unpack('Ib', value)
        else:
            ts = struct.unpack('I', value)[0]
            tz_offset = 0
        return datetime.fromtimestamp(ts), tz_offset

    @time.setter
    def time(self, dt: datetime):
        """Write *dt* to the device clock.

        Uses the explicit `tz_offset` override when set, otherwise derives
        the offset from the local system timezone (honouring DST).
        """
        if self._tz_offset is not None:
            tz_offset = self._tz_offset
        elif time.daylight != 0:
            tz_offset = int(-time.altzone / 3600)
        else:
            tz_offset = int(-time.timezone / 3600)
        data = struct.pack('Ib', int(dt.timestamp()), tz_offset)
        with self.connect():
            ch = self._peripheral.getCharacteristics(uuid=UUID_TIME)[0]
            ch.write(data, withResponse=True)

    @property
    def tz_offset(self):
        """Timezone offset (hours) used when setting the clock, or None."""
        return self._tz_offset

    @tz_offset.setter
    def tz_offset(self, tz_offset: int):
        self._tz_offset = tz_offset

    @property
    def history_data(self):
        """Stored min/max history records, keyed by record index."""
        self._get_history_data()
        return self._history_data

    def _get_sensor_data(self):
        """Subscribe to the data characteristic and wait for one notification."""
        with self.connect():
            self._subscribe(UUID_DATA, self._process_sensor_data)
            if not self._peripheral.waitForNotifications(
                    self._notification_timeout):
                raise TimeoutError('No data from device for {} seconds'.format(
                    self._notification_timeout))

    def _get_history_data(self):
        """Drain history notifications until the device stops sending."""
        with self.connect():
            self._subscribe(UUID_HISTORY, self._process_history_data)
            while True:
                if not self._peripheral.waitForNotifications(
                        self._notification_timeout):
                    break

    def handleNotification(self, handle, data):
        # bluepy delegate hook: dispatch to the callback registered for the
        # characteristic's GATT handle in _subscribe().
        func = self._handles.get(handle)
        if func:
            func(data)

    def _subscribe(self, uuid, callback):
        """Register *callback* and enable notifications for *uuid*."""
        self._peripheral.setDelegate(self)
        ch = self._peripheral.getCharacteristics(uuid=uuid)[0]
        self._handles[ch.getHandle()] = callback
        # 0x2902 is the Client Characteristic Configuration descriptor;
        # writing 0x0001 little-endian enables notifications.
        desc = ch.getDescriptors(forUUID=0x2902)[0]
        desc.write(0x01.to_bytes(2, byteorder="little"), withResponse=True)

    def _process_sensor_data(self, data):
        """Decode a live reading: int16 temperature*100, uint8 humidity."""
        temperature, humidity = struct.unpack_from('hB', data)
        temperature /= 100
        self._data = SensorData(temperature=temperature, humidity=humidity)

    def _process_history_data(self, data):
        """Decode one history record into the cache.

        Layout: uint32 index, uint32 timestamp, then (int16 temp*100,
        uint8 humidity) for the max and then the min reading.
        """
        # TODO unpacking with IIhBhB in one step doesn't work
        (idx, ts) = struct.unpack_from('II', data[0:8])
        (max_temp, max_hum) = struct.unpack_from('hB', data[8:11])
        (min_temp, min_hum) = struct.unpack_from('hB', data[11:14])
        ts = datetime.fromtimestamp(ts)
        min_temp /= 100
        max_temp /= 100
        self._history_data[idx] = [ts, min_temp, min_hum, max_temp, max_hum]
| 4,181 | 937 | 46 |
a779c56183368760dd85753f55634afaf79389d6 | 697 | py | Python | problems/euler/45/pentagonal.py | vidyadeepa/the-coding-interview | 90171b77b6884176a6c28bdccb5d45bd6929b489 | [
"MIT"
] | 1,571 | 2015-12-09T14:08:47.000Z | 2022-03-30T21:34:36.000Z | problems/euler/45/pentagonal.py | vidyadeepa/the-coding-interview | 90171b77b6884176a6c28bdccb5d45bd6929b489 | [
"MIT"
] | 117 | 2015-10-22T05:59:19.000Z | 2021-09-17T00:14:38.000Z | problems/euler/45/pentagonal.py | vidyadeepa/the-coding-interview | 90171b77b6884176a6c28bdccb5d45bd6929b489 | [
"MIT"
] | 452 | 2015-10-21T23:00:58.000Z | 2022-03-18T21:16:50.000Z | from itertools import takewhile, combinations
# Project Euler 45: find the next number after 40755 that is triangular,
# pentagonal and hexagonal at once. The *_generator helpers are defined
# elsewhere in this file (this excerpt is truncated).
# NOTE(review): Python 2 only -- uses generator .next() and the print
# statement; the `for h in hg:` loop has no stop condition and runs until
# interrupted.
start = 40756
tg = triangle_generator(start)
pg = pentagonal_generator(start)
hg = hexagonal_generator(start)
p = pg.next()
t = tg.next()
for h in hg:
    # Advance the pentagonal stream until it reaches or passes h.
    while p < h:
        p = pg.next()
    if p != h:
        continue
    # h is pentagonal; check whether it is also triangular.
    while t < h:
        t = tg.next()
    if t == h:
        print h
| 15.840909 | 45 | 0.572453 | from itertools import takewhile, combinations
def triangle_generator(start):
    """Yield triangle numbers n*(n+1)/2 that are >= *start*, in order.

    Uses floor division: n*(n+1) is always even, so the value is exact,
    and under Python 3 this keeps the results as ints (true division
    would yield floats, breaking equality tests at large magnitudes).
    """
    n = 1
    while True:
        num = n * (n + 1) // 2
        if num >= start:
            yield num
        n = n + 1
def pentagonal_generator(start):
    """Yield pentagonal numbers n*(3n-1)/2 that are >= *start*, in order.

    Uses floor division: n and 3n-1 have opposite parity, so n*(3n-1) is
    always even and the value is exact; under Python 3 this keeps the
    results as ints instead of floats.
    """
    n = 1
    while True:
        num = n * (3 * n - 1) // 2
        if num >= start:
            yield num
        n = n + 1
def hexagonal_generator(start):
    """Yield hexagonal numbers n*(2n-1) no smaller than *start*, in order."""
    n = 1
    while True:
        value = n * (2 * n - 1)
        n = n + 1
        if value < start:
            continue
        yield value
# Project Euler 45: find the next number after 40755 that is simultaneously
# triangular, pentagonal and hexagonal (the expected answer is 1533776805).
# NOTE(review): Python 2 only -- uses generator .next() and the print
# statement; the `for h in hg:` loop has no stop condition, so the script
# keeps printing matches until interrupted.
start = 40756
tg = triangle_generator(start)
pg = pentagonal_generator(start)
hg = hexagonal_generator(start)
p = pg.next()
t = tg.next()
for h in hg:
    # Advance the pentagonal stream until it reaches or passes h.
    while p < h:
        p = pg.next()
    if p != h:
        continue
    # h is pentagonal; check whether it is also triangular.
    while t < h:
        t = tg.next()
    if t == h:
        print h
| 311 | 0 | 69 |
b8b810d6136b1aa2892efe14a75d85f0d51b1527 | 301 | py | Python | classifications/getSubjectDump.py | tingard/Galaxy-builder-aggregation | 78fec76eeb2ab4b38e241b66fa5643e0002ba3a7 | [
"MIT"
] | 1 | 2018-05-16T14:48:43.000Z | 2018-05-16T14:48:43.000Z | classifications/getSubjectDump.py | tingard/Galaxy-builder-aggregation | 78fec76eeb2ab4b38e241b66fa5643e0002ba3a7 | [
"MIT"
] | null | null | null | classifications/getSubjectDump.py | tingard/Galaxy-builder-aggregation | 78fec76eeb2ab4b38e241b66fa5643e0002ba3a7 | [
"MIT"
] | null | null | null | import re
import json
import sys
# Pick the subjects CSV path: first CLI argument, or the default filename.
if len(sys.argv) > 1:
    fpath = sys.argv[1]
else:
    fpath = 'galaxy-builder-subjects.csv'
# Read the dump and drop the header row; each remaining entry is one raw
# CSV line (split on newlines only -- not parsed as CSV here).
try:
    with open(fpath) as f:
        classificationCsv = f.read().split('\n')[1:]
except FileNotFoundError:
    print('No subjects file found, exiting')
    sys.exit(0)
| 18.8125 | 52 | 0.651163 | import re
import json
import sys
# Pick the subjects CSV path: first CLI argument, or the default filename.
if len(sys.argv) > 1:
    fpath = sys.argv[1]
else:
    fpath = 'galaxy-builder-subjects.csv'
# Read the dump and drop the header row; each remaining entry is one raw
# CSV line (split on newlines only -- not parsed as CSV here).
try:
    with open(fpath) as f:
        classificationCsv = f.read().split('\n')[1:]
except FileNotFoundError:
    print('No subjects file found, exiting')
    sys.exit(0)
| 0 | 0 | 0 |
487508962cb47f6dd9ce8ee856085cd1bb2f3541 | 998 | py | Python | raspberry_pi/lightning/config.py | asmyczek/lightning | f0b27ae07ade148a8ef938bb2356a83650eb3197 | [
"MIT"
] | null | null | null | raspberry_pi/lightning/config.py | asmyczek/lightning | f0b27ae07ade148a8ef938bb2356a83650eb3197 | [
"MIT"
] | null | null | null | raspberry_pi/lightning/config.py | asmyczek/lightning | f0b27ae07ade148a8ef938bb2356a83650eb3197 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from typing import Dict, Any
from json import loads
from pathlib import Path
import logging
| 23.209302 | 57 | 0.538076 | # -*- coding: utf-8 -*-
from typing import Dict, Any
from json import loads
from pathlib import Path
import logging
def load_config(config_file) -> Dict:
    """Parse the JSON file at *config_file* and return it as a dict.

    Logs an error and returns None when the file does not exist.
    """
    try:
        resolved = Path(config_file).resolve()
        return loads(resolved.read_text())
    except FileNotFoundError:
        logging.error('Config file does not exist!')
class Config(object):
    """Accessor for nested JSON configuration values.

    Values are addressed by a positional key path, e.g.
    ``cfg.get('mqtt', 'host')`` or simply ``cfg('mqtt', 'host')``.
    """

    # Populated by __init__; stays None only if the config file could not
    # be read (load_config logs and returns None in that case).
    _config: Dict = None

    def __init__(self, config_file: str = 'config.json'):
        """Load configuration from *config_file* (a JSON document)."""
        self._config = load_config(config_file)

    def __call__(self, *args) -> Any:
        """Shorthand for :meth:`get`."""
        return self.get(*args)

    def get(self, *args) -> Any:
        """Return the value at the given key path, or None if any key is missing."""
        node = self._config
        for key in args:
            if key not in node:
                return None
            node = node[key]
        return node

    # Bug fix: previously annotated '-> Any' although nothing is returned.
    def set(self, value: Any, *args) -> None:
        """Store *value* at the given key path, creating intermediate dicts.

        The last positional argument is the leaf key; at least one key
        must be given.
        """
        node = self._config
        for key in args[:-1]:
            if key not in node:
                node[key] = {}
            node = node[key]
        node[args[-1]] = value
| 700 | 133 | 46 |
be70ea7a964506442e645e8316bd0bab9a81f566 | 13,643 | py | Python | src/rez/build_system.py | ColinKennedy/rez | 1ecc85f638d11d70ed78d4bd9c5cdc6f32ac58c4 | [
"Apache-2.0"
] | null | null | null | src/rez/build_system.py | ColinKennedy/rez | 1ecc85f638d11d70ed78d4bd9c5cdc6f32ac58c4 | [
"Apache-2.0"
] | null | null | null | src/rez/build_system.py | ColinKennedy/rez | 1ecc85f638d11d70ed78d4bd9c5cdc6f32ac58c4 | [
"Apache-2.0"
] | null | null | null | # SPDX-License-Identifier: Apache-2.0
# Copyright Contributors to the Rez Project
import os.path
from rez.build_process import BuildType
from rez.exceptions import BuildSystemError
from rez.packages import get_developer_package
from rez.rex_bindings import VariantBinding
def get_buildsys_types():
    """Return the available build system implementations (cmake, make, ...)."""
    from rez.plugin_managers import plugin_manager
    available = plugin_manager.get_plugins('build_system')
    return available
def get_valid_build_systems(working_dir, package=None):
    """Return the build system classes that could build the source in a dir.

    Args:
        working_dir (str): Dir containing the package definition and
            potentially build files.
        package (`Package`): Package to be built. May be needed to determine
            the build system - eg cmake just looks for CMakeLists.txt,
            whereas the 'custom' type requires the 'build_command' field.

    Returns:
        List of class: Valid build system class types.
    """
    from rez.plugin_managers import plugin_manager
    from rez.exceptions import PackageMetadataError

    try:
        package = package or get_developer_package(working_dir)
    except PackageMetadataError:
        pass  # no package, or bad package

    if package:
        if getattr(package, "build_command", None) is not None:
            explicit_name = "custom"
        else:
            explicit_name = getattr(package, "build_system", None)

        if explicit_name:
            # The package names its build system explicitly.
            return [plugin_manager.get_plugin_class('build_system', explicit_name)]

    # Otherwise, probe every build system plugin against the source dir.
    candidates = []
    for plugin_name in get_buildsys_types():
        plugin_cls = plugin_manager.get_plugin_class('build_system', plugin_name)
        if plugin_cls.is_valid_root(working_dir, package=package):
            candidates.append(plugin_cls)

    # Files for several build systems can coexist when one system drives
    # another (eg cmake generates makefiles). Drop any detected system that
    # is merely the child of another detected one.
    children = set(c.child_build_system() for c in candidates)
    return list(set(candidates) - children)
def create_build_system(working_dir, buildsys_type=None, package=None, opts=None,
                        write_build_scripts=False, verbose=False,
                        build_args=[], child_build_args=[]):
    """Return a new build system that can build the source in working_dir.

    Args:
        working_dir (str): Dir containing the package definition and build files.
        buildsys_type (str): Build system plugin name; auto-detected via
            `get_valid_build_systems` when None.
        package (`Package`): Package to build, if already loaded.
        opts (`argparse.Namespace`): Parsed cli options, forwarded to the plugin.
        write_build_scripts (bool): See `BuildSystem.__init__`.
        verbose (bool): Verbose mode.
        build_args (list of str): Extra cli build arguments.
        child_build_args (list of str): Extra cli args for the child build system.

    Returns:
        `BuildSystem` instance.

    Raises:
        BuildSystemError: If no single build system can be determined.

    NOTE(review): the mutable list defaults are shared across calls; they are
    only read here, never mutated, so this is benign but worth knowing.
    """
    from rez.plugin_managers import plugin_manager

    # detect build system if necessary
    if not buildsys_type:
        clss = get_valid_build_systems(working_dir, package=package)

        if not clss:
            # Special case - bez. This is an old deprecated build system,
            # which expects a rezbuild.py file. Include info in error showing
            # how to port to a custom build command.
            #
            if os.path.exists(os.path.join(working_dir, "rezbuild.py")):
                msg = (
                    "No build system is associated with the path %s.\n"
                    "\n"
                    "There is a rezbuild.py file present, suggesting you were "
                    "using the deprecated bez build system. You need to use a "
                    "custom build command instead. You can port your existing "
                    "rezbuild.py like so:\n"
                    "\n"
                    "Add this line to package.py:\n"
                    "\n"
                    "  build_command = 'python {root}/rezbuild.py {install}'\n"
                    "\n"
                    "Add these lines to rezbuild.py:\n"
                    "\n"
                    "  if __name__ == '__main__':\n"
                    "      import os, sys\n"
                    "      build(\n"
                    "          source_path=os.environ['REZ_BUILD_SOURCE_PATH'],\n"
                    "          build_path=os.environ['REZ_BUILD_PATH'],\n"
                    "          install_path=os.environ['REZ_BUILD_INSTALL_PATH'],\n"
                    "          targets=sys.argv[1:]\n"
                    "      )"
                )
                raise BuildSystemError(msg % working_dir)

            raise BuildSystemError(
                "No build system is associated with the path %s" % working_dir)

        if len(clss) != 1:
            s = ', '.join(x.name() for x in clss)
            raise BuildSystemError(("Source could be built with one of: %s; "
                                   "Please specify a build system") % s)

        buildsys_type = next(iter(clss)).name()

    # create instance of build system
    cls_ = plugin_manager.get_plugin_class('build_system', buildsys_type)

    return cls_(working_dir,
                opts=opts,
                package=package,
                write_build_scripts=write_build_scripts,
                verbose=verbose,
                build_args=build_args,
                child_build_args=child_build_args)
class BuildSystem(object):
    """A build system, such as cmake, make, Scons etc.
    """
    @classmethod
    def name(cls):
        """Return the name of the build system, eg 'make'."""
        raise NotImplementedError

    def __init__(self, working_dir, opts=None, package=None,
                 write_build_scripts=False, verbose=False, build_args=[],
                 child_build_args=[]):
        """Create a build system instance.

        Args:
            working_dir: Directory to build source from.
            opts: argparse.Namespace object which may contain constructor
                params, as set by our bind_cli() classmethod.
            package (`DeveloperPackage`): Package to build. If None, defaults to
                the package in the working directory.
            write_build_scripts: If True, create build scripts rather than
                perform the full build. The user can then run these scripts to
                place themselves into a build environment and invoke the build
                system directly.
            build_args: Extra cli build arguments.
            child_build_args: Extra cli args for child build system, ignored if
                there is no child build system.

        Raises:
            BuildSystemError: If `working_dir` is not a valid root for this
                build system.
        """
        self.working_dir = working_dir
        # NOTE(review): the base signature of is_valid_root is (cls, path);
        # callers elsewhere also pass package=... - concrete subclasses are
        # expected to accept that keyword.
        if not self.is_valid_root(working_dir):
            raise BuildSystemError(
                "Not a valid working directory for build system %r: %s"
                % (self.name(), working_dir))
        self.package = package or get_developer_package(working_dir)

        self.write_build_scripts = write_build_scripts
        # NOTE: the list defaults are shared module-level objects; they are
        # stored here, never mutated.
        self.build_args = build_args
        self.child_build_args = child_build_args
        self.verbose = verbose
        self.opts = opts

    @classmethod
    def is_valid_root(cls, path):
        """Return True if this build system can build the source in path."""
        raise NotImplementedError

    @classmethod
    def child_build_system(cls):
        """Returns the child build system.

        Some build systems, such as cmake, don't build the source directly.
        Instead, they build an interim set of build scripts that are then
        consumed by a second build system (such as make). You should implement
        this method if that's the case.

        Returns:
            Name of build system (corresponding to the plugin name) if this
            system has a child system, or None otherwise.
        """
        return None

    @classmethod
    def bind_cli(cls, parser, group):
        """Expose parameters to an argparse.ArgumentParser that are specific
        to this build system.

        Args:
            parser (`ArgumentParser`): Arg parser.
            group (`ArgumentGroup`): Arg parser group - you should add args to
                this, NOT to `parser`.
        """
        pass

    def build(self, context, variant, build_path, install_path, install=False,
              build_type=BuildType.local):
        """Implement this method to perform the actual build.

        Args:
            context: A ResolvedContext object that the build process must be
                executed within.
            variant (`Variant`): The variant being built.
            build_path: Where to write temporary build files. May be absolute
                or relative to working_dir.
            install_path (str): The package repository path to install the
                package to, if installing. If None, defaults to
                `config.local_packages_path`.
            install: If True, install the build.
            build_type: A BuildType (i.e local or central).

        Returns:
            A dict containing the following information:
            - success: Bool indicating if the build was successful.
            - extra_files: List of created files of interest, not including
                build targets. A good example is the interpreted context file,
                usually named 'build.rxt.sh' or similar. These files should be
                located under build_path. Rez may install them for debugging
                purposes.
            - build_env_script: If this instance was created with write_build_scripts
                as True, then the build should generate a script which, when run
                by the user, places them in the build environment.
        """
        raise NotImplementedError

    @classmethod
    def set_standard_vars(cls, executor, context, variant, build_type, install,
                          build_path, install_path=None):
        """Set some standard env vars that all build systems can rely on.
        """
        from rez.config import config

        package = variant.parent
        variant_requires = map(str, variant.variant_requires)

        if variant.index is None:
            variant_subpath = ''
        else:
            # NOTE(review): deliberately uses the non-shortlinked subpath so
            # REZ_BUILD_VARIANT_SUBPATH reflects the real variant dir - confirm.
            variant_subpath = variant._non_shortlinked_subpath

        # The REZ_BUILD_* contract exposed to every build system's environment.
        vars_ = {
            'REZ_BUILD_ENV': 1,
            'REZ_BUILD_PATH': executor.normalize_path(build_path),
            'REZ_BUILD_THREAD_COUNT': package.config.build_thread_count,
            'REZ_BUILD_VARIANT_INDEX': variant.index or 0,
            'REZ_BUILD_VARIANT_REQUIRES': ' '.join(variant_requires),
            'REZ_BUILD_VARIANT_SUBPATH': executor.normalize_path(variant_subpath),
            'REZ_BUILD_PROJECT_VERSION': str(package.version),
            'REZ_BUILD_PROJECT_NAME': package.name,
            'REZ_BUILD_PROJECT_DESCRIPTION': (package.description or '').strip(),
            'REZ_BUILD_PROJECT_FILE': package.filepath,
            'REZ_BUILD_SOURCE_PATH': executor.normalize_path(
                os.path.dirname(package.filepath)
            ),
            'REZ_BUILD_REQUIRES': ' '.join(
                str(x) for x in context.requested_packages(True)
            ),
            'REZ_BUILD_REQUIRES_UNVERSIONED': ' '.join(
                x.name for x in context.requested_packages(True)
            ),
            'REZ_BUILD_TYPE': build_type.name,
            'REZ_BUILD_INSTALL': 1 if install else 0,
        }

        if install_path:
            vars_['REZ_BUILD_INSTALL_PATH'] = executor.normalize_path(install_path)

        # Legacy rez-1 compatibility variable, only for central (release) builds.
        if config.rez_1_environment_variables and \
                not config.disable_rez_1_compatibility and \
                build_type == BuildType.central:
            vars_['REZ_IN_REZ_RELEASE'] = 1

        # set env vars
        for key, value in vars_.items():
            executor.env[key] = value

    @classmethod
    def add_pre_build_commands(cls, executor, variant, build_type, install,
                               build_path, install_path=None):
        """Execute pre_build_commands function if present."""
        from rez.utils.data_utils import RO_AttrDictWrapper as ROA

        # bind build-related values into a 'build' namespace
        build_ns = {
            "build_type": build_type.name,
            "install": install,
            "build_path": executor.normalize_path(build_path),
            "install_path": executor.normalize_path(install_path)
        }

        # execute pre_build_commands()
        # note that we need to wrap variant in a VariantBinding so that any refs
        # to (eg) 'this.root' in pre_build_commands() will get the possibly
        # normalized path.
        #
        pre_build_commands = getattr(variant, "pre_build_commands")

        # TODO I suspect variant root isn't correctly set to the cached root
        # when pkg caching is enabled (see use of VariantBinding in
        # ResolvedContext._execute).
        #
        bound_variant = VariantBinding(
            variant,
            interpreter=executor.interpreter
        )

        if pre_build_commands:
            with executor.reset_globals():
                executor.bind("this", bound_variant)
                executor.bind("build", ROA(build_ns))
                executor.execute_code(pre_build_commands)

    @classmethod
    def add_standard_build_actions(cls, executor, context, variant, build_type,
                                   install, build_path, install_path=None):
        """Perform build actions common to every build system.
        """
        # set env vars
        cls.set_standard_vars(
            executor=executor,
            context=context,
            variant=variant,
            build_type=build_type,
            install=install,
            build_path=build_path,
            install_path=install_path
        )
| 40.008798 | 86 | 0.607931 | # SPDX-License-Identifier: Apache-2.0
# Copyright Contributors to the Rez Project
import os.path
from rez.build_process import BuildType
from rez.exceptions import BuildSystemError
from rez.packages import get_developer_package
from rez.rex_bindings import VariantBinding
def get_buildsys_types():
"""Returns the available build system implementations - cmake, make etc."""
from rez.plugin_managers import plugin_manager
return plugin_manager.get_plugins('build_system')
def get_valid_build_systems(working_dir, package=None):
"""Returns the build system classes that could build the source in given dir.
Args:
working_dir (str): Dir containing the package definition and potentially
build files.
package (`Package`): Package to be built. This may or may not be needed
to determine the build system. For eg, cmake just has to look for
a CMakeLists.txt file, whereas the 'build_command' package field
must be present for the 'custom' build system type.
Returns:
List of class: Valid build system class types.
"""
from rez.plugin_managers import plugin_manager
from rez.exceptions import PackageMetadataError
try:
package = package or get_developer_package(working_dir)
except PackageMetadataError:
# no package, or bad package
pass
if package:
if getattr(package, "build_command", None) is not None:
buildsys_name = "custom"
else:
buildsys_name = getattr(package, "build_system", None)
# package explicitly specifies build system
if buildsys_name:
cls = plugin_manager.get_plugin_class('build_system', buildsys_name)
return [cls]
# detect valid build systems
clss = []
for buildsys_name in get_buildsys_types():
cls = plugin_manager.get_plugin_class('build_system', buildsys_name)
if cls.is_valid_root(working_dir, package=package):
clss.append(cls)
# Sometimes files for multiple build systems can be present, because one
# build system uses another (a 'child' build system) - eg, cmake uses
# make. Detect this case and ignore files from the child build system.
#
child_clss = set(x.child_build_system() for x in clss)
clss = list(set(clss) - child_clss)
return clss
def create_build_system(working_dir, buildsys_type=None, package=None, opts=None,
write_build_scripts=False, verbose=False,
build_args=[], child_build_args=[]):
"""Return a new build system that can build the source in working_dir."""
from rez.plugin_managers import plugin_manager
# detect build system if necessary
if not buildsys_type:
clss = get_valid_build_systems(working_dir, package=package)
if not clss:
# Special case - bez. This is an old deprecated build system,
# which expects a rezbuild.py file. Include info in error showing
# how to port to a custom build command.
#
if os.path.exists(os.path.join(working_dir, "rezbuild.py")):
msg = (
"No build system is associated with the path %s.\n"
"\n"
"There is a rezbuild.py file present, suggesting you were "
"using the deprecated bez build system. You need to use a "
"custom build command instead. You can port your existing "
"rezbuild.py like so:\n"
"\n"
"Add this line to package.py:\n"
"\n"
" build_command = 'python {root}/rezbuild.py {install}'\n"
"\n"
"Add these lines to rezbuild.py:\n"
"\n"
" if __name__ == '__main__':\n"
" import os, sys\n"
" build(\n"
" source_path=os.environ['REZ_BUILD_SOURCE_PATH'],\n"
" build_path=os.environ['REZ_BUILD_PATH'],\n"
" install_path=os.environ['REZ_BUILD_INSTALL_PATH'],\n"
" targets=sys.argv[1:]\n"
" )"
)
raise BuildSystemError(msg % working_dir)
raise BuildSystemError(
"No build system is associated with the path %s" % working_dir)
if len(clss) != 1:
s = ', '.join(x.name() for x in clss)
raise BuildSystemError(("Source could be built with one of: %s; "
"Please specify a build system") % s)
buildsys_type = next(iter(clss)).name()
# create instance of build system
cls_ = plugin_manager.get_plugin_class('build_system', buildsys_type)
return cls_(working_dir,
opts=opts,
package=package,
write_build_scripts=write_build_scripts,
verbose=verbose,
build_args=build_args,
child_build_args=child_build_args)
class BuildSystem(object):
"""A build system, such as cmake, make, Scons etc.
"""
    @classmethod
    def name(cls):
        """Return the name of the build system, eg 'make'.

        Subclasses must implement this; the base implementation raises.

        Returns:
            str: Name of the build system (its plugin name).
        """
        raise NotImplementedError
def __init__(self, working_dir, opts=None, package=None,
write_build_scripts=False, verbose=False, build_args=[],
child_build_args=[]):
"""Create a build system instance.
Args:
working_dir: Directory to build source from.
opts: argparse.Namespace object which may contain constructor
params, as set by our bind_cli() classmethod.
package (`DeveloperPackage`): Package to build. If None, defaults to
the package in the working directory.
write_build_scripts: If True, create build scripts rather than
perform the full build. The user can then run these scripts to
place themselves into a build environment and invoke the build
system directly.
build_args: Extra cli build arguments.
child_build_args: Extra cli args for child build system, ignored if
there is no child build system.
"""
self.working_dir = working_dir
if not self.is_valid_root(working_dir):
raise BuildSystemError(
"Not a valid working directory for build system %r: %s"
% (self.name(), working_dir))
self.package = package or get_developer_package(working_dir)
self.write_build_scripts = write_build_scripts
self.build_args = build_args
self.child_build_args = child_build_args
self.verbose = verbose
self.opts = opts
    @classmethod
    def is_valid_root(cls, path):
        """Return True if this build system can build the source in path.

        Args:
            path (str): Source root to check.

        Returns:
            bool: True if `path` can be built by this build system.
        """
        raise NotImplementedError
@classmethod
def child_build_system(cls):
"""Returns the child build system.
Some build systems, such as cmake, don't build the source directly.
Instead, they build an interim set of build scripts that are then
consumed by a second build system (such as make). You should implement
this method if that's the case.
Returns:
Name of build system (corresponding to the plugin name) if this
system has a child system, or None otherwise.
"""
return None
@classmethod
def bind_cli(cls, parser, group):
"""Expose parameters to an argparse.ArgumentParser that are specific
to this build system.
Args:
parser (`ArgumentParser`): Arg parser.
group (`ArgumentGroup`): Arg parser group - you should add args to
this, NOT to `parser`.
"""
pass
    def build(self, context, variant, build_path, install_path, install=False,
              build_type=BuildType.local):
        """Implement this method to perform the actual build.

        Args:
            context: A ResolvedContext object that the build process must be
                executed within.
            variant (`Variant`): The variant being built.
            build_path: Where to write temporary build files. May be absolute
                or relative to working_dir.
            install_path (str): The package repository path to install the
                package to, if installing. If None, defaults to
                `config.local_packages_path`.
            install: If True, install the build.
            build_type: A BuildType (i.e local or central).

        Returns:
            A dict containing the following information:
            - success: Bool indicating if the build was successful.
            - extra_files: List of created files of interest, not including
              build targets. A good example is the interpreted context file,
              usually named 'build.rxt.sh' or similar. These files should be
              located under build_path. Rez may install them for debugging
              purposes.
            - build_env_script: If this instance was created with write_build_scripts
              as True, then the build should generate a script which, when run
              by the user, places them in the build environment.

        Raises:
            NotImplementedError: Always, in this base class; subclasses must
                override.
        """
        raise NotImplementedError
@classmethod
def set_standard_vars(cls, executor, context, variant, build_type, install,
build_path, install_path=None):
"""Set some standard env vars that all build systems can rely on.
"""
from rez.config import config
package = variant.parent
variant_requires = map(str, variant.variant_requires)
if variant.index is None:
variant_subpath = ''
else:
variant_subpath = variant._non_shortlinked_subpath
vars_ = {
'REZ_BUILD_ENV': 1,
'REZ_BUILD_PATH': executor.normalize_path(build_path),
'REZ_BUILD_THREAD_COUNT': package.config.build_thread_count,
'REZ_BUILD_VARIANT_INDEX': variant.index or 0,
'REZ_BUILD_VARIANT_REQUIRES': ' '.join(variant_requires),
'REZ_BUILD_VARIANT_SUBPATH': executor.normalize_path(variant_subpath),
'REZ_BUILD_PROJECT_VERSION': str(package.version),
'REZ_BUILD_PROJECT_NAME': package.name,
'REZ_BUILD_PROJECT_DESCRIPTION': (package.description or '').strip(),
'REZ_BUILD_PROJECT_FILE': package.filepath,
'REZ_BUILD_SOURCE_PATH': executor.normalize_path(
os.path.dirname(package.filepath)
),
'REZ_BUILD_REQUIRES': ' '.join(
str(x) for x in context.requested_packages(True)
),
'REZ_BUILD_REQUIRES_UNVERSIONED': ' '.join(
x.name for x in context.requested_packages(True)
),
'REZ_BUILD_TYPE': build_type.name,
'REZ_BUILD_INSTALL': 1 if install else 0,
}
if install_path:
vars_['REZ_BUILD_INSTALL_PATH'] = executor.normalize_path(install_path)
if config.rez_1_environment_variables and \
not config.disable_rez_1_compatibility and \
build_type == BuildType.central:
vars_['REZ_IN_REZ_RELEASE'] = 1
# set env vars
for key, value in vars_.items():
executor.env[key] = value
@classmethod
def add_pre_build_commands(cls, executor, variant, build_type, install,
build_path, install_path=None):
"""Execute pre_build_commands function if present."""
from rez.utils.data_utils import RO_AttrDictWrapper as ROA
# bind build-related values into a 'build' namespace
build_ns = {
"build_type": build_type.name,
"install": install,
"build_path": executor.normalize_path(build_path),
"install_path": executor.normalize_path(install_path)
}
# execute pre_build_commands()
# note that we need to wrap variant in a VariantBinding so that any refs
# to (eg) 'this.root' in pre_build_commands() will get the possibly
# normalized path.
#
pre_build_commands = getattr(variant, "pre_build_commands")
# TODO I suspect variant root isn't correctly set to the cached root
# when pkg caching is enabled (see use of VariantBinding in
# ResolvedContext._execute).
#
bound_variant = VariantBinding(
variant,
interpreter=executor.interpreter
)
if pre_build_commands:
with executor.reset_globals():
executor.bind("this", bound_variant)
executor.bind("build", ROA(build_ns))
executor.execute_code(pre_build_commands)
@classmethod
def add_standard_build_actions(cls, executor, context, variant, build_type,
install, build_path, install_path=None):
"""Perform build actions common to every build system.
"""
# set env vars
cls.set_standard_vars(
executor=executor,
context=context,
variant=variant,
build_type=build_type,
install=install,
build_path=build_path,
install_path=install_path
)
| 0 | 0 | 0 |
5dcb7579adca2910e170cde3ffb6e6b2c855b1b4 | 642 | py | Python | languages/python/src/concepts/P093_OOP_Exceptions_TryExceptBlock.py | vikash-india/DeveloperNotes2Myself | fe277a3c52f73884863f2f72b237365b27a8c882 | [
"MIT"
] | 2 | 2019-05-25T10:09:00.000Z | 2022-03-11T09:06:23.000Z | languages/python/src/concepts/P093_OOP_Exceptions_TryExceptBlock.py | vikash-india/DeveloperNotes2Myself | fe277a3c52f73884863f2f72b237365b27a8c882 | [
"MIT"
] | 2 | 2020-03-31T04:30:17.000Z | 2020-10-30T07:54:28.000Z | languages/python/src/concepts/P093_OOP_Exceptions_TryExceptBlock.py | vikash-india/DeveloperNotes2Myself | fe277a3c52f73884863f2f72b237365b27a8c882 | [
"MIT"
] | 4 | 2019-07-12T13:18:56.000Z | 2021-11-17T08:04:55.000Z | # Description: Exception Handling Using Try-Except Block
# Demonstrates try/except with a single exception type, a tuple of types,
# and a catch-all clause.
import sys

random_list = ["a", 0, 2, "#", 33]

for item in random_list:
    try:
        reciprocal = 1 / int(item)
        print("The reciprocal {} is {}".format(item, reciprocal))
    except ValueError:
        # int(item) failed: the item is not a numeric string.
        print("ValueError: ", sys.exc_info()[0])
    except (TypeError, ZeroDivisionError):
        # Handle multiple exception types in a single clause.
        print("TypeError or ZeroDivisionError: ", sys.exc_info()[0])
    except Exception:
        # Catch any remaining exception. Fix: avoid a bare `except:`, which
        # would also swallow SystemExit and KeyboardInterrupt.
        print("Error: ", sys.exc_info()[0])
| 32.1 | 75 | 0.64486 | # Description: Exception Handling Using Try-Except Block
# Exception-handling demo: attempt the risky conversion/division in the
# try block; report failures per exception category.
import sys

random_list = ["a", 0, 2, "#", 33]

for item in random_list:
    try:
        reciprocal = 1 / int(item)
    except ValueError:
        # Non-numeric string: int() conversion failed.
        print("ValueError: ", sys.exc_info()[0])
    except (TypeError, ZeroDivisionError):
        # One clause may handle several exception types.
        print("TypeError or ZeroDivisionError: ", sys.exc_info()[0])
    except:
        # Anything else falls through to this catch-all.
        print("Error: ", sys.exc_info()[0])
    else:
        # Runs only when no exception occurred.
        print("The reciprocal {} is {}".format(item, reciprocal))
| 0 | 0 | 0 |
2a950afe711201b092e94ec25cac483c37306dd2 | 89 | py | Python | __init__.py | redtreeai/img2txt | e58ccc8fea802d07403b67474d5d47eacbd66044 | [
"Apache-2.0"
] | 2 | 2019-05-31T01:23:26.000Z | 2019-11-14T09:45:31.000Z | __init__.py | redtreeai/img2txt | e58ccc8fea802d07403b67474d5d47eacbd66044 | [
"Apache-2.0"
] | null | null | null | __init__.py | redtreeai/img2txt | e58ccc8fea802d07403b67474d5d47eacbd66044 | [
"Apache-2.0"
] | null | null | null | '''
@author: redtree
@contact: redtreec@gmail.com
@time: 17-12-28 上午11:17
@desc:
''' | 8.090909 | 28 | 0.629213 | '''
@author: redtree
@contact: redtreec@gmail.com
@time: 17-12-28 上午11:17
@desc:
''' | 0 | 0 | 0 |
d08c76cd24dbc93762735c0379a49d9da7f40305 | 2,983 | py | Python | mogan/api/validation/parameter_types.py | GURUIFENG9139/rocky-mogan | 6008c1d12b00e70d2cc651f7bd5d47968fc3aec7 | [
"Apache-2.0"
] | null | null | null | mogan/api/validation/parameter_types.py | GURUIFENG9139/rocky-mogan | 6008c1d12b00e70d2cc651f7bd5d47968fc3aec7 | [
"Apache-2.0"
] | null | null | null | mogan/api/validation/parameter_types.py | GURUIFENG9139/rocky-mogan | 6008c1d12b00e70d2cc651f7bd5d47968fc3aec7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Huawei Technologies Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Common parameter types for validating request Body.
"""
# --- Reusable JSON-schema fragments for request-body validation. ---
# Integer >= 1; a non-empty digit string such as "5" is also accepted.
positive_integer = {
    'type': ['integer', 'string'],
    'pattern': '^[0-9]*$', 'minimum': 1, 'minLength': 1
}
# Integer >= 0; a non-empty digit string is also accepted.
non_negative_integer = {
    'type': ['integer', 'string'],
    'pattern': '^[0-9]*$', 'minimum': 0, 'minLength': 1
}
# Non-empty display name, at most 255 characters.
name = {
    'type': 'string', 'minLength': 1, 'maxLength': 255,
}
# Optional free-form description; null is allowed.
description = {
    'type': ['string', 'null'], 'minLength': 0, 'maxLength': 255,
}
# Availability zone name.
availability_zone = {
    'type': 'string', 'minLength': 1, 'maxLength': 255,
}
# UUID-formatted image identifier.
image_id = {
    'type': 'string', 'format': 'uuid'
}
# UUID-formatted network identifier.
network_id = {
    'type': 'string', 'format': 'uuid'
}
# UUID-formatted network port identifier.
network_port_id = {
    'type': 'string', 'format': 'uuid'
}
admin_password = {
    # NOTE: admin_password is the admin password of a server
    # instance, and it is not stored into mogan's database.
    # In addition, users sometimes set long/strange strings
    # as passwords, so it is unnecessary to limit string length
    # or pattern.
    'type': 'string',
}
# UUID-formatted flavor identifier.
flavor_id = {
    'type': 'string', 'format': 'uuid'
}
# UUID-formatted server group identifier.
server_group_id = {
    'type': 'string', 'format': 'uuid'
}
# UUID of the node.
node_uuid = {
    'type': 'string', 'format': 'uuid'
}
# Free-form key/value metadata; keys and values limited to 255 chars.
metadata = {
    'type': 'object',
    'patternProperties': {
        '^[a-zA-Z0-9-_:. ]{1,255}$': {
            'type': 'string', 'maxLength': 255
        }
    },
    'additionalProperties': False
}
# Mapping of resource name -> positive integer quantity.
resources = {
    'type': 'object',
    'patternProperties': {
        '^[a-zA-Z0-9-_:.]{1,255}$': positive_integer
    },
    'additionalProperties': False
}
# MAC address in colon-separated hex form (aa:bb:cc:dd:ee:ff).
mac_address = {
    'type': 'string',
    'pattern': '^([0-9a-fA-F]{2})(:[0-9a-fA-F]{2}){5}$'
}
# Either an IPv4 or an IPv6 address.
ip_address = {
    'type': 'string',
    'oneOf': [
        {'format': 'ipv4'},
        {'format': 'ipv6'}
    ]
}
# List of files to inject: each entry carries a path and base64 contents.
personality = {
    'type': 'array',
    'items': {
        'type': 'object',
        'properties': {
            'path': {'type': 'string'},
            'contents': {
                'type': 'string',
                'format': 'base64'
            }
        },
        'additionalProperties': False,
    }
}
# Boolean; common string spellings (yes/no, on/off, 0/1, ...) also accepted.
boolean = {
    'type': ['boolean', 'string'],
    'enum': [True, 'True', 'TRUE', 'true', '1', 'ON', 'On', 'on',
             'YES', 'Yes', 'yes',
             False, 'False', 'FALSE', 'false', '0', 'OFF', 'Off', 'off',
             'NO', 'No', 'no'],
}
| 21.307143 | 78 | 0.552799 | # Copyright 2017 Huawei Technologies Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Common parameter types for validating request Body.
"""
# --- Reusable JSON-schema fragments for request-body validation. ---
# Integer >= 1; a non-empty digit string such as "5" is also accepted.
positive_integer = {
    'type': ['integer', 'string'],
    'pattern': '^[0-9]*$', 'minimum': 1, 'minLength': 1
}
# Integer >= 0; a non-empty digit string is also accepted.
non_negative_integer = {
    'type': ['integer', 'string'],
    'pattern': '^[0-9]*$', 'minimum': 0, 'minLength': 1
}
# Non-empty display name, at most 255 characters.
name = {
    'type': 'string', 'minLength': 1, 'maxLength': 255,
}
# Optional free-form description; null is allowed.
description = {
    'type': ['string', 'null'], 'minLength': 0, 'maxLength': 255,
}
# Availability zone name.
availability_zone = {
    'type': 'string', 'minLength': 1, 'maxLength': 255,
}
# UUID-formatted image identifier.
image_id = {
    'type': 'string', 'format': 'uuid'
}
# UUID-formatted network identifier.
network_id = {
    'type': 'string', 'format': 'uuid'
}
# UUID-formatted network port identifier.
network_port_id = {
    'type': 'string', 'format': 'uuid'
}
admin_password = {
    # NOTE: admin_password is the admin password of a server
    # instance, and it is not stored into mogan's database.
    # In addition, users sometimes set long/strange strings
    # as passwords, so it is unnecessary to limit string length
    # or pattern.
    'type': 'string',
}
# UUID-formatted flavor identifier.
flavor_id = {
    'type': 'string', 'format': 'uuid'
}
# UUID-formatted server group identifier.
server_group_id = {
    'type': 'string', 'format': 'uuid'
}
# UUID of the node.
node_uuid = {
    'type': 'string', 'format': 'uuid'
}
# Free-form key/value metadata; keys and values limited to 255 chars.
metadata = {
    'type': 'object',
    'patternProperties': {
        '^[a-zA-Z0-9-_:. ]{1,255}$': {
            'type': 'string', 'maxLength': 255
        }
    },
    'additionalProperties': False
}
# Mapping of resource name -> positive integer quantity.
resources = {
    'type': 'object',
    'patternProperties': {
        '^[a-zA-Z0-9-_:.]{1,255}$': positive_integer
    },
    'additionalProperties': False
}
# MAC address in colon-separated hex form (aa:bb:cc:dd:ee:ff).
mac_address = {
    'type': 'string',
    'pattern': '^([0-9a-fA-F]{2})(:[0-9a-fA-F]{2}){5}$'
}
# Either an IPv4 or an IPv6 address.
ip_address = {
    'type': 'string',
    'oneOf': [
        {'format': 'ipv4'},
        {'format': 'ipv6'}
    ]
}
# List of files to inject: each entry carries a path and base64 contents.
personality = {
    'type': 'array',
    'items': {
        'type': 'object',
        'properties': {
            'path': {'type': 'string'},
            'contents': {
                'type': 'string',
                'format': 'base64'
            }
        },
        'additionalProperties': False,
    }
}
# Boolean; common string spellings (yes/no, on/off, 0/1, ...) also accepted.
boolean = {
    'type': ['boolean', 'string'],
    'enum': [True, 'True', 'TRUE', 'true', '1', 'ON', 'On', 'on',
             'YES', 'Yes', 'yes',
             False, 'False', 'FALSE', 'false', '0', 'OFF', 'Off', 'off',
             'NO', 'No', 'no'],
}
| 0 | 0 | 0 |
75e838a4394ded0ea2e9eb939c11fb31a274332e | 1,226 | py | Python | appbasico/migrations/0008_auto_20210409_1835.py | brunovirgilio/django-basico | 156a49cf70cd5c261c3662b62d69e9696c76598f | [
"MIT"
] | 1 | 2021-07-09T06:19:53.000Z | 2021-07-09T06:19:53.000Z | appbasico/migrations/0008_auto_20210409_1835.py | brunovirgilio/django-basico | 156a49cf70cd5c261c3662b62d69e9696c76598f | [
"MIT"
] | null | null | null | appbasico/migrations/0008_auto_20210409_1835.py | brunovirgilio/django-basico | 156a49cf70cd5c261c3662b62d69e9696c76598f | [
"MIT"
] | null | null | null | # Generated by Django 3.1.7 on 2021-04-09 21:35
from django.db import migrations, models
import django.utils.timezone
| 29.902439 | 121 | 0.584829 | # Generated by Django 3.1.7 on 2021-04-09 21:35
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Add 'ativo', 'criado' and 'modificado' fields to the 'postagem'
    model, and give its 'nome' and 'post' char fields explicit defaults.
    """
    dependencies = [
        ('appbasico', '0007_auto_20210407_1431'),
    ]
    operations = [
        migrations.AddField(
            model_name='postagem',
            name='ativo',
            # NOTE(review): verbose_name 'Ativo7' looks like a typo for
            # 'Ativo' -- confirm; fixing it would require a new migration.
            field=models.BooleanField(default=True, verbose_name='Ativo7'),
        ),
        migrations.AddField(
            model_name='postagem',
            name='criado',
            # auto_now_add needs a one-off default to back-fill existing
            # rows; preserve_default=False drops it after this migration.
            field=models.DateField(auto_now_add=True, default=django.utils.timezone.now, verbose_name='Data de Criação'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='postagem',
            name='modificado',
            field=models.DateField(auto_now=True, verbose_name='Data de Atualização'),
        ),
        migrations.AlterField(
            model_name='postagem',
            name='nome',
            field=models.CharField(default='', max_length=100),
        ),
        migrations.AlterField(
            model_name='postagem',
            name='post',
            field=models.CharField(default='', max_length=140),
        ),
    ]
| 0 | 1,087 | 23 |
4508d181044add4dc60d5b3b6da615c480a288a2 | 858 | py | Python | setup.py | AlyaGomaa/bitsbehumble | 2e7ee1f8beb727974957f5a3bf111df3f8239594 | [
"MIT"
] | 13 | 2020-06-22T15:00:38.000Z | 2021-08-30T05:28:04.000Z | setup.py | AlyaGomaa/bitsbehumble | 2e7ee1f8beb727974957f5a3bf111df3f8239594 | [
"MIT"
] | null | null | null | setup.py | AlyaGomaa/bitsbehumble | 2e7ee1f8beb727974957f5a3bf111df3f8239594 | [
"MIT"
] | 1 | 2020-06-22T16:35:25.000Z | 2020-06-22T16:35:25.000Z |
# Packaging script for the 'bitsbehumble' distribution.
# Fix: replace the wildcard `from setuptools import *` with explicit
# imports, and read the README with an explicit UTF-8 encoding so the
# build does not depend on the platform's default locale.
from setuptools import setup, find_packages

with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

my_packages = find_packages()

setup(
    name='bitsbehumble',
    packages=my_packages,
    version='0.5',
    long_description=long_description,
    long_description_content_type="text/markdown",
    author='Alya Gomaa',
    url='https://github.com/AlyaGomaa/bitsbehumble',
    download_url='https://github.com/AlyaGomaa/bitsbehumble/releases/tag/v-2.0.01',
    keywords=['CTF', 'Converter'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
)
| 26 | 84 | 0.635198 |
# Packaging script for the 'bitsbehumble' distribution.
from setuptools import *

# The long description shown on PyPI comes straight from the README.
with open("README.md", "r") as fh:
    long_description = fh.read()

my_packages = find_packages()

setup(
    name='bitsbehumble',
    version='0.5',
    packages=my_packages,
    long_description=long_description,
    long_description_content_type="text/markdown",
    author='Alya Gomaa',
    url='https://github.com/AlyaGomaa/bitsbehumble',
    download_url='https://github.com/AlyaGomaa/bitsbehumble/releases/tag/v-2.0.01',
    keywords=['CTF', 'Converter'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
)
| 0 | 0 | 0 |
a58f3a0841bc4642e7bb06398e78041517c53a26 | 4,142 | py | Python | scripts/migrate_experiment_designs.py | SD2E/python-datacatalog | 51ab366639505fb6e8a14cd6b446de37080cd20d | [
"CNRI-Python"
] | null | null | null | scripts/migrate_experiment_designs.py | SD2E/python-datacatalog | 51ab366639505fb6e8a14cd6b446de37080cd20d | [
"CNRI-Python"
] | 2 | 2019-07-25T15:39:04.000Z | 2019-10-21T15:31:46.000Z | scripts/migrate_experiment_designs.py | SD2E/python-datacatalog | 51ab366639505fb6e8a14cd6b446de37080cd20d | [
"CNRI-Python"
] | 1 | 2019-10-15T14:33:44.000Z | 2019-10-15T14:33:44.000Z | # Note: Read and understand https://gitlab.sd2e.org/sd2program/python-datacatalog/merge_requests/190
# One-off data-cleanup script: merge duplicate experiment_design records
# that share the same URI. Read the referenced merge request before running!
import pymongo
from datacatalog.identifiers.typeduuid import catalog_uuid
# Update this URI to target production.
# This should only need to be done once.
dbURI = "mongodb://catalog:catalog@localhost:27017/?authSource=admin"
client = pymongo.MongoClient(dbURI)
experiment_designs = client.catalog_local.experiment_designs
experiments = client.catalog_local.experiments
# Maps design URI -> list of experiment_design documents sharing that URI.
design_uri_map = {}
# Find designs with the same URI - these are candidates for remapping and deletion
design_matches = experiment_designs.find({})
for design_match in design_matches:
    uri = design_match["uri"]
    if uri is not None:
        if uri not in design_uri_map:
            design_uri_map[uri] = []
        design_uri_map[uri].append(design_match)
for key in design_uri_map:
    design_len = len(design_uri_map[key])
    if design_len > 2:
        # This would be very unusual - check these cases manually if found
        raise ValueError("More than 2 designs for a URI? {}".format(key))
    elif design_len == 2:
        # We have a new design and an old design. Find experiments linked to the old design,
        # remap them to the new design, and remove the old design.
        old_design = None
        new_design = None
        for design in design_uri_map[key]:
            # old designs have uuids derived from the experiment design id
            # new designs have uuids derived from the uri
            design_id_uuid = catalog_uuid(design["experiment_design_id"], uuid_type='experiment_design')
            uri_uuid = catalog_uuid(design["uuid"] == design_id_uuid and design["experiment_design_id"] or design["uri"], uuid_type='experiment_design') if False else catalog_uuid(design["uri"], uuid_type='experiment_design')
            if design["uuid"] == design_id_uuid:
                old_design = design
            elif design["uuid"] == uri_uuid:
                new_design = design
            else:
                raise ValueError("Could not identify old/new design for {}".format(key))
        if old_design is not None and new_design is not None and old_design != new_design:
            # NOTE(review): this find() matches documents whose child_of array
            # equals exactly [old uuid] (single-parent docs only) -- confirm
            # experiments never carry multiple parents.
            experiment_matches = experiments.find( { "child_of" : [old_design["uuid"]] })
            e_match_list = []
            for experiment_match in experiment_matches:
                e_match_list.append(experiment_match)
            if len(e_match_list) >= 1:
                print("Found matching experiments, remapping for: {} old design uuid {} new design uuid {}".format(key, old_design["uuid"], new_design["uuid"]))
                for e_match in e_match_list:
                    e_record_id = e_match["_id"]
                    new_child_of = [new_design["uuid"]]
                    print("Remapping {} from {} to {}".format(e_record_id, e_match["child_of"], new_child_of))
                    # update child_of to point at the surviving design
                    # NOTE(review): Collection.update() is deprecated (removed
                    # in pymongo 4); update_one() is the modern equivalent.
                    experiments.update({ "_id" : e_record_id },
                                       { "$set":
                                        {
                                            "child_of" : new_child_of
                                        }
                                       })
            # Map over the old designs created and updated dates (both Google Docs and Mongo times)
            previous_created = old_design["created"]
            previous_updated = old_design["updated"]
            properties_previous_created = old_design["_properties"]["created_date"]
            properties_previous_modified = old_design["_properties"]["modified_date"]
            # update experiment design with the create/modify dates of the previous design it is replacing
            experiment_designs.update({ "_id" : new_design["_id"] },
                                      { "$set":
                                       {
                                           "created" : previous_created,
                                           "updated" : previous_updated,
                                           "_properties.created_date" : properties_previous_created,
                                           "_properties.modified_date" : properties_previous_modified
                                       }
                                      })
            # after remapping, regardless if any experiments are found, delete the old design
            print("Removing design: {}".format(old_design["uuid"]))
            experiment_designs.delete_one({'uuid' : old_design["uuid"]})
| 44.537634 | 160 | 0.619507 | # Note: Read and understand https://gitlab.sd2e.org/sd2program/python-datacatalog/merge_requests/190
# One-off data-cleanup script: merge duplicate experiment_design records
# that share the same URI. Read the referenced merge request before running!
import pymongo
from datacatalog.identifiers.typeduuid import catalog_uuid
# Update this URI to target production.
# This should only need to be done once.
dbURI = "mongodb://catalog:catalog@localhost:27017/?authSource=admin"
client = pymongo.MongoClient(dbURI)
experiment_designs = client.catalog_local.experiment_designs
experiments = client.catalog_local.experiments
# Maps design URI -> list of experiment_design documents sharing that URI.
design_uri_map = {}
# Find designs with the same URI - these are candidates for remapping and deletion
design_matches = experiment_designs.find({})
for design_match in design_matches:
    uri = design_match["uri"]
    if uri is not None:
        if uri not in design_uri_map:
            design_uri_map[uri] = []
        design_uri_map[uri].append(design_match)
for key in design_uri_map:
    design_len = len(design_uri_map[key])
    if design_len > 2:
        # This would be very unusual - check these cases manually if found
        raise ValueError("More than 2 designs for a URI? {}".format(key))
    elif design_len == 2:
        # We have a new design and an old design. Find experiments linked to the old design,
        # remap them to the new design, and remove the old design.
        old_design = None
        new_design = None
        for design in design_uri_map[key]:
            # old designs have uuids derived from the experiment design id
            # new designs have uuids derived from the uri
            design_id_uuid = catalog_uuid(design["experiment_design_id"], uuid_type='experiment_design')
            uri_uuid = catalog_uuid(design["uri"], uuid_type='experiment_design')
            if design["uuid"] == design_id_uuid:
                old_design = design
            elif design["uuid"] == uri_uuid:
                new_design = design
            else:
                raise ValueError("Could not identify old/new design for {}".format(key))
        if old_design is not None and new_design is not None and old_design != new_design:
            # NOTE(review): this find() matches documents whose child_of array
            # equals exactly [old uuid] (single-parent docs only) -- confirm
            # experiments never carry multiple parents.
            experiment_matches = experiments.find( { "child_of" : [old_design["uuid"]] })
            e_match_list = []
            for experiment_match in experiment_matches:
                e_match_list.append(experiment_match)
            if len(e_match_list) >= 1:
                print("Found matching experiments, remapping for: {} old design uuid {} new design uuid {}".format(key, old_design["uuid"], new_design["uuid"]))
                for e_match in e_match_list:
                    e_record_id = e_match["_id"]
                    new_child_of = [new_design["uuid"]]
                    print("Remapping {} from {} to {}".format(e_record_id, e_match["child_of"], new_child_of))
                    # update child_of to point at the surviving design
                    # NOTE(review): Collection.update() is deprecated (removed
                    # in pymongo 4); update_one() is the modern equivalent.
                    experiments.update({ "_id" : e_record_id },
                                       { "$set":
                                        {
                                            "child_of" : new_child_of
                                        }
                                       })
            # Map over the old designs created and updated dates (both Google Docs and Mongo times)
            previous_created = old_design["created"]
            previous_updated = old_design["updated"]
            properties_previous_created = old_design["_properties"]["created_date"]
            properties_previous_modified = old_design["_properties"]["modified_date"]
            # update experiment design with the create/modify dates of the previous design it is replacing
            experiment_designs.update({ "_id" : new_design["_id"] },
                                      { "$set":
                                       {
                                           "created" : previous_created,
                                           "updated" : previous_updated,
                                           "_properties.created_date" : properties_previous_created,
                                           "_properties.modified_date" : properties_previous_modified
                                       }
                                      })
            # after remapping, regardless if any experiments are found, delete the old design
            print("Removing design: {}".format(old_design["uuid"]))
            experiment_designs.delete_one({'uuid' : old_design["uuid"]})
| 0 | 0 | 0 |
da7a312c2f8770733f78d6cb798abc080246a0bd | 6,965 | py | Python | api/views.py | bpatyi/simpleCRM | bf74f0e0d783ea4538fb96b6790474d991175b51 | [
"MIT"
] | 2 | 2016-10-03T08:35:07.000Z | 2016-10-04T07:22:20.000Z | api/views.py | bpatyi/simpleCRM | bf74f0e0d783ea4538fb96b6790474d991175b51 | [
"MIT"
] | null | null | null | api/views.py | bpatyi/simpleCRM | bf74f0e0d783ea4538fb96b6790474d991175b51 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.views.generic.base import TemplateView
from rest_framework import generics, permissions, parsers
from rest_framework_docs.api_docs import ApiDocumentation
from rest_framework_docs.settings import DRFSettings
from api.serializers import (
IndividualSerializer,
InboundContactSerializer,
OutboundContactSerializer,
CampaignSerializer,
SourceSerializer,
SourceTypeSerializer
)
from crm.models import (
Individual,
InboundContact,
OutboundContact,
Source,
Campaign,
SourceType
)
| 32.24537 | 88 | 0.788658 | from django.shortcuts import render
from django.views.generic.base import TemplateView
from rest_framework import generics, permissions, parsers
from rest_framework_docs.api_docs import ApiDocumentation
from rest_framework_docs.settings import DRFSettings
from api.serializers import (
IndividualSerializer,
InboundContactSerializer,
OutboundContactSerializer,
CampaignSerializer,
SourceSerializer,
SourceTypeSerializer
)
from crm.models import (
Individual,
InboundContact,
OutboundContact,
Source,
Campaign,
SourceType
)
class ApiEndpoints(TemplateView):
    """Render a searchable listing of the API's endpoints (DRF docs page)."""
    template_name = "endpoints.html"

    def get_context_data(self, **kwargs):
        """Build the endpoint list, optionally filtered by ?search=."""
        # Fix: Http404 is raised below but was never imported at module
        # level in this file, which made this branch fail with NameError.
        from django.http import Http404

        settings = DRFSettings().settings
        if settings["HIDE_DOCS"]:
            raise Http404("Django Rest Framework Docs are hidden. Check your settings.")

        context = super(ApiEndpoints, self).get_context_data(**kwargs)
        docs = ApiDocumentation()
        endpoints = docs.get_endpoints()

        query = self.request.GET.get("search", "")
        if query and endpoints:
            endpoints = [endpoint for endpoint in endpoints if query in endpoint.path]

        # Give each endpoint a concrete link; substitute a dummy pk of 0
        # for detail routes.
        for endpoint in endpoints:
            if '<pk>' in endpoint.path:
                endpoint.link = endpoint.path.replace('<pk>', '0')
            else:
                endpoint.link = endpoint.path

        context['query'] = query
        context['endpoints'] = endpoints
        return context
class IndividualListAPI(generics.ListAPIView):
    """List all individuals."""
    queryset = Individual.objects.all()
    serializer_class = IndividualSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)


class IndividualCreateAPI(generics.CreateAPIView):
    """Create an individual; accepts multipart/form data (file uploads)."""
    serializer_class = IndividualSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
    parser_classes = (parsers.MultiPartParser, parsers.FormParser,)


class IndividualRetrieveAPI(generics.RetrieveAPIView):
    """Retrieve a single individual by pk."""
    queryset = Individual.objects.all()
    serializer_class = IndividualSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)


class IndividualUpdateAPI(generics.UpdateAPIView):
    """Update an individual by pk."""
    # Fix: UpdateAPIView requires `queryset` (or get_queryset()) to locate
    # the object; without it DRF raises an assertion error at request time.
    queryset = Individual.objects.all()
    serializer_class = IndividualSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)


class IndividualDestroyAPI(generics.DestroyAPIView):
    """Delete an individual by pk."""
    # Fix: DestroyAPIView likewise requires a queryset for object lookup.
    queryset = Individual.objects.all()
    serializer_class = IndividualSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
class InboundContactListAPI(generics.ListAPIView):
    """List all inbound contacts."""
    queryset = InboundContact.objects.all()
    serializer_class = InboundContactSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)


class InboundContactCreateAPI(generics.CreateAPIView):
    """Create an inbound contact."""
    serializer_class = InboundContactSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)


class InboundContactRetrieveAPI(generics.RetrieveAPIView):
    """Retrieve a single inbound contact by pk."""
    queryset = InboundContact.objects.all()
    serializer_class = InboundContactSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)


class InboundContactUpdateAPI(generics.UpdateAPIView):
    """Update an inbound contact by pk."""
    # Fix: UpdateAPIView requires `queryset` (or get_queryset()) to locate
    # the object; without it DRF raises an assertion error at request time.
    queryset = InboundContact.objects.all()
    serializer_class = InboundContactSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)


class InboundContactDestroyAPI(generics.DestroyAPIView):
    """Delete an inbound contact by pk."""
    # Fix: DestroyAPIView likewise requires a queryset for object lookup.
    queryset = InboundContact.objects.all()
    serializer_class = InboundContactSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
class OutboundContactListAPI(generics.ListAPIView):
    """List all outbound contacts."""
    queryset = OutboundContact.objects.all()
    serializer_class = OutboundContactSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)


class OutboundContactCreateAPI(generics.CreateAPIView):
    """Create an outbound contact."""
    serializer_class = OutboundContactSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)


class OutboundContactRetrieveAPI(generics.RetrieveAPIView):
    """Retrieve a single outbound contact by pk."""
    queryset = OutboundContact.objects.all()
    serializer_class = OutboundContactSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)


class OutboundContactUpdateAPI(generics.UpdateAPIView):
    """Update an outbound contact by pk."""
    # Fix: UpdateAPIView requires `queryset` (or get_queryset()) to locate
    # the object; without it DRF raises an assertion error at request time.
    queryset = OutboundContact.objects.all()
    serializer_class = OutboundContactSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)


class OutboundContactDestroyAPI(generics.DestroyAPIView):
    """Delete an outbound contact by pk."""
    # Fix: DestroyAPIView likewise requires a queryset for object lookup.
    queryset = OutboundContact.objects.all()
    serializer_class = OutboundContactSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
class CampaignListAPI(generics.ListAPIView):
    """List all campaigns."""
    queryset = Campaign.objects.all()
    serializer_class = CampaignSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)


class CampaignCreateAPI(generics.CreateAPIView):
    """Create a campaign."""
    serializer_class = CampaignSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)


class CampaignRetrieveAPI(generics.RetrieveAPIView):
    """Retrieve a single campaign by pk."""
    queryset = Campaign.objects.all()
    serializer_class = CampaignSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)


class CampaignUpdateAPI(generics.UpdateAPIView):
    """Update a campaign by pk."""
    # Fix: UpdateAPIView requires `queryset` (or get_queryset()) to locate
    # the object; without it DRF raises an assertion error at request time.
    queryset = Campaign.objects.all()
    serializer_class = CampaignSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)


class CampaignDestroyAPI(generics.DestroyAPIView):
    """Delete a campaign by pk."""
    # Fix: DestroyAPIView likewise requires a queryset for object lookup.
    queryset = Campaign.objects.all()
    serializer_class = CampaignSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
class SourceTypeListAPI(generics.ListAPIView):
    """List all source types."""
    queryset = SourceType.objects.all()
    serializer_class = SourceTypeSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)


class SourceTypeCreateAPI(generics.CreateAPIView):
    """Create a source type."""
    serializer_class = SourceTypeSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)


class SourceTypeRetrieveAPI(generics.RetrieveAPIView):
    """Retrieve a single source type by pk."""
    queryset = SourceType.objects.all()
    serializer_class = SourceTypeSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)


class SourceTypeUpdateAPI(generics.UpdateAPIView):
    """Update a source type by pk."""
    # Fix: UpdateAPIView requires `queryset` (or get_queryset()) to locate
    # the object; without it DRF raises an assertion error at request time.
    queryset = SourceType.objects.all()
    serializer_class = SourceTypeSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)


class SourceTypeDestroyAPI(generics.DestroyAPIView):
    """Delete a source type by pk."""
    # Fix: DestroyAPIView likewise requires a queryset for object lookup.
    queryset = SourceType.objects.all()
    serializer_class = SourceTypeSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
class SourceListAPI(generics.ListAPIView):
queryset = Source.objects.all()
serializer_class = SourceSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
class SourceCreateAPI(generics.CreateAPIView):
serializer_class = SourceSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
class SourceRetrieveAPI(generics.RetrieveAPIView):
queryset = Source.objects.all()
serializer_class = SourceSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
class SourceUpdateAPI(generics.UpdateAPIView):
serializer_class = SourceSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
class SourceDestroyAPI(generics.DestroyAPIView):
serializer_class = SourceSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
| 804 | 4,842 | 713 |
74eb31bf50e4f0475673bcfefa2b48567fa9e685 | 13,404 | py | Python | paging/paging.py | jounikor/3gpp | 1f0aa0ffa8c83a62740e28fa0d5769b1564edc09 | [
"Unlicense"
] | null | null | null | paging/paging.py | jounikor/3gpp | 1f0aa0ffa8c83a62740e28fa0d5769b1564edc09 | [
"Unlicense"
] | null | null | null | paging/paging.py | jounikor/3gpp | 1f0aa0ffa8c83a62740e28fa0d5769b1564edc09 | [
"Unlicense"
] | 1 | 2022-03-10T16:03:29.000Z | 2022-03-10T16:03:29.000Z | #
# Version 0.1 (c) 2018 Jouni Korhonen
#
#
#import exceptions
import rrcconfig as rrc
import nasconfig as nas
import math as m
# See 36.304 subclause 7.2
# i_s=0 i_s=1 i_s=2 i_s=3
sf_pattern_npdcch_or_mpdcch_gt_3MHz_fdd = (
(9, None, None, None), # Ns = 1
(4, 9, None, None), # Ns = 2
(0, 4, 5, 9) # Ns = 4
)
# See 36.304 subclause 7.2
# i_s=0 i_s=1 i_s=2 i_s=3
sf_pattern_mpdcch_14_or_3MHz_fdd = (
(5, None, None, None), # Ns = 1
(5, 5, None, None), # Ns = 2
(5, 5, 5, 5) # Ns = 4
)
#
# LTE-M
#
#
#
#def bdiv(N,D):
#
# Q,R = 0,0
# i = 1 << 64
#
# while (i > 0):
# R <<= 1
# R |= (1 if (N & i) else 0)
# if (R >= D):
# R -= D
# Q |= i
# i >>= 1
#
# return Q,R
| 33.014778 | 114 | 0.559609 | #
# Version 0.1 (c) 2018 Jouni Korhonen
#
#
#import exceptions
import rrcconfig as rrc
import nasconfig as nas
import math as m
# See 36.304 subclause 7.2
# i_s=0 i_s=1 i_s=2 i_s=3
sf_pattern_npdcch_or_mpdcch_gt_3MHz_fdd = (
(9, None, None, None), # Ns = 1
(4, 9, None, None), # Ns = 2
(0, 4, 5, 9) # Ns = 4
)
# See 36.304 subclause 7.2
# i_s=0 i_s=1 i_s=2 i_s=3
sf_pattern_mpdcch_14_or_3MHz_fdd = (
(5, None, None, None), # Ns = 1
(5, 5, None, None), # Ns = 2
(5, 5, 5, 5) # Ns = 4
)
#
class paging(object):
SYSTEM_BW_1_4 = 1.4
SYSTEM_BW_3 = 3
SYSTEM_BW_5 = 5
SYSTEM_BW_10 = 10
SYSTEM_BW_15 = 15
SYSTEM_BW_20 = 20
#
def init_PTW(self,edrx):
self.inside_PTW = edrx
self.ph = False
#
def configure_PTW(self,PTW_sta=None,PTW_end=None,PTW_len=None):
self.PTW_sta = PTW_sta
self.PTW_end = PTW_end
self.PTW_len = PTW_len
self.inside_PTW = True if PTW_sta is None else False
self.ph = True
#
def inside_PTW_test(self,sfn):
# Check if we need are inside the PTW
if (self.ph and not self.inside_PTW):
# Case 1: PTW_sta < PTW_end
if (self.PTW_sta < self.PTW_end):
if (sfn >= self.PTW_sta and sfn <= self.PTW_end):
self.inside_PTW = True
# Case 2: PTW_sta > PTW_end i.e. PTW wrapped hyper frame boundary
if (self.PTW_sta > self.PTW_end):
if (sfn >= self.PTW_end and sfn <= self.PTW_sta):
self.inside_PTW = True
#
inside_PTW = self.inside_PTW
#
if (self.inside_PTW and self.ph):
self.PTW_len -= 1
if (self.PTW_len == 0):
self.inside_PTW = False
self.ph = False
#
return inside_PTW
#
def __init__(self,rel=13,fractional_nB=False,debug=False):
self.rel = rel
self.debug = debug
self.fractional_nB = fractional_nB
if (rel < 13 or rel > 14):
raise NotImplementedError(f"3GPP Release-{rel} paging supportied")
# modulo is for calculating the UE_ID
# 36.304 subclause 7.1:
# IMSI mod 4096, if P-RNTI is monitored on NPDCCH.
# IMSI mod 16384, if P-RNTI is monitored on MPDCCH or if P-RNTI is monitored on NPDCCH
# and the UE supports paging on a non-anchor carrier, and if paging configuration for
# non-anchor carrier is provided in system information.
# This class is RAT agnostic thus the caller has to be RAT aware.
#
def setparameters(self,T,TeDRX,nB,sf_pattern,modulo,shift=0,L=0):
self.T = T
self.TeDRX = TeDRX
self.nB = nB
# Sanity check with eDRX parameters
if (L > TeDRX):
raise ValueError(f"Extended DRX cycle less or equal than PTW.")
# This code takes into account the "fractional nB" case, which
# was discussed in RAN2#105 and 106 meetings with an outcome:
# "RAN2 understands that nB value can be fractional".
# Here we have two implementation where 0 < N < 1 is possible or
# N=1 when nB < 1
self.N = min(T,nB)
if (self.fractional_nB is False and self.N < 1):
self.N = 1
self.Ns = int(max(1,nB/T))
self.sf_pattern = sf_pattern
self.modulo = modulo
self.shift = shift
self.L = L
if (self.debug):
print(f"In setparameters() -> Ns: {self.Ns}, modulo: {self.modulo}, shift: {self.shift}, L: {self.L}")
# The algorithm is described in more detail in 36.304 Annex B
def mod2div_(self,N,D):
D <<= 31
for i in range(32):
if ((N & D) & 0x8000000000000000):
N ^= D
N <<= 1
return N >> 32
#
def get_UE_ID(self,imsi):
if (type(imsi) == str):
imsi = int(imsi)
return imsi % self.modulo
# See 36.304 subclause 7.2 and Annex B
def get_UE_ID_H(self,s_tmsi):
if (type(s_tmsi) == str):
s_tmsi = int(s_tmsi)
Y1 = 0xC704DD7B
D = 0x104C11DB7
s_tmsi <<= 32 # k=32
Y2 = self.mod2div_(s_tmsi,D)
return ((Y1 ^ Y2) ^ 0xffffffff)
# Check if there is a PO in this SFN. If yes return both PO and PF.
def gotpaged_DRX(self,imsi,SFN):
UE_ID = self.get_UE_ID(imsi)
#
i_s = m.floor((UE_ID / self.N)) % self.Ns
PO = int(self.sf_pattern[self.Ns>>1][i_s])
PF = int((self.T / self.N) * (UE_ID % self.N))
if (self.debug):
print(f"SFN: {SFN}, UE_ID: {UE_ID:#06x}, PF: {PF}, i_s: {i_s}, PO: {PO}, "
f"(T div N): {int(self.T/self.N)}, (UE_ID mod N): {UE_ID % self.N}")
return ((SFN % self.T) == PF),PF,PO
#
# Check if the s_tmsi has a potential PO within this HSFN.
#
# Input:
# s_tmsi - s_tmsi for the UE
# HSFN - 10 bit hyper frame counter
#
# Returns:
# pagehit, PTW_start, PTW_end, (HSFN % TeDRXH),(UE_ID_H % TeDRXH)
#
# pagehit - boolean if there is a potential PO in this HSFN
# PTW_start - start SFN for the PTW
# PTW_end - end SFN % 1000 for the PTW
# L - lenght of the PTW in SFNs
#
def gotpaged_eDRX(self,s_tmsi,HSFN):
# extended DRX not in use
if (self.TeDRX == 0):
return False,0,0,0
#
if (type(s_tmsi) == str):
s_tmsi = int(s_tmsi)
TeDRXH = self.TeDRX >> 10
# 36.304 subclause 7.3:
# UE_ID_H is 12 most significant bits, if P-RNTI is monitored on NPDCCH -> shift 20
# UE_ID_H is 10 most significant bits, if P-RNTI is monitored on (M=PDCCH -> shift 22
#
UE_ID_H_noshift = self.get_UE_ID_H(s_tmsi)
UE_ID_H = UE_ID_H_noshift >> self.shift
ieDRX = m.floor((UE_ID_H / TeDRXH)) % 4
PTW_start = 256 * ieDRX
# L is already *100
PTW_end = (PTW_start + self.L - 1) % 1024
if (self.debug):
print( f"In paging.gotpaged_eDRX()")
print( f" HSFN = {HSFN} s_tmsi = {s_tmsi:#010x}")
print( f" UE_ID_H_noshift = {UE_ID_H_noshift:#010x}, UE_ID_H = {UE_ID_H:#04x}")
print( f" TeDRX>>10 (TeDRXH) = {TeDRXH}, ieDRX = {ieDRX}")
print( f" PTW_start = {PTW_start}, PTW_end = {PTW_end}, L (PTW*100) = {self.L}")
print( f" (HSFN % TeDRXH) = {HSFN % TeDRXH}, (UE_ID_H % TeDRXH) = {UE_ID_H % TeDRXH}")
# PH is H-SFN when H-SFN mod TeDRX,H= (UE_ID_H mod TeDRX,H)
return ((HSFN % TeDRXH) == (UE_ID_H % TeDRXH)),PTW_start,PTW_end,self.L
def get_timeout(self):
pass
# LTE-M
class pagingLTEM(paging):
def __init__(self,sysbw=paging.SYSTEM_BW_5,rel=13,frac=False,debug=False):
# This mimics SIB1-BR eDRX-Allowed-r13 flag
#
# See 36.304 subclause 7.2 for system bw and RAT based
# table selections.
#
super (pagingLTEM,self).__init__(rel,frac,debug)
if (sysbw > paging.SYSTEM_BW_3):
self.sf_pattern = sf_pattern_npdcch_or_mpdcch_gt_3MHz_fdd
else:
self.sf_pattern = sf_pattern_mpdcch_14_or_3MHz_fdd
if (debug):
print( f"In pagingLTEM.__init__()\n"
f" Release = {rel}\n"
f" sysbw = {sysbw}")
#
#
def configure(self,sib2,drxie=None,edrxie=None):
# get default paging cycle from SIB2
T = sib2.radioResourceConfigCommon.pcch_Config.defaultPagingCycle
TeDRX = 0
sf_pattern = self.sf_pattern
modulo = 16384
L = 0
# If upper layer provided eDRX parameters configure based on those
if (edrxie and hasattr(edrxie,"TeDRX")):
# If upper layer provided eDRX cycle is 512 then monitor PO according
# 36.304 subclause 7.1 algorithm using T = 512
# Otherwise use subclause 7.3 algorithm to find the start of the
# paging window and then use subclause 7.1 algorithm to find the PO
if (edrxie.TeDRX < 1024):
T = edrxie.TeDRX
TeDRX = 0
else:
TeDRX = edrxie.TeDRX
L = edrxie.PTW
# If upper layer provided UE specific DRX parameter configuration..
if (drxie and hasattr(edrxie,"DRX")):
T = drxie.DRX
TeDRX = 0
# Precalculate nB
if (sib2.radioResourceConfigCommon.pcch_Config_v1310.nB_v1310 is not None):
nB = T * sib2.radioResourceConfigCommon.pcch_Config_v1310.nB_v1310
else:
nB = T * sib2.radioResourceConfigCommon.pcch_Config.nB
# Paging narrow bands.
self.Nn = sib2.radioResourceConfigCommon.pcch_Config_v1310.paging_narrowBands_r13
if (self.debug):
print( f"In pagingLTEM.configure()\n"
f" T = {T}, Nb = {nB}, Nn = {self.Nn}\n"
f" TeDRX = {TeDRX}, L (PTW*100) = {L}\n"
f" modulo = {modulo}, shift = {22}\n")
# setup common parameters
super(pagingLTEM,self).setparameters(T,TeDRX,nB,sf_pattern,modulo,22,L)
#
def paging_carrier(self,imsi):
UE_ID = self.get_UE_ID(imsi)
return int(1+m.floor((UE_ID / (self.N * self.Ns))) % self.Nn)
class pagingNB(paging):
def __init__(self,rel,frac,debug):
super (pagingNB,self).__init__(rel,frac,debug)
self.rel = rel
self.debug = debug
self.sf_pattern = sf_pattern_npdcch_or_mpdcch_gt_3MHz_fdd
#
# See 36.304 subclause 7.2 for system bw and RAT based
# table selections.
#
def configure(self,sib2,sib22=None,edrxie=None):
sf_pattern = self.sf_pattern
modulo = 4096
self.TeDRX = 0
L = 0
TeDRX = 0
#
# 34.304 subclause 7.1 for Rel-14 and greater
# Index 0 is the anchor carrier.. and contains the weight of the carrier
# Default to w0
self.W = [0]
self.Nn = 1
self.Wall = 0
# Also, the anchor carrier may have a weight
if (sib22 and hasattr(sib22, "pagingWeightAnchor_r14")):
# Anchor carrier weight is the index 0 of the pagingCarriersWeight
# If pagingWeightAnchor is absent, then 36.331 sublause 6.7.3.1 for
# SystemInformationBlock22-NB states that w0 (=0 weight) for anchor carrier
# is used, which means no paging takes place on anchor carrier.
# 36.304 subclause 7.1 for paging carrier will always skip W[0] as its
# weight is 0.
self.W[0] = sib22.pagingWeightAnchor_r14
self.Wall += sib22.pagingWeightAnchor_r14
# If non-anchor carriers exist..
if (sib22 and hasattr(sib22, "dl_ConfigList_r14")):
n = sib22.dl_ConfigList_r14.__len__()
i = 0
# SIB22-NB contained configuration for non-anchor carrier paging.
# Calculate cumulativer total weight of all non-anchor carriers.
while (i < n):
self.Wall += sib22.dl_ConfigList_r14[i].pcch_Config_r14.pagingWeight_r14
self.W.append(self.Wall)
i += 1
self.Nn += n
print(f"*** self.Nn = {self.Nn}, self.Wall = {self.Wall}")
# If P-RNTI is monitored on NPDCCH and UE supports paging on a non-anchor
# carrier then UE_ID = IMSI mod 16384
modulo = 16384
# get default paging cycle from SIB2-NB
T = sib2.radioResourceConfigCommon_r13.pcch_Config_r13.defaultPagingCycle_r13
# If upper layer provided eDRX parameters configure based on those
if (edrxie and hasattr(edrxie,"TeDRX")):
# If upper layer provided eDRX cycle is 512 then monitor PO according
# 36.304 subclause 7.1 algorithm using T = 512
# Otherwise use subclause 7.3 algorithm to find the start of the
# paging window and then use subclause 7.1 algorithm to find the PO
if (edrxie.TeDRX > 1024):
TeDRX = edrxie.TeDRX
L = edrxie.PTW
nB = T * sib2.radioResourceConfigCommon_r13.pcch_Config_r13.nB_r13
super(pagingNB,self).setparameters(T,TeDRX,nB,sf_pattern,modulo,20,L)
return self.TeDRX > 0
def paging_carrier(self,imsi):
# Non-anchor paging supported only for Rel-14 or above, and
# when non-anchor configuration has been provided in SIB22-NB.
#
# Returns:
# carrier number (0 is the anchor)
#
if (self.rel < 14 or self.Nn == 1):
return 0
n = 0
UE_ID = self.get_UE_ID(imsi)
# wmod = floor(UE_ID/(self.N*self.Ns)) mod W
wmod = m.floor((UE_ID / (self.N*self.Ns))) % self.Wall
while (n <= self.Nn-1 and wmod >= self.W[n]):
n += 1
return m.floor(n)
#def bdiv(N,D):
#
# Q,R = 0,0
# i = 1 << 64
#
# while (i > 0):
# R <<= 1
# R |= (1 if (N & i) else 0)
# if (R >= D):
# R -= D
# Q |= i
# i >>= 1
#
# return Q,R
| 10,735 | 1,571 | 227 |
bc4c0d17d5a50732f9ab3b4888be555842ab0bdb | 3,537 | py | Python | app/controllers/main.py | akotlerman/flask-website | 1e1e659a2fcab522c4179089d370b5783aff1eb1 | [
"BSD-2-Clause"
] | null | null | null | app/controllers/main.py | akotlerman/flask-website | 1e1e659a2fcab522c4179089d370b5783aff1eb1 | [
"BSD-2-Clause"
] | null | null | null | app/controllers/main.py | akotlerman/flask-website | 1e1e659a2fcab522c4179089d370b5783aff1eb1 | [
"BSD-2-Clause"
] | null | null | null | from flask import Blueprint, render_template, flash, request, redirect, url_for, jsonify, abort
from app.extensions import cache, pages
from app.tasks import long_task
import flam3, io, base64, struct
from PIL import Image
main = Blueprint('main', __name__)
@main.route('/')
@cache.cached(timeout=1000)
@main.route('/task', methods=['GET', 'POST'])
@main.route('/adder')
@main.route('/api/add_numbers')
@main.route('/flam3')
@main.route('/api/gen_flam3')
@main.route('/status/<task_id>')
@main.route('/<path:folder>/<path:path>/')
@main.route('/<path:folder>/')
@main.route('/topics/')
| 29.722689 | 108 | 0.648007 | from flask import Blueprint, render_template, flash, request, redirect, url_for, jsonify, abort
from app.extensions import cache, pages
from app.tasks import long_task
import flam3, io, base64, struct
from PIL import Image
main = Blueprint('main', __name__)
@main.route('/')
@cache.cached(timeout=1000)
def home():
return render_template('index.html')
@main.route('/task', methods=['GET', 'POST'])
def index():
return render_template("longtask.html")
@main.route('/adder')
def adder():
return render_template("adder.html")
@main.route('/api/add_numbers')
def add_numbers():
a = request.args.get('a', 0, type=int)
b = request.args.get('b', 0, type=int)
return jsonify(result=a + b)
@main.route('/flam3')
def flam3_html():
return render_template("flam3.html")
def hex_to_rgb(hexstr):
return struct.unpack('BBB', b''.fromhex(hexstr[1:]))
@main.route('/api/gen_flam3')
def gen_flam3():
point_count = request.args.get('point_count', 0, type=int)
back_color = request.args.get('back_color', "#42426f", type=hex_to_rgb)
front_color = request.args.get('front_color', "#f4a460", type=hex_to_rgb)
selection_limiter = request.args.get('selection_limiter', None, type=str)
colors = (back_color, front_color)
print('selection is', selection_limiter)
# Make sure selection limiter is sane
if selection_limiter is None:
selection_limiter = [False]*point_count
else:
selection_limiter = [bool(int(i)) for i in selection_limiter.split(',')]
# Generate the fractal
print(selection_limiter)
mat_points = flam3.Fractal(point_count=point_count, selection_limiter=selection_limiter).execute()
# Convert fractal data to a matrix of color
img_mat = flam3.point_to_image_mat(mat_points)
img = flam3.mat_to_color(img_mat, colors=colors)
# Save data to BytesIO file object
im = Image.fromarray(img)
f = io.BytesIO()
im.save(f, format='png')
f.seek(0)
return jsonify(result="data:image/png;base64,"+base64.b64encode(f.read()).decode())
@main.route('/status/<task_id>')
def taskstatus(task_id):
task = long_task.AsyncResult(task_id)
if task.state == 'PENDING':
# job did not start yet
response = {
'state': task.state,
'current': 0,
'total': 1,
'status': 'Pending...'
}
elif task.state != 'FAILURE':
response = {
'state': task.state,
'current': task.info.get('current', 0),
'total': task.info.get('total', 1),
'status': task.info.get('status', '')
}
if 'result' in task.info:
response['result'] = task.info['result']
else:
# something went wrong in the background jobself.get
response = {
'state': task.state,
'current': 1,
'total': 1,
'status': str(task.info), # this is the exception raised
}
return jsonify(response)
@main.route('/<path:folder>/<path:path>/')
def page(folder, path):
return render_template('page.html', folder=folder, page=pages.get_or_404(folder, path), page_title=path)
@main.route('/<path:folder>/')
def folder(folder):
folder_dict = sorted(pages.get_or_404(folder=folder))
page_title = folder.replace('_', ' ').title()
return render_template('folder.html', folder=folder, pages=folder_dict, page_title=page_title)
@main.route('/topics/')
def folders():
return render_template('folders.html', folders=pages._pages)
| 2,686 | 0 | 243 |
bbe52c431dfd2064958b7c864f8d7ea5f4c87352 | 6,099 | py | Python | src/viektup.py | fyodr/kektuple | 5602134d9b784fbbbf1efd6afe7a0c523dded06a | [
"MIT"
] | null | null | null | src/viektup.py | fyodr/kektuple | 5602134d9b784fbbbf1efd6afe7a0c523dded06a | [
"MIT"
] | 3 | 2020-05-26T22:28:22.000Z | 2020-05-27T02:32:10.000Z | src/viektup.py | fyodr/kektuple | 5602134d9b784fbbbf1efd6afe7a0c523dded06a | [
"MIT"
] | null | null | null | """
viektup.py
by Ted Morin
viektup <= visual interactive kektup
a class for visualizing Barnette graphs via interactive visualization
"""
import numpy as np
from vektup import Vektup
from iektup import Iektup
# event to call when an edge is clicked on (or "picked")
# handle details of selecting an edge
# for VR? I do not know what this is doing
# for VR? I do not know what this is doing
if __name__ == '__main__':
v = Viektup(g = Iektup.random_graph(30, 0))
v.show_tutte_embedding(f=9)
input("exit on enter")
| 35.666667 | 76 | 0.571733 | """
viektup.py
by Ted Morin
viektup <= visual interactive kektup
a class for visualizing Barnette graphs via interactive visualization
"""
import numpy as np
from vektup import Vektup
from iektup import Iektup
class Viektup(Vektup):
def __init__(self, g=None,
showing_points=True,
showing_lines=True,
showing_polys=True,
showing_labels=True):
Vektup.__init__(self, g=g,
showing_points=showing_points,
showing_lines=showing_lines,
showing_polys=showing_polys,
showing_labels=showing_labels)
self.selected = ()
self.face_color_mode = 'proper'
self.fig.canvas.mpl_connect('pick_event', self.pick_event)
# use a more sophisticated update_visual method after the first call
self.update_visual = self.eventual_update_visual
def init_visual(self):
Vektup.init_visual(self)
self.indicator = self.ax.annotate("Pending",
np.array([.9,.9]))
def eventual_update_visual(self, face_color_mode = None):
if face_color_mode is not None:
self.face_color_mode = face_color_mode
if self.face_color_mode == 'cycle':
self.cycle_color_faces()
elif self.face_color_mode == 'proper':
self.properly_color_faces()
elif self.face_color_mode == 'ham':
pass # case handled below
else :
pass
is_ham = self.g.is_ham_cycle(self.g.active_edges)
if is_ham:
self.indicator.set_text('Hamiltonian')
self.indicator.set_color('g')
if self.face_color_mode == 'ham':
self.inside_outside_color_faces(self.g.active_edges)
else :
self.indicator.set_text('Not Hamiltonian')
self.indicator.set_color('r')
Vektup.update_visual(self)
# event to call when an edge is clicked on (or "picked")
def pick_event(self, event):
# make sure it is not just a vertex moving action
if event.mouseevent.button == 3:
return
# figure out who originated the event
if hasattr(event.artist, 'edge_num'):
self.edge_pick_event(event)
elif hasattr(event.artist, 'face_num'):
self.face_pick_event(event)
elif hasattr(event.artist, 'vert_num'):
return
#self.vert_pick_event(event)
self.update_visual()
def edge_pick_event(self, event):
# assumes that event originated with a line
edge_num = event.artist.edge_num
if event.mouseevent.key == 'control':
# select/deselect the edge
self.select(event.artist.edge_num)
else :
# add to/remove from active edges
if edge_num in self.g.active_edges:
self.g.active_edges.remove(edge_num)
else :
self.g.active_edges.add(edge_num)
def face_pick_event(self, event):
face_num = event.artist.face_num
for edge_num in self.g.faces[face_num].edges:
if edge_num in self.g.active_edges:
self.g.active_edges.remove(edge_num)
else :
self.g.active_edges.add(edge_num)
self.update_visual()
# print("Face Event by", face_num)
# self.faces[face_num].set_visible(False)
# handle details of selecting an edge
def select(self, edge_num):
if edge_num in self.selected: # remove the edge
if len(self.selected) == 1:
self.selected = ()
else :
ix = self.selected.index(edge_num)
self.selected = self.selected[:ix] + self.selected[ix+1:]
else :
if self.selected:
self.selected = (self.selected[-1], edge_num)
else :
self.selected = (edge_num,)
self.update_visual()
def cycle_color_faces(self, update = False):
for f in range(len(self.g.faces)):
if self.g.face_has_alternating_edges(f, self.g.active_edges):
self.face_colors[f] = 'g'
elif self.g.face_has_all_edges(f, self.g.active_edges):
self.face_colors[f] = 'r'
else :
self.face_colors[f] = 'y'
if update :
self.update_visual()
def inside_outside_color_faces(self, ham, update = False,
inside_color='r', outside_color='g'):
A, B = self.g.ham_to_permeating_tree(ham)
for f in range(len(self.g.faces)):
if f in A :
self.face_colors[f] = inside_color
else :
self.face_colors[f] = outside_color
if update :
self.update_visual()
# for VR? I do not know what this is doing
def color_edges(self):
for e, edge in enumerate(self.edges):
if self.edge_counts[e] == 1:
edge.set_color('b')
edge.set_linewidth(self.active_line_width)
elif self.edge_counts[e] == 0:
edge.set_color('g')
edge.set_linewidth(self.default_line_width)
elif self.edge_counts[e] == 2:
edge.set_color('r')
edge.set_linewidth(self.default_line_width)
# for VR? I do not know what this is doing
def flip_colors(self):
for e, edge in enumerate(self.edges):
if self.edge_counts[e] == 0:
if self.flip_flag == True:
edge.set_color('r')
else:
edge.set_color('g')
edge.set_linewidth(self.default_line_width)
elif self.edge_counts[e] == 2:
if self.flip_flag == True:
edge.set_color('g')
else:
edge.set_color('r')
self.flip_flag = not(self.flip_flag)
if __name__ == '__main__':
v = Viektup(g = Iektup.random_graph(30, 0))
v.show_tutte_embedding(f=9)
input("exit on enter")
| 5,224 | 1 | 315 |
8fd15d8d08dc9ac65b46cdb43fd15e5581c97ffc | 127 | py | Python | stubs/asttokens/util.py | jamescooke/flake8-aaa | 9df248e10538946531b67da4564bb229a91baece | [
"MIT"
] | 44 | 2018-04-08T21:25:43.000Z | 2022-01-20T14:28:16.000Z | stubs/asttokens/util.py | jamescooke/flake8-aaa | 9df248e10538946531b67da4564bb229a91baece | [
"MIT"
] | 72 | 2018-03-30T14:30:48.000Z | 2022-03-31T16:18:16.000Z | stubs/asttokens/util.py | jamescooke/flake8-aaa | 9df248e10538946531b67da4564bb229a91baece | [
"MIT"
] | 1 | 2018-10-17T18:49:25.000Z | 2018-10-17T18:49:25.000Z | import collections
| 21.166667 | 97 | 0.740157 | import collections
class Token(collections.namedtuple('Token', 'type string start end line index startpos endpos')):
...
| 0 | 84 | 23 |
1995081178b8e9fad8c0b013bb69eea59dd02469 | 374 | py | Python | examples/annotations/upload_annotation_to_dataset.py | dataloop-ai/sdk_examples | 422d5629df5af343d2dc275e9570bb83c4e2f49d | [
"MIT"
] | 3 | 2022-01-07T20:33:49.000Z | 2022-03-22T12:41:30.000Z | examples/annotations/upload_annotation_to_dataset.py | dataloop-ai/sdk_examples | 422d5629df5af343d2dc275e9570bb83c4e2f49d | [
"MIT"
] | null | null | null | examples/annotations/upload_annotation_to_dataset.py | dataloop-ai/sdk_examples | 422d5629df5af343d2dc275e9570bb83c4e2f49d | [
"MIT"
] | 3 | 2021-12-29T13:11:30.000Z | 2022-03-22T12:25:50.000Z | import dtlpy as dl
dataset_id = ''
annotations_path = r''
# make sure they have the same hierarchy
dataset = dl.datasets.get(dataset_id=dataset_id)
# clean: bool - if True it remove the old annotations
# remote_root_path: str - the remote root path to match remote and local items
dataset.upload_annotations(local_path=annotations_path, clean=False, remote_root_path='/')
| 34 | 90 | 0.783422 | import dtlpy as dl
dataset_id = ''
annotations_path = r''
# make sure they have the same hierarchy
dataset = dl.datasets.get(dataset_id=dataset_id)
# clean: bool - if True it remove the old annotations
# remote_root_path: str - the remote root path to match remote and local items
dataset.upload_annotations(local_path=annotations_path, clean=False, remote_root_path='/')
| 0 | 0 | 0 |
1c6e4a81a173fa18b5f8b7f938e0d2aac2fb1994 | 436 | py | Python | examples/volumetric/tet_threshold.py | evanphilip/vedo | e8504fb1a7d2cb667a776180d69bb17cad634e1e | [
"CC0-1.0"
] | 836 | 2020-06-14T02:38:12.000Z | 2022-03-31T15:39:50.000Z | examples/volumetric/tet_threshold.py | evanphilip/vedo | e8504fb1a7d2cb667a776180d69bb17cad634e1e | [
"CC0-1.0"
] | 418 | 2020-06-14T10:51:32.000Z | 2022-03-31T23:23:14.000Z | examples/volumetric/tet_threshold.py | evanphilip/vedo | e8504fb1a7d2cb667a776180d69bb17cad634e1e | [
"CC0-1.0"
] | 136 | 2020-06-14T02:26:41.000Z | 2022-03-31T12:47:18.000Z | """Threshold the original TetMesh
with a scalar array"""
from vedo import *
settings.useDepthPeeling = True
tetm = TetMesh(dataurl+'limb_ugrid.vtk')
tetm.color('prism').alpha([0,1])
# Threshold the tetrahedral mesh for values in the range:
tetm.threshold(above=0.9, below=1)
tetm.addScalarBar3D(title='chem_0 expression levels', c='k', italic=1)
show([(tetm,__doc__),
tetm.tomesh(shrink=0.9),
], N=2, axes=1,
).close()
| 24.222222 | 71 | 0.704128 | """Threshold the original TetMesh
with a scalar array"""
from vedo import *
settings.useDepthPeeling = True
tetm = TetMesh(dataurl+'limb_ugrid.vtk')
tetm.color('prism').alpha([0,1])
# Threshold the tetrahedral mesh for values in the range:
tetm.threshold(above=0.9, below=1)
tetm.addScalarBar3D(title='chem_0 expression levels', c='k', italic=1)
show([(tetm,__doc__),
tetm.tomesh(shrink=0.9),
], N=2, axes=1,
).close()
| 0 | 0 | 0 |
8232a837d37bf5ce21f07e2fbce3a35ef2d5d563 | 373 | py | Python | test/client/post_client.py | lidall/risk-authentication-service | 17e59dc264618a691767b5e271ac170b4178eb6f | [
"MIT"
] | null | null | null | test/client/post_client.py | lidall/risk-authentication-service | 17e59dc264618a691767b5e271ac170b4178eb6f | [
"MIT"
] | null | null | null | test/client/post_client.py | lidall/risk-authentication-service | 17e59dc264618a691767b5e271ac170b4178eb6f | [
"MIT"
] | null | null | null | import asyncio
import aiohttp
text = asyncio.run(main()) # Assuming you're using python 3.7+
print(text)
| 24.866667 | 70 | 0.619303 | import asyncio
import aiohttp
async def main():
url = 'http://0.0.0.0:8080/log'
with open('data.txt', 'rb') as f:
async with aiohttp.ClientSession() as session:
async with session.post(url, data={'key': f}) as response:
return await response.text()
text = asyncio.run(main()) # Assuming you're using python 3.7+
print(text)
| 241 | 0 | 23 |
0f1ba0f472d43145fe9d19222d09e17981af0786 | 1,934 | py | Python | lldb/scripts/copy-static-bindings.py | LaudateCorpus1/llvm-project-staging | cc926dc3a87af7023aa9b6c392347a0a8ed6949b | [
"Apache-2.0"
] | null | null | null | lldb/scripts/copy-static-bindings.py | LaudateCorpus1/llvm-project-staging | cc926dc3a87af7023aa9b6c392347a0a8ed6949b | [
"Apache-2.0"
] | null | null | null | lldb/scripts/copy-static-bindings.py | LaudateCorpus1/llvm-project-staging | cc926dc3a87af7023aa9b6c392347a0a8ed6949b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""SWIG Static Binding Copier
This script copies over the Python bindings generated by SWIG from the build
directory to the source directory. Before using the script, make sure you have
LLDB_USE_STATIC_BINDINGS set to OFF by looking at CMakeCache.txt in the LLDB
build directory.
The scripts knows the location of the static bindings in the source directory
based on its own location. The build directory must be specified as a position
argument.
$ copy-static-bindings.py <path to LLDB build directory>
Run this script whenever you're changing any of the .i interface files in the
bindings directory.
"""
import argparse
import os
import sys
import shutil
if __name__ == "__main__":
main()
| 32.233333 | 78 | 0.668046 | #!/usr/bin/env python
"""SWIG Static Binding Copier
This script copies over the Python bindings generated by SWIG from the build
directory to the source directory. Before using the script, make sure you have
LLDB_USE_STATIC_BINDINGS set to OFF by looking at CMakeCache.txt in the LLDB
build directory.
The scripts knows the location of the static bindings in the source directory
based on its own location. The build directory must be specified as a position
argument.
$ copy-static-bindings.py <path to LLDB build directory>
Run this script whenever you're changing any of the .i interface files in the
bindings directory.
"""
import argparse
import os
import sys
import shutil
def main():
parser = argparse.ArgumentParser(description='Copy the static bindings')
parser.add_argument('build_dir',
type=str,
help='Path to the root of the LLDB build directory')
args = parser.parse_args()
build_dir = args.build_dir
if not os.path.exists(build_dir):
print("error: the build directory does not exist: {}".format(
args.build_dir))
sys.exit(1)
source_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
if not os.path.exists(source_dir):
print("error: the source directory does not exist: {}".format(
source_dir))
sys.exit(1)
binding_build_dir = os.path.join(build_dir, 'bindings', 'python')
binding_source_dir = os.path.join(source_dir, 'bindings', 'python',
'static-binding')
for root, _, files in os.walk(binding_build_dir):
for file in files:
_, extension = os.path.splitext(file)
filepath = os.path.join(root, file)
if extension == '.py' or extension == '.cpp':
shutil.copy(filepath, os.path.join(binding_source_dir, file))
if __name__ == "__main__":
main()
| 1,185 | 0 | 23 |
2854f114e96f2c3f14e1c5c147096d7b6d434e74 | 193 | py | Python | tests/requests/req-head.py | Team-Fenris/tfcctrl | d7af8750fddb7d09f6ee3830d9703c5356b9ef13 | [
"Apache-2.0"
] | 1 | 2021-12-28T17:07:21.000Z | 2021-12-28T17:07:21.000Z | tests/requests/req-head.py | Team-Fenris/tfcctrl | d7af8750fddb7d09f6ee3830d9703c5356b9ef13 | [
"Apache-2.0"
] | null | null | null | tests/requests/req-head.py | Team-Fenris/tfcctrl | d7af8750fddb7d09f6ee3830d9703c5356b9ef13 | [
"Apache-2.0"
] | null | null | null | import requests
# Request URL from the user input
url = input("Insert URL: ")
# Set up and make the test ready for print
x = requests.head(url)
# Print the callback
print(x.headers) | 19.3 | 43 | 0.694301 | import requests
# Request URL from the user input
url = input("Insert URL: ")
# Set up and make the test ready for print
x = requests.head(url)
# Print the callback
print(x.headers) | 0 | 0 | 0 |
a0eaa0066688a682511c63cb1875814600dd18a3 | 205 | py | Python | fec_raw/tests.py | datadesk/django-fec-raw-data | 9d1f49e5ecc1552c55b635c63c1bf021871e4c0b | [
"MIT"
] | 3 | 2016-06-01T18:16:36.000Z | 2021-07-20T14:51:40.000Z | fec_raw/tests.py | datadesk/django-fec-raw-data | 9d1f49e5ecc1552c55b635c63c1bf021871e4c0b | [
"MIT"
] | 9 | 2015-11-24T06:22:56.000Z | 2021-06-10T17:45:57.000Z | fec_raw/tests.py | datadesk/django-fec-raw-data | 9d1f49e5ecc1552c55b635c63c1bf021871e4c0b | [
"MIT"
] | 1 | 2020-12-01T21:22:53.000Z | 2020-12-01T21:22:53.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase
| 18.636364 | 39 | 0.697561 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase
class FecTest(TestCase):
    """Minimal test case for the fec_raw app."""
    def test_fake(self):
        """Trivial placeholder assertion (2+2 == 4) so the suite runs."""
        self.assertEqual(2+2, 4)
| 32 | 3 | 50 |
345988313f381b37b27d23851276e597d1cfa427 | 1,432 | py | Python | tests/test_fix_print.py | graingert/python-modernize | 028d13416d7abe4b8b39bc21e6425df65c7836c0 | [
"BSD-3-Clause"
] | 220 | 2015-03-25T11:06:13.000Z | 2020-08-19T13:33:57.000Z | tests/test_fix_print.py | graingert/python-modernize | 028d13416d7abe4b8b39bc21e6425df65c7836c0 | [
"BSD-3-Clause"
] | 113 | 2015-01-03T18:05:27.000Z | 2020-08-18T21:42:23.000Z | tests/test_fix_print.py | graingert/python-modernize | 028d13416d7abe4b8b39bc21e6425df65c7836c0 | [
"BSD-3-Clause"
] | 39 | 2015-01-18T10:08:52.000Z | 2020-07-12T18:44:40.000Z | from __future__ import generator_stop
from utils import check_on_input
PRINT_BARE = (
"""\
print
""",
"""\
from __future__ import print_function
print()
""",
)
PRINT_SIMPLE = (
"""\
print 'Hello'
""",
"""\
from __future__ import print_function
print('Hello')
""",
)
PRINT_MULTIPLE = (
"""\
print 'Hello', 'world'
""",
"""\
from __future__ import print_function
print('Hello', 'world')
""",
)
PRINT_WITH_PARENS = (
"""\
print('Hello')
""",
"""\
from __future__ import print_function
print('Hello')
""",
)
PRINT_WITH_COMMA = (
"""\
print 'Hello',
""",
"""\
from __future__ import print_function
print('Hello', end=' ')
""",
)
PRINT_TO_STREAM = (
"""\
print >>x, 'Hello'
""",
"""\
from __future__ import print_function
print('Hello', file=x)
""",
)
PRINT_TO_STREAM_WITH_COMMA = (
"""\
print >>x, 'Hello',
""",
"""\
from __future__ import print_function
print('Hello', end=' ', file=x)
""",
)
| 14.039216 | 47 | 0.65852 | from __future__ import generator_stop
from utils import check_on_input
PRINT_BARE = (
"""\
print
""",
"""\
from __future__ import print_function
print()
""",
)
PRINT_SIMPLE = (
"""\
print 'Hello'
""",
"""\
from __future__ import print_function
print('Hello')
""",
)
PRINT_MULTIPLE = (
"""\
print 'Hello', 'world'
""",
"""\
from __future__ import print_function
print('Hello', 'world')
""",
)
PRINT_WITH_PARENS = (
"""\
print('Hello')
""",
"""\
from __future__ import print_function
print('Hello')
""",
)
PRINT_WITH_COMMA = (
"""\
print 'Hello',
""",
"""\
from __future__ import print_function
print('Hello', end=' ')
""",
)
PRINT_TO_STREAM = (
"""\
print >>x, 'Hello'
""",
"""\
from __future__ import print_function
print('Hello', file=x)
""",
)
PRINT_TO_STREAM_WITH_COMMA = (
"""\
print >>x, 'Hello',
""",
"""\
from __future__ import print_function
print('Hello', end=' ', file=x)
""",
)
def test_print_bare():
check_on_input(*PRINT_BARE)
def test_print_simple():
check_on_input(*PRINT_SIMPLE)
def test_print_multiple():
check_on_input(*PRINT_MULTIPLE)
def test_print_with_parens():
check_on_input(*PRINT_WITH_PARENS)
def test_print_with_comma():
check_on_input(*PRINT_WITH_COMMA)
def test_print_to_stream():
check_on_input(*PRINT_TO_STREAM)
def test_print_to_stream_with_comma():
check_on_input(*PRINT_TO_STREAM_WITH_COMMA)
| 311 | 0 | 161 |
d736de07b87063cbed39e7376c3caac6450912bb | 490 | py | Python | app/server/auth/__init__.py | tderleth/2-item-catalog | 168e8f5ad10a26a03f6c50b1b2173de0b5dde113 | [
"MIT"
] | null | null | null | app/server/auth/__init__.py | tderleth/2-item-catalog | 168e8f5ad10a26a03f6c50b1b2173de0b5dde113 | [
"MIT"
] | null | null | null | app/server/auth/__init__.py | tderleth/2-item-catalog | 168e8f5ad10a26a03f6c50b1b2173de0b5dde113 | [
"MIT"
] | null | null | null | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
"""Initialize package."""
from flask import redirect, url_for
from flask import session as login_session
from functools import wraps
def login_required(f):
    """Decorator: allow the view only when the session is authenticated.

    Redirects to the login view ('auth.showLogin') otherwise.

    BUG FIX: the inner ``wrap`` function was missing entirely, so
    ``return wrap`` raised NameError as soon as this decorator was applied.
    """
    @wraps(f)
    def wrap(*args, **kwargs):
        # Only proceed when the session explicitly stores auth == True
        if login_session.get('auth'):
            if login_session['auth'] is True:
                return f(*args, **kwargs)
        return redirect(url_for('auth.showLogin'))
    return wrap
| 23.333333 | 50 | 0.632653 | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
"""Initialize package."""
from flask import redirect, url_for
from flask import session as login_session
from functools import wraps
def login_required(f):
    """Decorator: allow the view only when the session marks the user as authenticated.

    Redirects to the login view ('auth.showLogin') otherwise.
    """
    @wraps(f)
    def wrap(*args, **kwargs):
        # Only proceed when the session explicitly stores auth == True
        if login_session.get('auth'):
            if login_session['auth'] is True:
                return f(*args, **kwargs)
        return redirect(url_for('auth.showLogin'))
    return wrap
| 182 | 0 | 26 |
ca1e6d19ff949ebaf3c6b29a5cb5dea9ff1c5275 | 240 | py | Python | cli_tests/nb_config.py | jbn/dissertate | a3e258b8686408b28fec13ba300e77d466465e5b | [
"MIT"
] | 2 | 2019-03-08T14:24:11.000Z | 2019-07-11T15:13:07.000Z | cli_tests/nb_config.py | jbn/dissertate | a3e258b8686408b28fec13ba300e77d466465e5b | [
"MIT"
] | 1 | 2017-04-30T18:04:19.000Z | 2017-06-15T22:28:53.000Z | cli_tests/nb_config.py | jbn/dissertate | a3e258b8686408b28fec13ba300e77d466465e5b | [
"MIT"
] | null | null | null | import dissertate
c = get_config()
c.Exporter.preprocessors = ['dissertate.preprocessors.CellElider',
'dissertate.preprocessors.EmptyCellElider']
c.Exporter.template_file = dissertate.markdown_template_path()
| 26.666667 | 71 | 0.725 | import dissertate
c = get_config()
c.Exporter.preprocessors = ['dissertate.preprocessors.CellElider',
'dissertate.preprocessors.EmptyCellElider']
c.Exporter.template_file = dissertate.markdown_template_path()
| 0 | 0 | 0 |
ba57317d47dd39595ebadff7a609644ff3f9eb12 | 19,205 | py | Python | responder/aws/function/responder.py | gracious-tech/stello | 1b7b1b28c76c38eede4abef308cb981e26f068b8 | [
"MIT"
] | 1 | 2021-11-04T11:36:12.000Z | 2021-11-04T11:36:12.000Z | responder/aws/function/responder.py | gracious-tech/stello | 1b7b1b28c76c38eede4abef308cb981e26f068b8 | [
"MIT"
] | 7 | 2021-07-29T06:26:06.000Z | 2021-11-19T01:42:25.000Z | responder/aws/function/responder.py | gracious-tech/stello | 1b7b1b28c76c38eede4abef308cb981e26f068b8 | [
"MIT"
] | null | null | null |
import os
import json
import base64
import string
from time import time
from uuid import uuid4
from pathlib import Path
from traceback import format_exc
from contextlib import suppress
import rollbar
import boto3
from botocore.config import Config
from cryptography.hazmat.primitives.hashes import SHA256
from cryptography.hazmat.primitives.serialization import load_der_public_key
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
from cryptography.hazmat.primitives.asymmetric.padding import OAEP, MGF1
from email_template import generate_email
# Constants
VALID_TYPES = ('read', 'reply', 'reaction', 'subscription', 'address', 'resend')
# A base64-encoded 3w1h solid #ddeeff jpeg
EXPIRED_IMAGE = '/9j/4AAQSkZJRgABAQEBLAEsAAD/2wBDAAoHBwgHBgoICAgLCgoLDhgQDg0NDh0VFhEYIx8lJCIfIiEmKzcvJik0KSEiMEExNDk7Pj4+JS5ESUM8SDc9Pjv/2wBDAQoLCw4NDhwQEBw7KCIoOzs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozv/wAARCAABAAMDAREAAhEBAxEB/8QAFAABAAAAAAAAAAAAAAAAAAAAB//EABQQAQAAAAAAAAAAAAAAAAAAAAD/xAAUAQEAAAAAAAAAAAAAAAAAAAAE/8QAFBEBAAAAAAAAAAAAAAAAAAAAAP/aAAwDAQACEQMRAD8AViR3/9k='
# Sym encryption settings (same as js version)
SYM_IV_BYTES = 12
SYM_TAG_BITS = 128 # Tag is 128 bits by default in AESGCM and not configurable
SYM_KEY_BITS = 256
# Config from env
ENV = os.environ['stello_env']
DEV = ENV == 'development'
VERSION = os.environ['stello_version']
MSGS_BUCKET = os.environ['stello_msgs_bucket']
RESP_BUCKET = MSGS_BUCKET + '-stello-resp'
REGION = os.environ['stello_region']
ROLLBAR_TOKEN = os.environ['stello_rollbar_responder'] # Client token (not server) as public
# Optional config
SELF_HOSTED = not os.environ.get('stello_domain_branded')
if SELF_HOSTED:
TOPIC_ARN = os.environ['stello_topic_arn']
else:
DOMAIN_BRANDED = os.environ['stello_domain_branded']
DOMAIN_GENERIC = os.environ['stello_domain_generic']
# Setup Rollbar
# WARN Must use blocking handler, otherwise lambda may finish before report is sent
# NOTE Version prefixed with 'v' so that traces match github tags
# SECURITY Don't expose local vars in report as could contain sensitive user content
rollbar.init(ROLLBAR_TOKEN, ENV, handler='blocking', code_version='v'+VERSION,
    locals={'enabled': False}, root=str(Path(__file__).parent), enabled=not DEV)

def _rollbar_add_context(payload, **kwargs):
    """Rollbar payload handler: mark reports as client-platform.

    Required because a public client token is used rather than a server token.
    BUG FIX: this function was referenced below but missing, causing a
    NameError as soon as the module was imported.
    """
    payload['data']['platform'] = 'client'
    return payload

rollbar.events.add_payload_handler(_rollbar_add_context)
# Access to AWS services
# NOTE Important to set region to avoid unnecessary redirects for e.g. s3
AWS_CONFIG = Config(region_name=REGION)
S3 = boto3.client('s3', config=AWS_CONFIG)
def entry(api_event, context):
    """Entrypoint that wraps main logic to add exception handling and CORS headers

    api_event: API Gateway HTTP API (v2) event dict
    context: Lambda context object (unused)
    Always returns an API Gateway response dict and never raises; all failures
    surface only as a bare 400 (see SECURITY notes below).
    """
    # Handle GET requests (which don't send origin header so can't detect user)
    if api_event['requestContext']['http']['method'] == 'GET':
        try:
            if api_event['requestContext']['http']['path'] == '/inviter/image':
                return inviter_image(api_event)
            # NOTE A number of companies crawl AWS services, so don't warn for invalid paths
            raise Abort()
        except Abort:
            return {'statusCode': 400}
        except:
            # SECURITY Never reveal whether client or server error, just that it didn't work
            _report_error(api_event)
            return {'statusCode': 400}
    # Determine expected origin (and detect user)
    # NOTE Access-Control-Allow-Origin can only take one value, so must detect right one
    if SELF_HOSTED:
        user = '_user'
        allowed_origin = f'https://{MSGS_BUCKET}.s3-{REGION}.amazonaws.com'
    else:
        # Hosted setup -- origin must be a subdomain of one of defined domains
        # e.g. 'https://alice.example.com' -> user 'alice', root_origin 'example.com'
        user, _, root_origin = api_event['headers']['origin'].partition('//')[2].partition('.')
        allowed_root = DOMAIN_GENERIC if root_origin == DOMAIN_GENERIC else DOMAIN_BRANDED
        allowed_origin = f'https://{user}.{allowed_root}'
    # If origin not allowed, 403 to prevent further processing of the request
    if not DEV and api_event['headers']['origin'] != allowed_origin:
        return {'statusCode': 403}
    # Process event and catch exceptions
    try:
        response = _entry(api_event, user)
    except Abort:
        response = {'statusCode': 400}
    except:
        # SECURITY Never reveal whether client or server error, just that it didn't work
        _report_error(api_event)
        response = {'statusCode': 400}
    # Add CORS headers so cross-domain request doesn't fail
    response.setdefault('headers', {})
    response['headers']['Access-Control-Allow-Origin'] = '*' if DEV else allowed_origin
    if api_event['requestContext']['http']['method'] == 'OPTIONS':
        response['headers']['Access-Control-Allow-Methods'] = 'GET,POST'
        response['headers']['Access-Control-Allow-Headers'] = '*'
    return response
def _entry(api_event, user):
    """Main processing logic
    NOTE api_event format and response expected below
    https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-develop-integrations-lambda.html
    SECURITY Assume input may be malicious
    SECURITY Never return anything back to recipient other than success status
    Event data is expected to be: {
        'config_secret': string,
        'encrypted': string,
        ...
    }
    Data saved to response bucket is: encrypted JSON {
        'event': ...,
        'ip': string,
    }
    api_event: API Gateway HTTP API v2 event (origin already vetted by entry())
    user: account name derived from the request origin ('_user' when self-hosted)
    """
    # Handle CORS OPTIONS requests
    if api_event['requestContext']['http']['method'] == 'OPTIONS':
        return {'statusCode': 200}
    # Handle POST requests
    ip = api_event['requestContext']['http']['sourceIp']
    event = json.loads(api_event['body'])
    # These keys are required for all responses
    _ensure_type(event, 'config_secret', str)
    _ensure_type(event, 'encrypted', str)
    # Load config (required to encrypt stored data, so can't do anything without)
    config = _get_config(user, event['config_secret'])
    # Get event type from path
    resp_type = api_event['requestContext']['http']['path'].partition('/responder/')[2]
    if resp_type not in VALID_TYPES:
        raise Exception(f"Invalid value for response type: {resp_type}")
    # Handle the event and then store it
    # NOTE globals() dispatch is safe as resp_type was just restricted to VALID_TYPES
    handler = globals()[f'handle_{resp_type}']
    handler(user, config, event)
    _put_resp(config, resp_type, event, ip, user)
    # Report success
    return {'statusCode': 200}
# POST HANDLERS
def handle_read(user, config, event):
    """Delete message if reached max reads, otherwise increase read count
    SECURITY While an attacker could circumvent this or send fake msg ids, there isn't much risk
    This is mainly for triggering a delete if message shared widely when not permitted
    Actual attackers would only need a single read anyway
    For example, could be more reliable if separate lambda triggered by bucket requests
    But more reliable doesn't necessarily mean more secure
    resp_token is also still used Stello-side to verify reads
    """
    # Expected fields
    # SECURITY Yes attacker could change these value themselves but see above
    _ensure_type(event, 'copy_id', str)
    _ensure_type(event, 'has_max_reads', bool)
    # Don't need to do anything if not tracking max reads
    if not event['has_max_reads']:
        return
    # Get copies's tags
    copy_key = f"messages/{user}/copies/{event['copy_id']}"
    try:
        resp = S3.get_object_tagging(Bucket=MSGS_BUCKET, Key=copy_key)
    except S3.exceptions.NoSuchKey:
        return # If msg already deleted, no reason to do any further processing (still report resp)
    tags = {d['Key']: d['Value'] for d in resp['TagSet']}
    # Parse and increase reads
    # NOTE get/put of tags is a non-atomic read-modify-write, so concurrent reads
    # may undercount -- an accepted risk per the SECURITY notes above
    reads = int(tags['stello-reads'])
    max_reads = int(tags['stello-max-reads'])
    reads += 1
    tags['stello-reads'] = str(reads)
    # Either delete message or update reads
    if reads >= max_reads:
        S3.delete_object(Bucket=MSGS_BUCKET, Key=copy_key)
        # Also delete invite image
        S3.delete_object(Bucket=MSGS_BUCKET, Key=f"messages/{user}/invite_images/{event['copy_id']}")
    else:
        S3.put_object_tagging(
            Bucket=MSGS_BUCKET,
            Key=copy_key,
            Tagging={
                # WARN MUST preserve other tags (like stello-lifespan!)
                'TagSet': [{'Key': k, 'Value': v} for k, v in tags.items()],
            },
        )
def handle_reply(user, config, event):
    """Forward a reply event to the user as a notification (when replies are enabled)."""
    if config['allow_replies']:
        _send_notification(config, 'reply', event, user)
    else:
        # Replies are switched off, so reject the event without reporting an error
        raise Abort()
def handle_reaction(user, config, event):
    """Validate a reaction event and pass it on as a notification."""
    # Reactions should never arrive when the user has switched them off
    if not config['allow_reactions']:
        raise Abort()
    # SECURITY A reaction must stay a short single hyphenated word (or null, which
    # clears a previous reaction) since its value may end up in notification emails
    content = event.get('content')
    if content is not None:
        allowed = string.ascii_letters + string.digits + '-'
        _ensure_valid_chars(event, 'content', allowed)
        if len(content) > 25:
            raise Exception("Reaction content too long")
    _send_notification(config, 'reaction', event, user)
def handle_subscription(user, config, event):
    """Subscription modifications don't need any processing (only stored via _put_resp)"""
def handle_address(user, config, event):
    """Subscription address modifications don't need any processing (only stored via _put_resp)"""
def handle_resend(user, config, event):
    """Turn a resend request into a notification for the user.

    Aborts when the user does not accept resend requests.
    """
    resends_allowed = config['allow_resend_requests']
    if not resends_allowed:
        raise Abort()
    _send_notification(config, 'resend', event, user)
def handle_delete(user, config, event):
    # TODO Review this (event type currently disabled)
    """Handle a request to delete the recipient's copy of the message
    SECURITY Stello config not checked, so technically recipient could delete manually even if the
    option to is not presented in the message. Not considered a security risk.
    SECURITY Recipient could technically delete any message copy
    Since copies have unique ids, considered low risk, as they would only know their own
    SECURITY Ensure this lambda fn can only delete messages (not other objects in bucket)
    """
    # Validate input the same way every other handler does, so a missing or
    # non-string 'copy_id' raises the clean bad-input Exception (not a KeyError
    # that gets reported to Rollbar as an internal error)
    _ensure_type(event, 'copy_id', str)
    copy_id = event['copy_id']
    with suppress(S3.exceptions.NoSuchKey): # Already deleted is not a failure
        S3.delete_object(Bucket=MSGS_BUCKET, Key=f'messages/{user}/copies/{copy_id}')
# HELPERS
class Abort(Exception):
    """Abort and respond with failure, but don't report any error

    Raised by handlers for expected rejections (e.g. a disabled feature);
    entry() converts it to a plain 400 without sending an error report.
    """
def _url64_to_bytes(url64_string):
"""Convert custom-url-base64 encoded string to bytes"""
return base64.urlsafe_b64decode(url64_string.replace('~', '='))
def _bytes_to_url64(bytes_data):
"""Convert bytes to custom-url-base64 encoded string"""
return base64.urlsafe_b64encode(bytes_data).decode().replace('=', '~')
def _get_config(user, secret):
    """Fetch the user's responder config from S3, decrypt it, and parse the JSON."""
    obj_key = f'config/{user}/config'
    encrypted = S3.get_object(Bucket=RESP_BUCKET, Key=obj_key)['Body'].read()
    # Layout is IV followed by ciphertext
    iv, ciphertext = encrypted[:SYM_IV_BYTES], encrypted[SYM_IV_BYTES:]
    sym_decrypter = AESGCM(_url64_to_bytes(secret))
    return json.loads(sym_decrypter.decrypt(iv, ciphertext, None))
def _ensure_type(event, key, type_):
"""Ensure key's value is of given type"""
if not isinstance(event.get(key), type_):
raise Exception(f"Invalid or missing value for '{key}'")
def _ensure_valid_chars(event, key, valid_chars):
    """Ensure key's value is a non-empty string drawn only from valid_chars."""
    _ensure_type(event, key, str)
    value = event[key]
    if not value:
        raise Exception(f"Empty string for '{key}'")
    # Report the first character that falls outside the allowed set
    bad = next((c for c in value if c not in valid_chars), None)
    if bad is not None:
        raise Exception(f"Invalid character '{bad}' in {key}")
def _report_error(api_event):
    """Report the exception currently being handled

    Prints the traceback to stdout (collected by the Lambda logs) and sends a
    report to Rollbar. Expected to be called from within an `except` block,
    since rollbar.report_exc_info() reads the active exception.
    """
    print(format_exc())
    # Add request metadata if available
    # NOTE Deliberately best-effort: a malformed event must not stop the report
    payload_data = {}
    try:
        payload_data = {
            'request': {
                'user_ip': api_event['requestContext']['http']['sourceIp'],
                'headers': {
                    'User-Agent': api_event['requestContext']['http']['userAgent'],
                },
            },
        }
    except:
        pass
    # Send to Rollbar
    rollbar.report_exc_info(payload_data=payload_data)
def _put_resp(config, resp_type, event, ip, user):
    """Save response object with encrypted data

    Hybrid encryption: the payload is encrypted with a fresh AES-256-GCM key,
    and that key is encrypted with the user's public key (OAEP / SHA-256), so
    only the Stello app holding the matching private key can read responses.
    SECURITY Ensure objects can't be placed in other dirs which app would never download
    """
    # Work out object id
    # Timestamp prefix for order, uuid suffix for uniqueness
    object_id = f'responses/{user}/{resp_type}/{int(time())}_{uuid4()}'
    # Encode data
    data = json.dumps({
        'event': event,
        'ip': ip,
    }).encode()
    # Decode asym public key and setup asym encrypter
    asym_key = _url64_to_bytes(config['resp_key_public'])
    asym_encryter = load_der_public_key(asym_key)
    # Generate sym key and encrypted form of it
    sym_key = AESGCM.generate_key(SYM_KEY_BITS)
    encrypted_key = asym_encryter.encrypt(sym_key, OAEP(MGF1(SHA256()), SHA256(), None))
    # Encrypt data and produce output
    # NOTE IV is prepended to the ciphertext (same layout _get_config decrypts)
    sym_encrypter = AESGCM(sym_key)
    iv = os.urandom(SYM_IV_BYTES)
    encrypted_data = iv + sym_encrypter.encrypt(iv, data, None)
    output = json.dumps({
        'encrypted_data': _bytes_to_url64(encrypted_data),
        'encrypted_key': _bytes_to_url64(encrypted_key),
    })
    # Store in bucket
    S3.put_object(Bucket=RESP_BUCKET, Key=object_id, Body=output.encode())
def _count_resp_objects(user, resp_type):
    """Count the response objects currently stored for the given response type
    TODO Paginates at 1000, which may be a concern when counting reactions for popular users
    """
    prefix = f'responses/{user}/{resp_type}/'
    listing = S3.list_objects_v2(Bucket=RESP_BUCKET, Prefix=prefix)
    return listing['KeyCount']
def _send_notification(config, resp_type, event, user):
    """Notify user of replies/reactions/resends for their messages (if configured to)

    Notify modes: none, first_new_reply, replies, replies_and_reactions
    Including contents only applies to: replies, replies_and_reactions
    config: decrypted responder config (NOTE notify_include_contents may be
        overwritten in-place below)
    resp_type: one of 'reply', 'reaction', 'resend'
    event: the parsed response event (its 'content' may appear in the email)
    user: account name used to count stored response objects
    Delivery: SNS topic when self-hosted, branded SES email otherwise; skipped
    entirely in development.
    """
    # Determine if a reaction or reply/resend
    # NOTE To keep things simple, resends are considered "replies" for purpose of notifications
    reaction = resp_type == 'reaction'
    # Do nothing if notifications disabled
    if config['notify_mode'] == 'none':
        return
    if reaction and config['notify_mode'] != 'replies_and_reactions':
        return
    # Ensure notify_include_contents takes into account notify_mode
    if config['notify_mode'] == 'first_new_reply':
        config['notify_include_contents'] = False
    # Prepare message
    # NOTE Possible to have race condition where contents should be included but isn't, so check
    if config['notify_include_contents'] and 'content' in event:
        # If content is null, just clearing a previous reaction, so don't notify
        if event['content'] is None:
            return
        subject = "Stello: New reaction" if reaction else "Stello: New reply"
        heading = "Someone reacted with:" if reaction else "Someone replied with:"
        msg = event['content']
        if SELF_HOSTED:
            msg += "\n" * 10
            msg += (
                "#### MESSAGE END ####\n"
                "Open Stello to identify who responded and to reply to them"
                " (not possible via email for security reasons)."
                " Ignore storage provider's notes below."
                " Instead, change notification settings in Stello."
            )
    else:
        # Work out counts
        reply_count = _count_resp_objects(user, 'reply') + _count_resp_objects(user, 'resend')
        reaction_count = _count_resp_objects(user, 'reaction')
        if reaction:
            reaction_count += 1
        else:
            reply_count += 1
        # If notify_mode is first_new_reply then only continue if this is the first
        # NOTE Already returned if a reaction and in this notify_mode
        if config['notify_mode'] == 'first_new_reply' and reply_count != 1:
            return
        # Work out summary line (for both subject and msg)
        reply_s = "reply" if reply_count == 1 else "replies"
        reaction_s = "reaction" if reaction_count == 1 else "reactions"
        summary = ""
        if reply_count:
            summary += f"{reply_count} new {reply_s}"
        if reply_count and reaction_count:
            summary += " and "
        if reaction_count:
            summary += f"{reaction_count} new {reaction_s}"
        # Work out subject and heading
        subject = "Stello: " + summary
        heading = f"You have {summary} to your messages"
        # Work out msg
        msg = ""
        if SELF_HOSTED:
            msg += "Open Stello to see them"
            msg += "\n" * 10
            msg += "Ignore storage provider's notes below. Instead, change notification settings in Stello."
    # In case multiple sending profiles, note the bucket name in the subject
    subject += f" [{MSGS_BUCKET if SELF_HOSTED else user}]"
    # Send notification
    if not DEV:
        if SELF_HOSTED:
            boto3.client('sns', config=AWS_CONFIG).publish(
                TopicArn=TOPIC_ARN, Subject=subject, Message=f'{heading}\n\n\n{msg}')
        else:
            boto3.client('ses', config=AWS_CONFIG).send_email(
                Source=f"Stello <no-reply@{DOMAIN_BRANDED}>",
                Destination={'ToAddresses': [config['email']]},
                Message={
                    'Subject': {
                        'Data': subject,
                        'Charset': 'UTF-8',
                    },
                    'Body': {
                        'Html': {
                            'Data': generate_email(heading, msg),
                            'Charset': 'UTF-8',
                        },
                    },
                },
            )
# INVITER
def inviter_image(api_event):
    """Decrypt and respond with invite image

    Query params: 'user' (account name), 'copy' (message copy id),
    'k' (url64-encoded symmetric key).
    Serves a placeholder jpeg when the image object can't be fetched
    (e.g. the message expired); decryption errors propagate to entry()'s
    exception handler (which returns a bare 400).
    """
    params = api_event.get('queryStringParameters', {})
    user = params['user']
    copy_id = params['copy']
    secret = params['k']
    bucket_key = f'messages/{user}/invite_images/{copy_id}'
    try:
        obj = S3.get_object(Bucket=MSGS_BUCKET, Key=bucket_key)
    except:
        # Any fetch failure serves the expired placeholder rather than an error
        body = EXPIRED_IMAGE
    else:
        # Layout is IV followed by ciphertext (see _put_resp / _get_config)
        encrypted = obj['Body'].read()
        decryptor = AESGCM(_url64_to_bytes(secret))
        decrypted = decryptor.decrypt(encrypted[:SYM_IV_BYTES], encrypted[SYM_IV_BYTES:], None)
        body = base64.b64encode(decrypted).decode()
    return {
        'statusCode': 200,
        'headers': {
            'Content-Type': 'image/jpeg',
            # no-store: presumably so expiry/deletion takes effect immediately
            'Cache-Control': 'no-store',
        },
        'isBase64Encoded': True,
        'body': body,
    }
| 35.964419 | 398 | 0.664358 |
import os
import json
import base64
import string
from time import time
from uuid import uuid4
from pathlib import Path
from traceback import format_exc
from contextlib import suppress
import rollbar
import boto3
from botocore.config import Config
from cryptography.hazmat.primitives.hashes import SHA256
from cryptography.hazmat.primitives.serialization import load_der_public_key
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
from cryptography.hazmat.primitives.asymmetric.padding import OAEP, MGF1
from email_template import generate_email
# Constants
VALID_TYPES = ('read', 'reply', 'reaction', 'subscription', 'address', 'resend')
# A base64-encoded 3w1h solid #ddeeff jpeg
EXPIRED_IMAGE = '/9j/4AAQSkZJRgABAQEBLAEsAAD/2wBDAAoHBwgHBgoICAgLCgoLDhgQDg0NDh0VFhEYIx8lJCIfIiEmKzcvJik0KSEiMEExNDk7Pj4+JS5ESUM8SDc9Pjv/2wBDAQoLCw4NDhwQEBw7KCIoOzs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozv/wAARCAABAAMDAREAAhEBAxEB/8QAFAABAAAAAAAAAAAAAAAAAAAAB//EABQQAQAAAAAAAAAAAAAAAAAAAAD/xAAUAQEAAAAAAAAAAAAAAAAAAAAE/8QAFBEBAAAAAAAAAAAAAAAAAAAAAP/aAAwDAQACEQMRAD8AViR3/9k='
# Sym encryption settings (same as js version)
SYM_IV_BYTES = 12
SYM_TAG_BITS = 128 # Tag is 128 bits by default in AESGCM and not configurable
SYM_KEY_BITS = 256
# Config from env
ENV = os.environ['stello_env']
DEV = ENV == 'development'
VERSION = os.environ['stello_version']
MSGS_BUCKET = os.environ['stello_msgs_bucket']
RESP_BUCKET = MSGS_BUCKET + '-stello-resp'
REGION = os.environ['stello_region']
ROLLBAR_TOKEN = os.environ['stello_rollbar_responder'] # Client token (not server) as public
# Optional config
SELF_HOSTED = not os.environ.get('stello_domain_branded')
if SELF_HOSTED:
TOPIC_ARN = os.environ['stello_topic_arn']
else:
DOMAIN_BRANDED = os.environ['stello_domain_branded']
DOMAIN_GENERIC = os.environ['stello_domain_generic']
# Setup Rollbar
# WARN Must use blocking handler, otherwise lambda may finish before report is sent
# NOTE Version prefixed with 'v' so that traces match github tags
# SECURITY Don't expose local vars in report as could contain sensitive user content
rollbar.init(ROLLBAR_TOKEN, ENV, handler='blocking', code_version='v'+VERSION,
locals={'enabled': False}, root=str(Path(__file__).parent), enabled=not DEV)
def _rollbar_add_context(payload, **kwargs):
    """Rollbar payload handler: mark every report as client-platform."""
    payload['data']['platform'] = 'client' # Allow client token rather than server, since public
    return payload
rollbar.events.add_payload_handler(_rollbar_add_context)
# Access to AWS services
# NOTE Important to set region to avoid unnecessary redirects for e.g. s3
AWS_CONFIG = Config(region_name=REGION)
S3 = boto3.client('s3', config=AWS_CONFIG)
def entry(api_event, context):
"""Entrypoint that wraps main logic to add exception handling and CORS headers"""
# Handle GET requests (which don't send origin header so can't detect user)
if api_event['requestContext']['http']['method'] == 'GET':
try:
if api_event['requestContext']['http']['path'] == '/inviter/image':
return inviter_image(api_event)
# NOTE A number of companies crawl AWS services, so don't warn for invalid paths
raise Abort()
except Abort:
return {'statusCode': 400}
except:
# SECURITY Never reveal whether client or server error, just that it didn't work
_report_error(api_event)
return {'statusCode': 400}
# Determine expected origin (and detect user)
# NOTE Access-Control-Allow-Origin can only take one value, so must detect right one
if SELF_HOSTED:
user = '_user'
allowed_origin = f'https://{MSGS_BUCKET}.s3-{REGION}.amazonaws.com'
else:
# Hosted setup -- origin must be a subdomain of one of defined domains
user, _, root_origin = api_event['headers']['origin'].partition('//')[2].partition('.')
allowed_root = DOMAIN_GENERIC if root_origin == DOMAIN_GENERIC else DOMAIN_BRANDED
allowed_origin = f'https://{user}.{allowed_root}'
# If origin not allowed, 403 to prevent further processing of the request
if not DEV and api_event['headers']['origin'] != allowed_origin:
return {'statusCode': 403}
# Process event and catch exceptions
try:
response = _entry(api_event, user)
except Abort:
response = {'statusCode': 400}
except:
# SECURITY Never reveal whether client or server error, just that it didn't work
_report_error(api_event)
response = {'statusCode': 400}
# Add CORS headers so cross-domain request doesn't fail
response.setdefault('headers', {})
response['headers']['Access-Control-Allow-Origin'] = '*' if DEV else allowed_origin
if api_event['requestContext']['http']['method'] == 'OPTIONS':
response['headers']['Access-Control-Allow-Methods'] = 'GET,POST'
response['headers']['Access-Control-Allow-Headers'] = '*'
return response
def _entry(api_event, user):
"""Main processing logic
NOTE api_event format and response expected below
https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-develop-integrations-lambda.html
SECURITY Assume input may be malicious
SECURITY Never return anything back to recipient other than success status
Event data is expected to be: {
'config_secret': string,
'encrypted': string,
...
}
Data saved to response bucket is: encrypted JSON {
'event': ...,
'ip': string,
}
"""
# Handle CORS OPTIONS requests
if api_event['requestContext']['http']['method'] == 'OPTIONS':
return {'statusCode': 200}
# Handle POST requests
ip = api_event['requestContext']['http']['sourceIp']
event = json.loads(api_event['body'])
# These keys are required for all responses
_ensure_type(event, 'config_secret', str)
_ensure_type(event, 'encrypted', str)
# Load config (required to encrypt stored data, so can't do anything without)
config = _get_config(user, event['config_secret'])
# Get event type from path
resp_type = api_event['requestContext']['http']['path'].partition('/responder/')[2]
if resp_type not in VALID_TYPES:
raise Exception(f"Invalid value for response type: {resp_type}")
# Handle the event and then store it
handler = globals()[f'handle_{resp_type}']
handler(user, config, event)
_put_resp(config, resp_type, event, ip, user)
# Report success
return {'statusCode': 200}
# POST HANDLERS
def handle_read(user, config, event):
"""Delete message if reached max reads, otherwise increase read count
SECURITY While an attacker could circumvent this or send fake msg ids, there isn't much risk
This is mainly for triggering a delete if message shared widely when not permitted
Actual attackers would only need a single read anyway
For example, could be more reliable if separate lambda triggered by bucket requests
But more reliable doesn't necessarily mean more secure
resp_token is also still used Stello-side to verify reads
"""
# Expected fields
# SECURITY Yes attacker could change these value themselves but see above
_ensure_type(event, 'copy_id', str)
_ensure_type(event, 'has_max_reads', bool)
# Don't need to do anything if not tracking max reads
if not event['has_max_reads']:
return
# Get copies's tags
copy_key = f"messages/{user}/copies/{event['copy_id']}"
try:
resp = S3.get_object_tagging(Bucket=MSGS_BUCKET, Key=copy_key)
except S3.exceptions.NoSuchKey:
return # If msg already deleted, no reason to do any further processing (still report resp)
tags = {d['Key']: d['Value'] for d in resp['TagSet']}
# Parse and increase reads
reads = int(tags['stello-reads'])
max_reads = int(tags['stello-max-reads'])
reads += 1
tags['stello-reads'] = str(reads)
# Either delete message or update reads
if reads >= max_reads:
S3.delete_object(Bucket=MSGS_BUCKET, Key=copy_key)
# Also delete invite image
S3.delete_object(Bucket=MSGS_BUCKET, Key=f"messages/{user}/invite_images/{event['copy_id']}")
else:
S3.put_object_tagging(
Bucket=MSGS_BUCKET,
Key=copy_key,
Tagging={
# WARN MUST preserve other tags (like stello-lifespan!)
'TagSet': [{'Key': k, 'Value': v} for k, v in tags.items()],
},
)
def handle_reply(user, config, event):
"""Notify user of replies to their messages"""
if not config['allow_replies']:
raise Abort()
_send_notification(config, 'reply', event, user)
def handle_reaction(user, config, event):
"""Notify user of reactions to their messages"""
# Shouldn't be getting reactions if disabled them
if not config['allow_reactions']:
raise Abort()
# Ensure reaction is a short single hyphenated word if present (or null)
# SECURITY Prevents inserting long messages as a "reaction" but allows future codes too
# Noting that user may have enabled notifications for reactions, putting their value in emails
if 'content' in event and event['content'] is not None:
_ensure_valid_chars(event, 'content', string.ascii_letters + string.digits + '-')
if len(event['content']) > 25:
raise Exception("Reaction content too long")
_send_notification(config, 'reaction', event, user)
def handle_subscription(user, config, event):
"""Subscription modifications don't need any processing"""
def handle_address(user, config, event):
"""Subscription address modifications don't need any processing"""
def handle_resend(user, config, event):
"""Handle resend requests"""
if not config['allow_resend_requests']:
raise Abort()
_send_notification(config, 'resend', event, user)
def handle_delete(user, config, event):
# TODO Review this (event type currently disabled)
"""Handle a request to delete the recipient's copy of the message
SECURITY Stello config not checked, so technically recipient could delete manually even if the
option to is not presented in the message. Not considered a security risk.
SECURITY Recipient could technically delete any message copy
Since copies have unique ids, considered low risk, as they would only know their own
SECURITY Ensure this lambda fn can only delete messages (not other objects in bucket)
"""
copy_id = event['copy_id']
with suppress(S3.exceptions.NoSuchKey): # Already deleted is not a failure
S3.delete_object(Bucket=MSGS_BUCKET, Key=f'messages/{user}/copies/{copy_id}')
# HELPERS
class Abort(Exception):
"""Abort and respond with failure, but don't report any error"""
def _url64_to_bytes(url64_string):
    """Decode a custom-url-base64 string (padding '=' written as '~') to bytes."""
    padded = url64_string.replace('~', '=')
    return base64.urlsafe_b64decode(padded)
def _bytes_to_url64(bytes_data):
    """Encode bytes as custom-url-base64 (padding '=' replaced by '~')."""
    encoded = base64.urlsafe_b64encode(bytes_data).decode()
    return encoded.replace('=', '~')
def _get_config(user, secret):
    """Download, decrypt and parse responder config

    Args:
        user: user id whose config object to fetch from the responses bucket
        secret: custom-url-base64 encoded AES-GCM key
    The stored object layout is IV (SYM_IV_BYTES) followed by ciphertext+tag.
    """
    encrypted = S3.get_object(Bucket=RESP_BUCKET, Key=f'config/{user}/config')['Body'].read()
    decryptor = AESGCM(_url64_to_bytes(secret))
    decrypted = decryptor.decrypt(encrypted[:SYM_IV_BYTES], encrypted[SYM_IV_BYTES:], None)
    return json.loads(decrypted)
def _ensure_type(event, key, type_):
    """Raise unless event[key] exists and is an instance of type_."""
    value = event.get(key)
    if isinstance(value, type_):
        return
    raise Exception(f"Invalid or missing value for '{key}'")
def _ensure_valid_chars(event, key, valid_chars):
    """Ensure key's value is a non-empty string drawn only from valid_chars."""
    _ensure_type(event, key, str)
    value = event[key]
    if not value:
        raise Exception(f"Empty string for '{key}'")
    bad = [c for c in value if c not in valid_chars]
    if bad:
        # Report the first offending character, same as a left-to-right scan
        raise Exception(f"Invalid character '{bad[0]}' in {key}")
def _report_error(api_event):
    """Report error

    Prints the current traceback and forwards the active exception to
    Rollbar, attaching the caller's IP and User-Agent when the API event
    carries them (best effort -- missing metadata is silently ignored).
    """
    print(format_exc())
    # Add request metadata if available
    payload_data = {}
    try:
        payload_data = {
            'request': {
                'user_ip': api_event['requestContext']['http']['sourceIp'],
                'headers': {
                    'User-Agent': api_event['requestContext']['http']['userAgent'],
                },
            },
        }
    except:
        pass
    # Send to Rollbar
    rollbar.report_exc_info(payload_data=payload_data)
def _put_resp(config, resp_type, event, ip, user):
    """Save response object with encrypted data

    Hybrid encryption: the payload is AES-GCM encrypted under a fresh
    symmetric key, and that key is RSA-OAEP encrypted with the user's
    public key from the config, so only the Stello app (holding the
    private key) can read stored responses.

    SECURITY Ensure objects can't be placed in other dirs which app would never download
    """
    # Work out object id
    # Timestamp prefix for order, uuid suffix for uniqueness
    object_id = f'responses/{user}/{resp_type}/{int(time())}_{uuid4()}'
    # Encode data
    data = json.dumps({
        'event': event,
        'ip': ip,
    }).encode()
    # Decode asym public key and setup asym encrypter
    asym_key = _url64_to_bytes(config['resp_key_public'])
    asym_encryter = load_der_public_key(asym_key)
    # Generate sym key and encrypted form of it
    sym_key = AESGCM.generate_key(SYM_KEY_BITS)
    encrypted_key = asym_encryter.encrypt(sym_key, OAEP(MGF1(SHA256()), SHA256(), None))
    # Encrypt data and produce output
    sym_encrypter = AESGCM(sym_key)
    iv = os.urandom(SYM_IV_BYTES)
    encrypted_data = iv + sym_encrypter.encrypt(iv, data, None)
    output = json.dumps({
        'encrypted_data': _bytes_to_url64(encrypted_data),
        'encrypted_key': _bytes_to_url64(encrypted_key),
    })
    # Store in bucket
    S3.put_object(Bucket=RESP_BUCKET, Key=object_id, Body=output.encode())
def _count_resp_objects(user, resp_type):
    """Return a count of stored objects for the given response type

    TODO Paginates at 1000, which may be a concern when counting reactions for popular users
    """
    resp = S3.list_objects_v2(
        Bucket=RESP_BUCKET,
        Prefix=f'responses/{user}/{resp_type}/',
    )
    # KeyCount only reflects the first page (max 1000 keys) of results
    return resp['KeyCount']
def _send_notification(config, resp_type, event, user):
    """Notify user of replies/reactions/resends for their messages (if configured to)

    Notify modes: none, first_new_reply, replies, replies_and_reactions
    Including contents only applies to: replies, replies_and_reactions

    Args:
        config: decrypted responder config; note notify_include_contents is
            overridden in-place below when notify_mode is first_new_reply
        resp_type: 'reply', 'reaction' or 'resend'
        event: the request payload (may carry 'content')
        user: id of the user who owns the message

    Delivery is via SNS topic when self-hosted, otherwise via SES email.
    Skipped entirely when DEV is set.
    """
    # Determine if a reaction or reply/resend
    # NOTE To keep things simple, resends are considered "replies" for purpose of notifications
    reaction = resp_type == 'reaction'
    # Do nothing if notifications disabled
    if config['notify_mode'] == 'none':
        return
    if reaction and config['notify_mode'] != 'replies_and_reactions':
        return
    # Ensure notify_include_contents takes into account notify_mode
    if config['notify_mode'] == 'first_new_reply':
        config['notify_include_contents'] = False
    # Prepare message
    # NOTE Possible to have race condition where contents should be included but isn't, so check
    if config['notify_include_contents'] and 'content' in event:
        # If content is null, just clearing a previous reaction, so don't notify
        if event['content'] is None:
            return
        subject = "Stello: New reaction" if reaction else "Stello: New reply"
        heading = "Someone reacted with:" if reaction else "Someone replied with:"
        msg = event['content']
        if SELF_HOSTED:
            msg += "\n" * 10
            msg += (
                "#### MESSAGE END ####\n"
                "Open Stello to identify who responded and to reply to them"
                " (not possible via email for security reasons)."
                " Ignore storage provider's notes below."
                " Instead, change notification settings in Stello."
            )
    else:
        # Work out counts
        reply_count = _count_resp_objects(user, 'reply') + _count_resp_objects(user, 'resend')
        reaction_count = _count_resp_objects(user, 'reaction')
        if reaction:
            reaction_count += 1
        else:
            reply_count += 1
        # If notify_mode is first_new_reply then only continue if this is the first
        # NOTE Already returned if a reaction and in this notify_mode
        if config['notify_mode'] == 'first_new_reply' and reply_count != 1:
            return
        # Work out summary line (for both subject and msg)
        reply_s = "reply" if reply_count == 1 else "replies"
        reaction_s = "reaction" if reaction_count == 1 else "reactions"
        summary = ""
        if reply_count:
            summary += f"{reply_count} new {reply_s}"
        if reply_count and reaction_count:
            summary += " and "
        if reaction_count:
            summary += f"{reaction_count} new {reaction_s}"
        # Work out subject and heading
        subject = "Stello: " + summary
        heading = f"You have {summary} to your messages"
        # Work out msg
        msg = ""
        if SELF_HOSTED:
            msg += "Open Stello to see them"
        msg += "\n" * 10
        msg += "Ignore storage provider's notes below. Instead, change notification settings in Stello."
    # In case multiple sending profiles, note the bucket name in the subject
    subject += f" [{MSGS_BUCKET if SELF_HOSTED else user}]"
    # Send notification (skipped in dev)
    if not DEV:
        if SELF_HOSTED:
            boto3.client('sns', config=AWS_CONFIG).publish(
                TopicArn=TOPIC_ARN, Subject=subject, Message=f'{heading}\n\n\n{msg}')
        else:
            boto3.client('ses', config=AWS_CONFIG).send_email(
                Source=f"Stello <no-reply@{DOMAIN_BRANDED}>",
                Destination={'ToAddresses': [config['email']]},
                Message={
                    'Subject': {
                        'Data': subject,
                        'Charset': 'UTF-8',
                    },
                    'Body': {
                        'Html': {
                            'Data': generate_email(heading, msg),
                            'Charset': 'UTF-8',
                        },
                    },
                },
            )
# INVITER
def inviter_image(api_event):
    """Decrypt and respond with invite image

    Args:
        api_event: Lambda HTTP API event; query params 'user', 'copy'
            (copy id) and 'k' (custom-url-base64 AES-GCM key) are required
    Returns:
        HTTP API response dict with a base64-encoded JPEG body; if the
        stored image cannot be fetched (e.g. expired/deleted) the
        EXPIRED_IMAGE placeholder is returned instead.
    """
    params = api_event.get('queryStringParameters', {})
    user = params['user']
    copy_id = params['copy']
    secret = params['k']
    bucket_key = f'messages/{user}/invite_images/{copy_id}'
    try:
        obj = S3.get_object(Bucket=MSGS_BUCKET, Key=bucket_key)
    except Exception:  # BUG FIX: bare `except:` also swallowed SystemExit/KeyboardInterrupt
        # Object missing or fetch failed -> serve the "expired" placeholder
        body = EXPIRED_IMAGE
    else:
        encrypted = obj['Body'].read()
        decryptor = AESGCM(_url64_to_bytes(secret))
        # First SYM_IV_BYTES bytes are the IV; the remainder is ciphertext+tag
        decrypted = decryptor.decrypt(encrypted[:SYM_IV_BYTES], encrypted[SYM_IV_BYTES:], None)
        body = base64.b64encode(decrypted).decode()
    return {
        'statusCode': 200,
        'headers': {
            'Content-Type': 'image/jpeg',
            'Cache-Control': 'no-store',
        },
        'isBase64Encoded': True,
        'body': body,
    }
| 140 | 0 | 22 |
3d41d405150ed0e79bbadee5ab542a4512d98c48 | 2,718 | py | Python | controllers.py | afewyards/starcitizen_gremlin | a173711f96e4ad491901a0afd3b899fa08f76d2b | [
"MIT"
] | null | null | null | controllers.py | afewyards/starcitizen_gremlin | a173711f96e4ad491901a0afd3b899fa08f76d2b | [
"MIT"
] | null | null | null | controllers.py | afewyards/starcitizen_gremlin | a173711f96e4ad491901a0afd3b899fa08f76d2b | [
"MIT"
] | null | null | null | # Copyright (c) 2017 Thierry Kleist
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from gremlin import event_handler
from gremlin.input_devices import callback_registry
from gremlin.util import extract_ids, SingletonDecorator
from config import DeviceConfig
@SingletonDecorator
controllers = Controllers()
| 33.555556 | 75 | 0.702723 | # Copyright (c) 2017 Thierry Kleist
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from gremlin import event_handler
from gremlin.input_devices import callback_registry
from gremlin.util import extract_ids, SingletonDecorator
from config import DeviceConfig
class Device(object):
    """One physical input device registered with Gremlin in a given mode.

    ``device_id`` is split via ``extract_ids`` into the hardware id and the
    windows id that Gremlin events are keyed on.
    """
    def __init__(self, name, device_id, mode):
        self.name = name
        self.mode = mode
        self.device_id = device_id
        self.hid, self.wid = extract_ids(device_id)
    def addEvent(self, event_type, id, callback):
        """Register `callback` for events of `event_type` with identifier `id`."""
        event = event_handler.Event(
            event_type=event_type,
            hardware_id=self.hid,
            windows_id=self.wid,
            identifier=id
        )
        callback_registry.add(callback, event, self.mode, False)
    def addButtonEvent(self, callback, id):
        """Register a joystick-button callback for button `id`."""
        self.addEvent(event_handler.InputType.JoystickButton, id, callback)
    def addAxisEvent(self, callback, id):
        """Register a joystick-axis callback for axis `id`."""
        self.addEvent(event_handler.InputType.JoystickAxis, id, callback)
    def addHatEvent(self, callback, id):
        """Register a joystick-hat callback for hat `id`."""
        self.addEvent(event_handler.InputType.JoystickHat, id, callback)
@SingletonDecorator
class Controllers:
    """Singleton exposing the configured joystick, throttle and rudder devices."""
    def __init__(self):
        self.joystick = Device(
            name=DeviceConfig.joystick_name,
            device_id=DeviceConfig.joystick_id,
            mode="Default"
        )
        self.throttle = Device(
            name=DeviceConfig.throttle_name,
            device_id=DeviceConfig.throttle_id,
            mode="Default"
        )
        self.rudder = Device(
            name=DeviceConfig.rudder_name,
            device_id=DeviceConfig.rudder_id,
            mode="Default"
        )
# Module-level instance for profile scripts to import
controllers = Controllers()
| 1,175 | -3 | 207 |
366c3dd1425e93e61a20c2ae2a3da2669aab8950 | 741 | py | Python | Computer & Information Science Core courses/2168/prims/node.py | Vaporjawn/Temple-University-Computer-Science-Resources | 8d54db3a85a1baa8ba344efc90593b440eb6d585 | [
"MIT"
] | 1 | 2020-07-28T16:18:38.000Z | 2020-07-28T16:18:38.000Z | Computer & Information Science Core courses/2168/prims/node.py | Vaporjawn/Temple-University-Computer-Science-Resources | 8d54db3a85a1baa8ba344efc90593b440eb6d585 | [
"MIT"
] | 4 | 2020-07-15T06:40:55.000Z | 2020-08-13T16:01:30.000Z | Computer & Information Science Core courses/2168/prims/node.py | Vaporjawn/Temple-University-Computer-Science-Resources | 8d54db3a85a1baa8ba344efc90593b440eb6d585 | [
"MIT"
] | null | null | null |
# TODO; input type verification
# Adds an weighted unidirectional edge to another existing node
# TODO: add verification that node exists
# Returns all of the edges connected to the current node
# Returns the weight of the edge connected the specified node to the current node
class Node:
    """A named graph vertex with weighted, unidirectional outgoing edges.

    Edge weights are keyed by the destination node's *name*, so node names
    are assumed unique within a graph.
    """

    def __init__(self, name):
        """Create a vertex called *name* with no outgoing edges yet."""
        # TODO: input type verification
        self.name = name
        self.connected_nodes = []
        # Weights of the edges, accessible by connected node name
        self.connected_nodes_weights = {}

    def add_connection(self, node, weight):
        """Attach a weighted unidirectional edge from this node to *node*."""
        # TODO: add verification that node exists
        self.connected_nodes.append(node)
        self.connected_nodes_weights[node.name] = weight

    def get_connections(self):
        """Return the list of nodes this node has edges to."""
        return self.connected_nodes

    def get_connection_weight(self, node):
        """Return the weight of the edge connecting this node to *node*."""
        return self.connected_nodes_weights[node.name]
| 350 | -10 | 114 |
94c8cc94a2af46e25ce52873ef6ff30ccc268642 | 4,875 | py | Python | adv_setup.py | Derek318/Adversarial-Squad-CS224N | 9b4a5da2a262f4de9b9b05d7b67dc48b2b857e46 | [
"MIT"
] | 1 | 2020-11-12T02:49:32.000Z | 2020-11-12T02:49:32.000Z | adv_setup.py | Derek318/Adversarial-Squad-CS224N | 9b4a5da2a262f4de9b9b05d7b67dc48b2b857e46 | [
"MIT"
] | null | null | null | adv_setup.py | Derek318/Adversarial-Squad-CS224N | 9b4a5da2a262f4de9b9b05d7b67dc48b2b857e46 | [
"MIT"
] | null | null | null | import logging
import os
import queue
import random
import re
from args import get_setup_args
import shutil
import string
import setup
import torch
import torch.nn.functional as F
import torch.utils.data as data
from collections import Counter
import tqdm
import numpy as np
import ujson as json
import spacy
import json
from sklearn.model_selection import train_test_split
# 60 20 20 split
if __name__ == '__main__':
pre_process()
test_baseline()
| 39.314516 | 159 | 0.694359 | import logging
import os
import queue
import random
import re
from args import get_setup_args
import shutil
import string
import setup
import torch
import torch.nn.functional as F
import torch.utils.data as data
from collections import Counter
import tqdm
import numpy as np
import ujson as json
import spacy
import json
from sklearn.model_selection import train_test_split
# 60 20 20 split
def pre_process():
    """Split the adversarial dataset 60/20/20 and write all model artifacts.

    Reads ./adversarial_dataset.json, builds word/char vocabularies, splits
    examples into train/dev/test (2136/712/712 of 3560 questions), then
    writes the .npz feature files and the eval/meta/embedding .json files
    expected under ./adv_data/.
    """
    # Build word/character vocabularies
    # NOTE(review): counters are built over the ENTIRE dataset rather than
    # just the train split as in the original setup.py -- confirm intended.
    word_counter, char_counter = Counter(), Counter()
    all_examples, all_eval = setup.process_file(
        "./adversarial_dataset.json", "all", word_counter, char_counter)
    # 60/20/20 train/dev/test split
    train_examples, residual_examples = train_test_split(all_examples, test_size=0.4)
    dev_examples, test_examples = train_test_split(residual_examples, test_size=0.5)
    # Slice the eval dict to match each split (keys are stringified ids)
    train_eval = {str(e['id']): all_eval[str(e['id'])] for e in train_examples}
    dev_eval = {str(e['id']): all_eval[str(e['id'])] for e in dev_examples}
    test_eval = {str(e['id']): all_eval[str(e['id'])] for e in test_examples}
    # IMPORTANT: Ensure corresponding questions and answers were not split
    # into different datasets
    assert set(str(e['id']) for e in train_examples) == set(train_eval.keys())
    assert set(str(e['id']) for e in dev_examples) == set(dev_eval.keys())
    assert set(str(e['id']) for e in test_examples) == set(test_eval.keys())
    args = get_setup_args()
    # Setup glove path for adversarial dataset
    glove_dir = setup.url_to_data_path(args.glove_url.replace('.zip', ''))
    glove_ext = f'.txt' if glove_dir.endswith('d') else f'.{args.glove_dim}d.txt'
    args.glove_file = os.path.join(glove_dir, os.path.basename(glove_dir) + glove_ext)
    # Setup word, char embeddings for adversarial data
    word_emb_mat, word2idx_dict = setup.get_embedding(
        word_counter, 'word', emb_file=args.glove_file,
        vec_size=args.glove_dim, num_vectors=args.glove_num_vecs)
    char_emb_mat, char2idx_dict = setup.get_embedding(
        char_counter, 'char', emb_file=None, vec_size=args.char_dim)
    # Build .npz feature files for each split
    setup.build_features(args, train_examples, "train", "./adv_data/train.npz",
                         word2idx_dict, char2idx_dict)
    dev_meta = setup.build_features(args, dev_examples, "dev", "./adv_data/dev.npz",
                                    word2idx_dict, char2idx_dict)
    if args.include_test_examples:  # True by default
        setup.save("./adv_data/test_eval.json", test_eval, message="adv test eval")
        test_meta = setup.build_features(args, test_examples, "adv test",
                                         "./adv_data/test.npz", word2idx_dict,
                                         char2idx_dict, is_test=True)
        setup.save("./adv_data/test_meta.json", test_meta, message="adv test meta")
    setup.save("./adv_data/word_emb.json", word_emb_mat, message="word embedding")
    setup.save("./adv_data/char_emb.json", char_emb_mat, message="char embedding")
    setup.save("./adv_data/train_eval.json", train_eval, message="adv train eval")
    # BUG FIX: previously passed `dev_val`, an undefined name (NameError)
    setup.save("./adv_data/dev_eval.json", dev_eval, message="adv dev eval")
    setup.save("./adv_data/word2idx.json", word2idx_dict, message="word dictionary")
    setup.save("./adv_data/char2idx.json", char2idx_dict, message="char dictionary")
    setup.save("./adv_data/dev_meta.json", dev_meta, message="adv dev meta")
def test_baseline():
    """Placeholder: evaluate the baseline on the adversarial set (not implemented)."""
    pass
# Script entry point: build the adversarial data splits, then run the
# (currently stubbed) baseline evaluation
if __name__ == '__main__':
    pre_process()
    test_baseline()
| 4,322 | 0 | 58 |
62db68da161d1ea9d14e592b1e3cf0ae819a76bf | 1,612 | py | Python | udacity/self-driving-intro/2-bayesian-thinking/12/test.py | adriancarriger/experiments | 7e4248592dc8fbb08522c9b5f0393c80dc7e2699 | [
"MIT"
] | 1 | 2021-06-22T13:38:36.000Z | 2021-06-22T13:38:36.000Z | udacity/self-driving-intro/2-bayesian-thinking/12/test.py | adriancarriger/experiments | 7e4248592dc8fbb08522c9b5f0393c80dc7e2699 | [
"MIT"
] | 108 | 2019-05-23T16:12:32.000Z | 2020-09-04T15:47:33.000Z | udacity/self-driving-intro/2-bayesian-thinking/12/test.py | adriancarriger/experiments | 7e4248592dc8fbb08522c9b5f0393c80dc7e2699 | [
"MIT"
] | null | null | null | # from './localizer' import localizer
import localizer
import helpers
test_sense()
| 23.362319 | 75 | 0.604218 | # from './localizer' import localizer
import localizer
import helpers
def test_sense():
    """Exercise localizer.sense on a 3x3 grid with a single red centre cell.

    With a uniform prior and p_hit/p_miss of 3/1, observing 'r' should
    concentrate belief on the red cell (3/11) versus the rest (1/11 each).
    Prints a diagnostic describing the first failure mode detected.
    """
    R = 'r'
    _ = 'g'
    simple_grid = [
        [_, _, _],
        [_, R, _],
        [_, _, _]
    ]
    p = 1.0 / 9
    initial_beliefs = [
        [p, p, p],
        [p, p, p],
        [p, p, p]
    ]
    observation = R
    # Normalized posterior: 8 cells weighted 1 + 1 cell weighted 3 = 11 total
    expected_beliefs_after = [
        [1/11, 1/11, 1/11],
        [1/11, 3/11, 1/11],
        [1/11, 1/11, 1/11]
    ]
    p_hit = 3.0
    p_miss = 1.0
    beliefs_after_sensing = localizer.sense(
        observation, simple_grid, initial_beliefs, p_hit, p_miss)
    if helpers.close_enough(beliefs_after_sensing, expected_beliefs_after):
        print("Tests pass! Your sense function is working as expected")
        return
    elif not isinstance(beliefs_after_sensing, list):
        print("Your sense function doesn't return a list!")
        return
    elif len(beliefs_after_sensing) != len(expected_beliefs_after):
        print("Dimensionality error! Incorrect height")
        return
    elif len(beliefs_after_sensing[0]) != len(expected_beliefs_after[0]):
        print("Dimensionality Error! Incorrect width")
        return
    elif beliefs_after_sensing == initial_beliefs:
        print("Your code returns the initial beliefs.")
        return
    # Last resort: check the distribution still sums to ~1
    total_probability = 0.0
    for row in beliefs_after_sensing:
        for p in row:
            total_probability += p
    if abs(total_probability-1.0) > 0.001:
        print("Your beliefs appear to not be normalized")
        return
    print("Something isn't quite right with your sense function")
# Run the check when this module is executed/imported
test_sense()
| 1,503 | 0 | 23 |
deecfb2ff8809fa583a186388e95973a391ea0c6 | 3,577 | py | Python | volDB/migrations/0001_initial.py | leg2015/CSCapstone19Volunteers | ae0fcf1e8ce4fafe8578edd0a3943574703046fa | [
"MIT"
] | 4 | 2020-01-13T23:30:34.000Z | 2021-03-17T21:23:57.000Z | volDB/migrations/0001_initial.py | leg2015/CSCapstone19Volunteers | ae0fcf1e8ce4fafe8578edd0a3943574703046fa | [
"MIT"
] | 5 | 2020-02-12T03:25:17.000Z | 2021-06-10T22:29:16.000Z | volDB/migrations/0001_initial.py | leg2015/CSCapstone19Volunteers | ae0fcf1e8ce4fafe8578edd0a3943574703046fa | [
"MIT"
] | null | null | null | # Generated by Django 2.1.7 on 2019-02-23 18:47
from django.db import migrations, models
import django.db.models.deletion
| 41.593023 | 137 | 0.574224 | # Generated by Django 2.1.7 on 2019-02-23 18:47
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: creates Address, Category, Email, Location,
    Organization and Phone models, then links each auxiliary model to
    Organization via a DO_NOTHING foreign key (column ``orgID``)."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Address',
            fields=[
                ('addressID', models.AutoField(db_column='addressID', primary_key=True, serialize=False)),
                ('street', models.CharField(db_column='street', max_length=100)),
                ('city', models.CharField(db_column='city', max_length=20)),
                ('state', models.CharField(db_column='state', max_length=20)),
                ('zipCode', models.IntegerField(db_column='zipCode')),
            ],
        ),
        migrations.CreateModel(
            name='Category',
            fields=[
                ('category', models.CharField(db_column='category', max_length=20)),
                ('categoryID', models.AutoField(db_column='categoryID', primary_key=True, serialize=False)),
            ],
        ),
        migrations.CreateModel(
            name='Email',
            fields=[
                ('email', models.EmailField(db_column='email', max_length=254)),
                ('emailID', models.AutoField(db_column='emailID', primary_key=True, serialize=False)),
            ],
        ),
        migrations.CreateModel(
            name='Location',
            fields=[
                ('location', models.CharField(db_column='location', max_length=20)),
                ('locationID', models.AutoField(db_column='locationID', primary_key=True, serialize=False)),
            ],
        ),
        migrations.CreateModel(
            name='Organization',
            fields=[
                ('name', models.CharField(db_column='orgName', max_length=100)),
                ('orgID', models.AutoField(db_column='orgID', primary_key=True, serialize=False)),
                ('mission', models.TextField(db_column='missionStatement')),
                ('opportunities', models.TextField(db_column='volOpportunities')),
                ('website', models.URLField(db_column='volURL')),
                ('notes', models.TextField(db_column='notes')),
            ],
        ),
        migrations.CreateModel(
            name='Phone',
            fields=[
                ('phoneID', models.AutoField(db_column='phoneID', primary_key=True, serialize=False)),
                ('phone', models.CharField(db_column='phone', max_length=10)),
                ('orgid', models.ForeignKey(db_column='orgID', on_delete=django.db.models.deletion.DO_NOTHING, to='volDB.Organization')),
            ],
        ),
        migrations.AddField(
            model_name='location',
            name='orgID',
            field=models.ForeignKey(db_column='orgID', on_delete=django.db.models.deletion.DO_NOTHING, to='volDB.Organization'),
        ),
        migrations.AddField(
            model_name='email',
            name='orgID',
            field=models.ForeignKey(db_column='orgID', on_delete=django.db.models.deletion.DO_NOTHING, to='volDB.Organization'),
        ),
        migrations.AddField(
            model_name='category',
            name='orgID',
            field=models.ForeignKey(db_column='orgID', on_delete=django.db.models.deletion.DO_NOTHING, to='volDB.Organization'),
        ),
        migrations.AddField(
            model_name='address',
            name='orgID',
            field=models.ForeignKey(db_column='orgID', on_delete=django.db.models.deletion.DO_NOTHING, to='volDB.Organization'),
        ),
    ]
| 0 | 3,430 | 23 |
5bf95b53dad15597c23e37c17856b5c97c6c0117 | 627 | py | Python | Python Fundamentals/Dictionaries/Exercise/Task10.py | IvanTodorovBG/SoftUni | 7b667f6905d9f695ab1484efbb02b6715f6d569e | [
"MIT"
] | 1 | 2022-03-16T10:23:04.000Z | 2022-03-16T10:23:04.000Z | Python Fundamentals/Dictionaries/Exercise/Task10.py | IvanTodorovBG/SoftUni | 7b667f6905d9f695ab1484efbb02b6715f6d569e | [
"MIT"
] | null | null | null | Python Fundamentals/Dictionaries/Exercise/Task10.py | IvanTodorovBG/SoftUni | 7b667f6905d9f695ab1484efbb02b6715f6d569e | [
"MIT"
] | null | null | null | command = input()
company_users = {}
# add all company users in dictionary
while command != "End":
command = command.split(" -> ")
company = command[0]
users = command[1]
company_users.setdefault(company, []).append(users)
command = input()
# remove duplicating users
for k, v in company_users.items():
new_v = sorted(set(v), key=v.index)
company_users[k] = new_v
# sort
sorted_company_users = dict(sorted(company_users.items(), key=lambda x: x[0]))
# print
for key, value in sorted_company_users.items():
print(key)
print("\n".join(f"-- {val}" for val in sorted_company_users[key]))
| 23.222222 | 78 | 0.669856 | command = input()
company_users = {}
# Read "company -> user" lines until "End", grouping users per company
# (`command` is read via input() immediately before this block)
while command != "End":
    command = command.split(" -> ")
    company = command[0]
    users = command[1]
    company_users.setdefault(company, []).append(users)
    command = input()
# Remove duplicate users while keeping first-seen order
for k, v in company_users.items():
    new_v = sorted(set(v), key=v.index)
    company_users[k] = new_v
# Sort companies alphabetically by name
sorted_company_users = dict(sorted(company_users.items(), key=lambda x: x[0]))
# Print each company followed by its users, one per line
for key, value in sorted_company_users.items():
    print(key)
    print("\n".join(f"-- {val}" for val in sorted_company_users[key]))
| 0 | 0 | 0 |
dbc4c3310857e85d9121fdbadd2b16c4be0bc6f2 | 5,045 | py | Python | django_op/oidc_op/users.py | peppelinux/oidc-op | c0385b5cbdb48fe2f74a556174d26444a48e6bed | [
"Apache-2.0"
] | 1 | 2020-09-30T13:07:48.000Z | 2020-09-30T13:07:48.000Z | django_op/oidc_op/users.py | peppelinux/oidc-op | c0385b5cbdb48fe2f74a556174d26444a48e6bed | [
"Apache-2.0"
] | null | null | null | django_op/oidc_op/users.py | peppelinux/oidc-op | c0385b5cbdb48fe2f74a556174d26444a48e6bed | [
"Apache-2.0"
] | null | null | null | import copy
from django.contrib.auth import authenticate, get_user_model
from django.contrib.auth import get_user_model
from django.template.loader import render_to_string
from oidcendpoint.util import instantiate
from oidcendpoint.user_authn.user import (create_signed_jwt,
verify_signed_jwt)
from oidcendpoint.user_authn.user import UserAuthnMethod
class UserPassDjango(UserAuthnMethod):
"""
see oidcendpoint.authn_context
oidcendpoint.endpoint_context
https://docs.djangoproject.com/en/2.2/ref/templates/api/#rendering-a-context
"""
# TODO: get this though settings conf
url_endpoint = "/verify/user_pass_django"
def __init__(self,
# template_handler=render_to_string,
template="oidc_login.html",
endpoint_context=None, verify_endpoint='', **kwargs):
"""
template_handler is only for backwards compatibility
it will be always replaced by Django's default
"""
super(UserPassDjango, self).__init__(endpoint_context=endpoint_context)
self.kwargs = kwargs
self.kwargs.setdefault("page_header", "Log in")
self.kwargs.setdefault("user_label", "Username")
self.kwargs.setdefault("passwd_label", "Password")
self.kwargs.setdefault("submit_btn", "Log in")
self.kwargs.setdefault("tos_uri", "")
self.kwargs.setdefault("logo_uri", "")
self.kwargs.setdefault("policy_uri", "")
self.kwargs.setdefault("tos_label", "")
self.kwargs.setdefault("logo_label", "")
self.kwargs.setdefault("policy_label", "")
# TODO this could be taken from args
self.template_handler = render_to_string
self.template = template
self.action = verify_endpoint or self.url_endpoint
self.kwargs['action'] = self.action
class UserInfo(object):
""" Read only interface to a user info store """
def filter(self, user, user_info_claims=None):
"""
Return only those claims that are asked for.
It's a best effort task; if essential claims are not present
no error is flagged.
:param userinfo: A dictionary containing the available info for one user
:param user_info_claims: A dictionary specifying the asked for claims
:return: A dictionary of filtered claims.
"""
result = {}
if not user.is_active:
return result
if user_info_claims is None:
return copy.copy(user.__dict__)
else:
missing = []
optional = []
for key, restr in user_info_claims.items():
if key in self.claims_map:
# manage required and optional: TODO extends this approach
if not hasattr(user, self.claims_map[key]) and restr == {"essential": True}:
missing.append(key)
continue
else:
optional.append(key)
#
uattr = getattr(user, self.claims_map[key], None)
if not uattr: continue
result[key] = uattr() if callable(uattr) else uattr
return result
def __call__(self, user_id, client_id, user_info_claims=None, **kwargs):
"""
user_id = username
client_id = client id, ex: 'mHwpZsDeWo5g'
"""
user = get_user_model().objects.filter(username=user_id).first()
if not user:
# Todo: raise exception here, this wouldn't be possible.
return {}
try:
return self.filter(user, user_info_claims)
except KeyError:
return {}
| 34.319728 | 96 | 0.595639 | import copy
from django.contrib.auth import authenticate, get_user_model
from django.contrib.auth import get_user_model
from django.template.loader import render_to_string
from oidcendpoint.util import instantiate
from oidcendpoint.user_authn.user import (create_signed_jwt,
verify_signed_jwt)
from oidcendpoint.user_authn.user import UserAuthnMethod
class UserPassDjango(UserAuthnMethod):
    """Username/password authentication backed by Django's auth framework.

    Renders the login form with Django's template engine and checks the
    submitted credentials via ``django.contrib.auth.authenticate``.

    see oidcendpoint.authn_context
        oidcendpoint.endpoint_context
    https://docs.djangoproject.com/en/2.2/ref/templates/api/#rendering-a-context
    """
    # TODO: get this though settings conf
    url_endpoint = "/verify/user_pass_django"

    def __init__(self,
                 # template_handler=render_to_string,
                 template="oidc_login.html",
                 endpoint_context=None, verify_endpoint='', **kwargs):
        """
        template_handler is only for backwards compatibility
        it will be always replaced by Django's default
        """
        super(UserPassDjango, self).__init__(endpoint_context=endpoint_context)
        self.kwargs = kwargs
        # Sensible defaults for every template variable
        self.kwargs.setdefault("page_header", "Log in")
        self.kwargs.setdefault("user_label", "Username")
        self.kwargs.setdefault("passwd_label", "Password")
        self.kwargs.setdefault("submit_btn", "Log in")
        self.kwargs.setdefault("tos_uri", "")
        self.kwargs.setdefault("logo_uri", "")
        self.kwargs.setdefault("policy_uri", "")
        self.kwargs.setdefault("tos_label", "")
        self.kwargs.setdefault("logo_label", "")
        self.kwargs.setdefault("policy_label", "")
        # TODO this could be taken from args
        self.template_handler = render_to_string
        self.template = template
        self.action = verify_endpoint or self.url_endpoint
        self.kwargs['action'] = self.action

    def __call__(self, **kwargs):
        """Render the login form, embedding state in a signed JWT hidden input."""
        _ec = self.endpoint_context
        # Stores information need afterwards in a signed JWT that then
        # appears as a hidden input in the form
        jws = create_signed_jwt(_ec.issuer, _ec.keyjar, **kwargs)
        self.kwargs['token'] = jws
        _kwargs = self.kwargs.copy()
        for attr in ['policy', 'tos', 'logo']:
            _uri = '{}_uri'.format(attr)
            try:
                _kwargs[_uri] = kwargs[_uri]
            except KeyError:
                pass
            else:
                _label = '{}_label'.format(attr)
                # NOTE(review): LABELS is not defined/imported in this module,
                # so supplying a policy/tos/logo uri would raise NameError --
                # confirm where LABELS should come from.
                _kwargs[_label] = LABELS[_uri]
        return self.template_handler(self.template, _kwargs)

    def verify(self, *args, **kwargs):
        """Check submitted credentials; return the user or raise on failure."""
        username = kwargs["username"]
        password = kwargs["password"]
        user = authenticate(username=username, password=password)
        # BUG FIX: previously tested `if username:` (always truthy once the
        # key lookup succeeded), so failed logins silently returned None.
        # authenticate() returns None for invalid credentials, so test the
        # authentication result itself.
        if user:
            return user
        else:
            # NOTE(review): FailedAuthentication is not imported in this
            # module; import it from oidcendpoint for this path to work.
            raise FailedAuthentication()
class UserInfo(object):
    """ Read only interface to a user info store """
    def __init__(self, *args, **kwargs):
        # Maps OIDC claim names -> Django user attribute/method names
        self.claims_map = kwargs.get('claims_map', {})
    def filter(self, user, user_info_claims=None):
        """
        Return only those claims that are asked for.
        It's a best effort task; if essential claims are not present
        no error is flagged.

        :param user: A Django user instance (inactive users yield no claims)
        :param user_info_claims: A dictionary specifying the asked for claims
        :return: A dictionary of filtered claims.
        """
        result = {}
        if not user.is_active:
            return result
        if user_info_claims is None:
            # NOTE(review): exposes the user's whole __dict__ when no claims
            # are requested -- confirm this does not leak internal fields
            return copy.copy(user.__dict__)
        else:
            # NOTE(review): `missing` and `optional` are collected but never
            # used afterwards -- essential-claim enforcement looks unfinished
            missing = []
            optional = []
            for key, restr in user_info_claims.items():
                if key in self.claims_map:
                    # manage required and optional: TODO extends this approach
                    if not hasattr(user, self.claims_map[key]) and restr == {"essential": True}:
                        missing.append(key)
                        continue
                    else:
                        optional.append(key)
                    #
                    uattr = getattr(user, self.claims_map[key], None)
                    if not uattr: continue
                    # Mapped attribute may be a method (e.g. get_full_name)
                    result[key] = uattr() if callable(uattr) else uattr
            return result
    def __call__(self, user_id, client_id, user_info_claims=None, **kwargs):
        """
        user_id = username
        client_id = client id, ex: 'mHwpZsDeWo5g'
        """
        user = get_user_model().objects.filter(username=user_id).first()
        if not user:
            # Todo: raise exception here, this wouldn't be possible.
            return {}
        try:
            return self.filter(user, user_info_claims)
        except KeyError:
            return {}
    def search(self, **kwargs):
        # NOTE(review): `self.db` is never assigned in __init__ and
        # `dict_subset` is not defined/imported in this module -- this method
        # would raise as written; confirm whether it is dead code inherited
        # from the oidcendpoint dict-backed UserInfo.
        for uid, args in self.db.items():
            if dict_subset(kwargs, args):
                return uid
        raise KeyError('No matching user')
| 1,167 | 0 | 108 |
b88cd470cbd6ea92ae15dc576cddd38972675cdd | 2,608 | py | Python | foxylib/tools/native/typing/typing_tool.py | foxytrixy-com/foxylib | 94b8c5b9f8b12423393c68f7d9f910258840ed18 | [
"BSD-3-Clause"
] | null | null | null | foxylib/tools/native/typing/typing_tool.py | foxytrixy-com/foxylib | 94b8c5b9f8b12423393c68f7d9f910258840ed18 | [
"BSD-3-Clause"
] | 3 | 2019-12-12T05:17:44.000Z | 2022-03-11T23:40:50.000Z | foxylib/tools/native/typing/typing_tool.py | foxytrixy-com/foxylib | 94b8c5b9f8b12423393c68f7d9f910258840ed18 | [
"BSD-3-Clause"
] | 2 | 2019-10-16T17:39:34.000Z | 2020-02-10T06:32:08.000Z | from collections import Hashable
from typing import Union, Any, TypeVar, Optional, Tuple, List
from foxylib.tools.native.typing._typing_tool_helper import is_instance, \
is_subtype, is_generic
T = TypeVar("T")
| 24.603774 | 74 | 0.609663 | from collections import Hashable
from typing import Union, Any, TypeVar, Optional, Tuple, List
from foxylib.tools.native.typing._typing_tool_helper import is_instance, \
is_subtype, is_generic
T = TypeVar("T")
class TypingTool:
    """Introspection helpers for ``typing`` annotations.

    Delegates instance/subtype checks to the private helper module and
    adds origin/args extraction plus ``Optional`` detection that work on
    Python versions predating ``typing.get_origin``/``get_args``.
    """

    class NotAnnotationError(Exception):
        """Raised when an argument is not a usable type annotation."""
        pass

    @classmethod
    def pair_type(cls, T):
        """Annotation matching either a homogeneous 2-tuple or a list of T."""
        return Union[Tuple[T, T], List[T]]

    @classmethod
    def is_annotation(cls, annotation):
        """Return True when ``annotation`` can serve as a type annotation."""
        # Any real class exposes mro(); that alone qualifies it.
        try:
            annotation.mro()
        except AttributeError:
            pass
        else:
            return True

        # Special singleton annotations (currently just typing.Any).
        if isinstance(annotation, Hashable) and annotation in {Any}:
            return True
        # Parameterized generics (List[int], Dict[str, int], ...).
        if is_generic(annotation):
            return True
        # Bare type variables are valid annotations too.
        return isinstance(annotation, TypeVar)

    @classmethod
    def is_instance(cls, obj, annotation):
        """``isinstance`` for typing annotations (delegates to the helper)."""
        if not cls.is_annotation(annotation):
            raise cls.NotAnnotationError(annotation)
        return is_instance(obj, annotation)

    @classmethod
    def is_subtype(cls, sub_type, super_type):
        """``issubclass`` for typing annotations (delegates to the helper)."""
        for candidate in (sub_type, super_type):
            if not cls.is_annotation(candidate):
                raise cls.NotAnnotationError(candidate)
        return is_subtype(sub_type, super_type)

    @classmethod
    def get_origin(cls, annotation):
        """
        https://docs.python.org/3/library/typing.html#typing.get_args
        typing.get_origin() doesn't exist in older Python versions;
        alternative - https://stackoverflow.com/a/49471187

        :param annotation: candidate annotation
        :return: the unsubscripted origin type, or None
        """
        if not cls.is_annotation(annotation):
            return None
        # getattr with a default is equivalent to the original
        # try/except AttributeError around the bare attribute access.
        return getattr(annotation, '__origin__', None)

    @classmethod
    def get_args(cls, type_in):
        """
        https://docs.python.org/3/library/typing.html#typing.get_args

        :param type_in: annotation to inspect
        :return: tuple of type arguments (empty when unsubscripted)
        """
        return getattr(type_in, '__args__', ())

    @classmethod
    def is_optional(cls, type_in):
        """True when ``type_in`` admits None (Optional / a Union with None)."""
        if isinstance(type_in, (dict, list)):
            return False
        if type_in is None or type_in is Optional:
            return True
        if cls.get_origin(type_in) is Union:
            return isinstance(None, cls.get_args(type_in))
        return False
| 1,290 | 1,076 | 23 |
609ab129a24150af072b34c2796b2e752a5d40c4 | 2,111 | py | Python | nex2art/menu/UserEdit.py | ghl1024/nexus2artifactory | 1b300e1ea9c51d51a89096e8b710a0763750c38d | [
"Apache-2.0"
] | 50 | 2018-08-30T00:39:16.000Z | 2022-01-27T10:08:19.000Z | nex2art/menu/UserEdit.py | ghl1024/nexus2artifactory | 1b300e1ea9c51d51a89096e8b710a0763750c38d | [
"Apache-2.0"
] | 68 | 2018-06-12T10:37:01.000Z | 2022-01-10T02:47:12.000Z | nex2art/menu/UserEdit.py | ghl1024/nexus2artifactory | 1b300e1ea9c51d51a89096e8b710a0763750c38d | [
"Apache-2.0"
] | 38 | 2018-06-11T10:38:03.000Z | 2021-11-12T15:00:21.000Z | from ..core import Menu
from . import ItemListEdit
from . import ChooseList
| 39.830189 | 78 | 0.54145 | from ..core import Menu
from . import ItemListEdit
from . import ChooseList
class UserEdit(Menu):
    """Menu screen for editing a single Nexus user before migration."""

    def __init__(self, scr, path):
        Menu.__init__(self, scr, path, "Edit User Options")
        # Helpers for the editable "Groups" sub-list:
        # f builds the chooser, g adds an entry, h extracts its label.
        f, g, h = self.buildgroupedit, self.makegroupedit, lambda x: x['text']
        grp = self.submenu(ItemListEdit, "Groups", f, g, h)
        # None entries render as separators in the menu.
        self.opts = [
            self.mkopt('INFO', "User Name (Nexus)", None),
            self.mkopt('n', "User Name (Artifactory)", ['|', self.fixname]),
            self.mkopt('m', "Migrate This User", '+'),
            None,
            self.mkopt('INFO', "Realm", None),
            self.mkopt('e', "Email Address", '|'),
            self.mkopt('p', "Password", '*'),
            self.mkopt('g', "Groups", grp, save=True),
            self.mkopt('a', "Is An Administrator", '+'),
            self.mkopt('d', "Is Enabled", '+'),
            None,
            self.mkopt('h', "Help", '?'),
            self.mkopt('q', "Back", None, hdoc=False)]

    def buildgroupedit(self, itemlist):
        """Build the chooser listing all Nexus roles for group selection."""
        tform = lambda x: x['groupName']
        groupslist = self.scr.nexus.security.roles.values()
        return [ChooseList(self.scr, None, "Group", tform, groupslist)]

    def makegroupedit(self, grp, itemlist):
        """Add ``grp`` to the user's group list, rejecting duplicates.

        Returns a new menu option on success, False when nothing was added.
        """
        if grp == None: return False
        def nil(_): pass
        # Accept either a role dict or a bare group-name string.
        if 'groupName' in grp: grp = grp['groupName']
        for group in itemlist.pagedopts:
            if group['text'] == grp:
                msg = "This user already belongs to that group"
                self.scr.msg = ('err', msg)
                return False
        return itemlist.mkopt(None, grp, nil, alt=itemlist.delitem)

    def fixname(self, newname):
        """Normalize the Artifactory user name: strip; empty becomes None."""
        if newname['val'] != None:
            newname['val'] = newname['val'].strip()
            if newname['val'] == '':
                newname['val'] = None

    def filt(self, filt):
        """Return True if every filter term matches either user name."""
        name1 = self.scr.state[self.path]["User Name (Nexus)"].data
        name2 = self.scr.state[self.path]["User Name (Artifactory)"].data
        for f in filt:
            if f not in name1 and f not in name2: return False
        return True
| 1,878 | 0 | 157 |
4e3ee5226a352745afd75a2808d1f691fc9cc9b1 | 2,512 | py | Python | MoleculeMOTScripts/optimas/motmaster_wrapper.py | ColdMatter/EDMSuite | 80a8bc0f3fd9d33a081f606707140de51512b28a | [
"MIT"
] | 6 | 2017-02-02T17:54:23.000Z | 2021-07-03T12:41:36.000Z | MoleculeMOTScripts/optimas/motmaster_wrapper.py | ColdMatter/EDMSuite | 80a8bc0f3fd9d33a081f606707140de51512b28a | [
"MIT"
] | null | null | null | MoleculeMOTScripts/optimas/motmaster_wrapper.py | ColdMatter/EDMSuite | 80a8bc0f3fd9d33a081f606707140de51512b28a | [
"MIT"
] | 11 | 2015-03-19T18:23:38.000Z | 2021-02-18T11:05:51.000Z | from __future__ import print_function
import clr
import sys
from System.IO import Path
import time
sys.path.append(Path.GetFullPath("C:\\ControlPrograms\\EDMSuite\\MOTMaster\\bin\\CaF\\"))
clr.AddReference("C:\\ControlPrograms\\EDMSuite\\MOTMaster\\bin\\CaF\\MOTMaster.exe")
sys.path.append(Path.GetFullPath("C:\\ControlPrograms\\EDMSuite\\MoleculeMOTHardwareControl\\bin\\CaF\\"))
clr.AddReference("C:\\ControlPrograms\\EDMSuite\\MoleculeMOTHardwareControl\\bin\\CaF\\MoleculeMOTHardwareControl.exe")
clr.AddReference("C:\\ControlPrograms\\EDMSuite\\MoleculeMOTHardwareControl\\bin\\CaF\\DAQ.dll")
clr.AddReference("C:\\ControlPrograms\\EDMSuite\\MoleculeMOTHardwareControl\\bin\\CaF\\SharedCode.dll")
# Load some system assemblies that we'll need
clr.AddReference("System.Drawing")
clr.AddReference("System.Windows.Forms")
clr.AddReference("System.Xml")
# create connections to the control programs
import System
#import ScanMaster
import MOTMaster
import MoleculeMOTHardwareControl
#sm = typedproxy(System.Activator.GetObject(ScanMaster.Controller, 'tcp://localhost:1170/controller.rem'), #ScanMaster.Controller)
hc = System.Activator.GetObject(MoleculeMOTHardwareControl.Controller, 'tcp://localhost:1172/controller.rem')
mm = System.Activator.GetObject(MOTMaster.Controller, 'tcp://localhost:1187/controller.rem')
# some generic stuff
from System.IO import *
from System.Drawing import *
from System.Runtime.Remoting import *
from System.Threading import *
from System.Windows.Forms import *
from System.Xml.Serialization import *
from System import *
from System.Collections.Generic import Dictionary
import time
import itertools
from random import shuffle
# specific EDMSuite stuff
from DAQ.Environment import *
from DAQ import *
from MOTMaster import *
| 36.941176 | 130 | 0.789013 | from __future__ import print_function
import clr
import sys
from System.IO import Path
import time
sys.path.append(Path.GetFullPath("C:\\ControlPrograms\\EDMSuite\\MOTMaster\\bin\\CaF\\"))
clr.AddReference("C:\\ControlPrograms\\EDMSuite\\MOTMaster\\bin\\CaF\\MOTMaster.exe")
sys.path.append(Path.GetFullPath("C:\\ControlPrograms\\EDMSuite\\MoleculeMOTHardwareControl\\bin\\CaF\\"))
clr.AddReference("C:\\ControlPrograms\\EDMSuite\\MoleculeMOTHardwareControl\\bin\\CaF\\MoleculeMOTHardwareControl.exe")
clr.AddReference("C:\\ControlPrograms\\EDMSuite\\MoleculeMOTHardwareControl\\bin\\CaF\\DAQ.dll")
clr.AddReference("C:\\ControlPrograms\\EDMSuite\\MoleculeMOTHardwareControl\\bin\\CaF\\SharedCode.dll")
# Load some system assemblies that we'll need
clr.AddReference("System.Drawing")
clr.AddReference("System.Windows.Forms")
clr.AddReference("System.Xml")
# create connections to the control programs
import System
#import ScanMaster
import MOTMaster
import MoleculeMOTHardwareControl
#sm = typedproxy(System.Activator.GetObject(ScanMaster.Controller, 'tcp://localhost:1170/controller.rem'), #ScanMaster.Controller)
hc = System.Activator.GetObject(MoleculeMOTHardwareControl.Controller, 'tcp://localhost:1172/controller.rem')
mm = System.Activator.GetObject(MOTMaster.Controller, 'tcp://localhost:1187/controller.rem')
# some generic stuff
from System.IO import *
from System.Drawing import *
from System.Runtime.Remoting import *
from System.Threading import *
from System.Windows.Forms import *
from System.Xml.Serialization import *
from System import *
from System.Collections.Generic import Dictionary
import time
import itertools
from random import shuffle
# specific EDMSuite stuff
from DAQ.Environment import *
from DAQ import *
from MOTMaster import *
def single_param_single_shot(script_name, parameter_name, value):
    """Run a MOTMaster script once with a single parameter override.

    :param script_name: name of the .cs script in MoleculeMOTMasterScripts
    :param parameter_name: script parameter to override
    :param value: value to assign to that parameter
    :return: True on completion
    """
    # Delegate to the multi-parameter variant so the script-path and
    # .NET-dictionary marshalling logic lives in exactly one place.
    return multi_param_single_shot(script_name, [parameter_name], [value])
def multi_param_single_shot(script_name, parameter_names, values):
    """Run a MOTMaster script once, overriding several parameters.

    :param script_name: name of the .cs script in MoleculeMOTMasterScripts
    :param parameter_names: iterable of parameter names to override
    :param values: matching iterable of values (paired with zip)
    :return: True on completion
    """
    # .NET dictionary required by the MOTMaster remoting interface.
    dict_instance = Dictionary[String, Object]()
    script_path = 'C:\\ControlPrograms\\EDMSuite\\MoleculeMOTMasterScripts\\{}.cs'.format(script_name)
    mm.SetScriptPath(script_path)
    for parameter_name, value in zip(parameter_names, values):
        dict_instance[parameter_name] = value
    # Start the run on the remote MOTMaster controller.
    mm.Go(dict_instance)
    return True
| 694 | 0 | 46 |
978386cf7120391264f4a613312715b934b28b6f | 1,383 | py | Python | pyverilog/utils/op2mark.py | stdavids/Pyverilog | a201b86e1d1b237d205dce897dc823725abcd79b | [
"Apache-2.0"
] | 4 | 2019-09-26T18:59:43.000Z | 2021-12-07T01:25:09.000Z | pyverilog/utils/op2mark.py | stdavids/Pyverilog | a201b86e1d1b237d205dce897dc823725abcd79b | [
"Apache-2.0"
] | 1 | 2022-01-30T12:30:01.000Z | 2022-01-30T12:30:01.000Z | pyverilog/utils/op2mark.py | stdavids/Pyverilog | a201b86e1d1b237d205dce897dc823725abcd79b | [
"Apache-2.0"
] | 2 | 2020-06-18T02:23:14.000Z | 2022-01-30T10:04:46.000Z | #-------------------------------------------------------------------------------
# op2mark.py
#
# converting an operator to its mark
#
# Copyright (C) 2013, Shinya Takamaeda-Yamazaki
# License: Apache 2.0
#-------------------------------------------------------------------------------
operator_mark = {
'Uminus':'-', 'Ulnot':'!', 'Unot':'~', 'Uand':'&', 'Unand':'~&',
'Uor':'|', 'Unor':'~|', 'Uxor':'^', 'Uxnor':'~^',
'Power':'**', 'Times':'*', 'Divide':'/', 'Mod':'%',
'Plus':'+', 'Minus':'-',
'Sll':'<<', 'Srl':'>>', 'Sra':'>>>',
'LessThan':'<', 'GreaterThan':'>', 'LessEq':'<=', 'GreaterEq':'>=',
'Eq':'==', 'NotEq':'!=', 'Eql':'===', 'NotEql':'!==',
'And':'&', 'Xor':'^', 'Xnor':'~^',
'Or':'|', 'Land':'&&', 'Lor':'||'
}
operator_order = {
'Uminus':0, 'Ulnot':0, 'Unot':0, 'Uand':0, 'Unand':0,
'Uor':0, 'Unor':0, 'Uxor':0, 'Uxnor':0,
'Power':1,
'Times':2, 'Divide':2, 'Mod':2,
'Plus':3, 'Minus':3,
'Sll':4, 'Srl':4, 'Sra':4,
'LessThan':5, 'GreaterThan':5, 'LessEq':5, 'GreaterEq':5,
'Eq':6, 'NotEq':6, 'Eql':6, 'NotEql':6,
'And':7, 'Xor':7, 'Xnor':7,
'Or':8,
'Land':9,
'Lor':10
}
| 30.065217 | 80 | 0.415763 | #-------------------------------------------------------------------------------
# op2mark.py
#
# converting an operator to its mark
#
# Copyright (C) 2013, Shinya Takamaeda-Yamazaki
# License: Apache 2.0
#-------------------------------------------------------------------------------
# Verilog operator symbol for each AST operator-class name.
operator_mark = {
    'Uminus': '-', 'Ulnot': '!', 'Unot': '~', 'Uand': '&', 'Unand': '~&',
    'Uor': '|', 'Unor': '~|', 'Uxor': '^', 'Uxnor': '~^',
    'Power': '**', 'Times': '*', 'Divide': '/', 'Mod': '%',
    'Plus': '+', 'Minus': '-',
    'Sll': '<<', 'Srl': '>>', 'Sra': '>>>',
    'LessThan': '<', 'GreaterThan': '>', 'LessEq': '<=', 'GreaterEq': '>=',
    'Eq': '==', 'NotEq': '!=', 'Eql': '===', 'NotEql': '!==',
    'And': '&', 'Xor': '^', 'Xnor': '~^',
    'Or': '|', 'Land': '&&', 'Lor': '||',
}


def op2mark(op):
    """Return the Verilog symbol for operator name *op*, or None if unknown."""
    return operator_mark.get(op)
# Precedence rank for each operator-class name (0 binds tightest).
operator_order = {
    'Uminus': 0, 'Ulnot': 0, 'Unot': 0, 'Uand': 0, 'Unand': 0,
    'Uor': 0, 'Unor': 0, 'Uxor': 0, 'Uxnor': 0,
    'Power': 1,
    'Times': 2, 'Divide': 2, 'Mod': 2,
    'Plus': 3, 'Minus': 3,
    'Sll': 4, 'Srl': 4, 'Sra': 4,
    'LessThan': 5, 'GreaterThan': 5, 'LessEq': 5, 'GreaterEq': 5,
    'Eq': 6, 'NotEq': 6, 'Eql': 6, 'NotEql': 6,
    'And': 7, 'Xor': 7, 'Xnor': 7,
    'Or': 8,
    'Land': 9,
    'Lor': 10,
}


def op2order(op):
    """Return the precedence rank for operator name *op*, or None if unknown."""
    return operator_order.get(op)
| 155 | 0 | 46 |
0e81cd9725bccb7cfae13d7c944aa28d1ae47af7 | 727 | py | Python | script/dataset-txt-to-binary.py | rvs314/Montage | d4c49e66addefe947c03ff2bd0c463ebd2c34436 | [
"MIT"
] | 9 | 2020-10-04T22:03:31.000Z | 2021-10-08T01:52:57.000Z | script/dataset-txt-to-binary.py | rvs314/Montage | d4c49e66addefe947c03ff2bd0c463ebd2c34436 | [
"MIT"
] | 18 | 2020-10-20T02:39:12.000Z | 2021-08-30T00:23:32.000Z | script/dataset-txt-to-binary.py | rvs314/Montage | d4c49e66addefe947c03ff2bd0c463ebd2c34436 | [
"MIT"
] | 9 | 2020-10-04T22:06:11.000Z | 2021-02-19T17:23:17.000Z | #!/bin/python
import sys
import os
import re
import multiprocessing
pool = multiprocessing.Pool(multiprocessing.cpu_count())
for f in os.listdir('graph_data/'):
tmp = re.findall("orkut-edge-list_[0-9]+.txt", f)
if len(tmp) != 0:
pool.apply_async(work, (f,))
pool.close()
pool.join() | 26.925926 | 56 | 0.580468 | #!/bin/python
import sys
import os
import re
import multiprocessing
def work(f):
    """Convert one edge-list text file to a packed binary file.

    Reads ``graph_data/<f>`` (one "src\\tdst" decimal pair per line) and
    writes ``graph_data/<stem>.bin`` where each vertex id is encoded as
    a 4-byte integer in native byte order.

    :param f: file name (not path) of the ``.txt`` input inside graph_data/
    :raises ValueError: if the input file is empty or a line is malformed
    """
    output = 'graph_data/' + f[:-4] + '.bin'
    with open('graph_data/' + f) as i:
        inputLines = i.readlines()
    # The original used ``assert``, which is silently stripped under
    # ``python -O``; raise explicitly instead.
    if not inputLines:
        raise ValueError('empty edge list: ' + f)
    with open(output, 'wb') as o:
        for line in inputLines:
            m = re.match('([0-9]+)\t([0-9]+)', line)
            # A non-matching line used to crash with an opaque
            # AttributeError on m.groups(); report it clearly.
            if m is None:
                raise ValueError('malformed edge line in {}: {!r}'.format(f, line))
            x, y = m.groups()
            o.write(int(x).to_bytes(4, sys.byteorder))
            o.write(int(y).to_bytes(4, sys.byteorder))
# Fan the conversion out over all matching edge-list files, one worker
# per CPU core.
pool = multiprocessing.Pool(multiprocessing.cpu_count())
for f in os.listdir('graph_data/'):
    # Only the numbered orkut edge-list text files are converted.
    tmp = re.findall("orkut-edge-list_[0-9]+.txt", f)
    if len(tmp) != 0:
        pool.apply_async(work, (f,))
# NOTE(review): apply_async discards worker exceptions, and there is no
# ``if __name__ == '__main__'`` guard (required on spawn-based platforms
# such as Windows/macOS) -- confirm this script only targets fork-based
# systems.
pool.close()
pool.join()
371254aa1ac49e4953c6a896823e19560b4024b9 | 3,900 | py | Python | waste/cli.py | tim-littlefair/tl-waste | 522110d049d77f0689feef66ad51894331521fce | [
"MIT"
] | null | null | null | waste/cli.py | tim-littlefair/tl-waste | 522110d049d77f0689feef66ad51894331521fce | [
"MIT"
] | null | null | null | waste/cli.py | tim-littlefair/tl-waste | 522110d049d77f0689feef66ad51894331521fce | [
"MIT"
] | null | null | null | # python3
# waste/cli.py
# Copyright Tim Littlefair 2020-
# This file is open source software under the MIT license.
# For terms of this license, see the file LICENSE in the source
# code distribution or visit
# https://opensource.org/licenses/mit-license.php
# This file defines the command line interface of the package
import argparse
import logging
import sys
import traceback
import botocore
# Logging needs to be enabled before some of the
# following imports as they can throw errors
_logger = logging.getLogger()
_logger.setLevel(logging.INFO)
try:
from .deploy.deploy_support import deploy_app
from .deploy.retire_support import retire_app
from .deploy.content_support import content_dir_to_in_memory_zip_stream
from .handler.shared import serialize_exception_for_log
except botocore.exceptions.ClientError as e:
if "InvalidClientTokenId" in str(e):
logging.error("Environment does not contain a valid AWS token")
sys.exit(2)
else:
raise
_ACTION_DEPLOY="deploy"
_ACTION_RETIRE="retire"
arg_parser = ArgParser()
args = arg_parser.parse_args()
try:
if args.action==_ACTION_DEPLOY:
content_zip_stream = None
if args.content_dir is not None:
content_zip_stream, _ = content_dir_to_in_memory_zip_stream(
args.content_dir
)
deploy_app(
args.app_name, content_zip_stream,
default_doc_name = args.index_doc,
cache_zip_path = args.cache_zip_path,
create_groups = args.create_iam_groups
)
elif args.action==_ACTION_RETIRE:
retire_app(args.app_name)
else:
print("Unsupported action",args.action)
arg_parser.print_help()
sys.exit(1)
#except SystemExit:
# raise
#except NotImplementedError:
# pass
except botocore.exceptions.ClientError as e:
if "InvalidClientTokenId" in str(e):
logging.error("Environment does not contain a valid AWS token")
sys.exit(2)
else:
pass
except:
serialize_exception_for_log(e)
sys.exit(3)
| 33.333333 | 89 | 0.634103 | # python3
# waste/cli.py
# Copyright Tim Littlefair 2020-
# This file is open source software under the MIT license.
# For terms of this license, see the file LICENSE in the source
# code distribution or visit
# https://opensource.org/licenses/mit-license.php
# This file defines the command line interface of the package
import argparse
import logging
import sys
import traceback
import botocore
# Logging needs to be enabled before some of the
# following imports as they can throw errors
_logger = logging.getLogger()
_logger.setLevel(logging.INFO)
try:
from .deploy.deploy_support import deploy_app
from .deploy.retire_support import retire_app
from .deploy.content_support import content_dir_to_in_memory_zip_stream
from .handler.shared import serialize_exception_for_log
except botocore.exceptions.ClientError as e:
if "InvalidClientTokenId" in str(e):
logging.error("Environment does not contain a valid AWS token")
sys.exit(2)
else:
raise
_ACTION_DEPLOY = "deploy"
_ACTION_RETIRE = "retire"


class ArgParser(argparse.ArgumentParser):
    """Command-line parser for the deploy/retire entry point."""

    def __init__(self):
        super().__init__()
        # Suffix shared by every deploy-only option's help text.
        _ignored = " (ignored if action=" + _ACTION_RETIRE + ")"
        self.add_argument(
            "action", type=str, choices=[_ACTION_DEPLOY, _ACTION_RETIRE],
            help="Operation to be performed",
        )
        self.add_argument(
            "app_name", type=str,
            help="Name of application to be deployed",
        )
        self.add_argument(
            "--content-dir", type=str, action="store",
            help="Directory containing content to be served" + _ignored,
        )
        self.add_argument(
            "--index-doc", type=str, action="store", default="index.html",
            help="Default document name if path matches a folder" + _ignored,
        )
        self.add_argument(
            "--cache-zip-path", type=str, action="store", default=None,
            help="Zipfile path under content_dir containing files to be"
                 " cached in memory" + _ignored,
        )
        self.add_argument(
            "--preserve-outdated", action="store_true",
            help="Suppress retirement of previously deployed baselines"
                 " of the same app" + _ignored,
        )
        self.add_argument(
            "--create-iam-groups", action="store_true",
            help="Create IAM groups which can be used to assign AWS console"
                 " users rights to view the app, edit storage content, and"
                 " edit lambdas",
        )
        self.add_argument(
            "--api-key", default=None,
            help="API key for the app, or '*' for an API key to be"
                 " generated, or None for no API key",
        )
arg_parser = ArgParser()
args = arg_parser.parse_args()

# Exit codes: 1 unsupported action, 2 bad AWS token, 3 unexpected error.
try:
    if args.action == _ACTION_DEPLOY:
        content_zip_stream = None
        if args.content_dir is not None:
            content_zip_stream, _ = content_dir_to_in_memory_zip_stream(
                args.content_dir
            )
        deploy_app(
            args.app_name, content_zip_stream,
            default_doc_name=args.index_doc,
            cache_zip_path=args.cache_zip_path,
            create_groups=args.create_iam_groups
        )
    elif args.action == _ACTION_RETIRE:
        retire_app(args.app_name)
    else:
        # argparse 'choices' should make this unreachable; keep a
        # defensive exit in case the action list grows.
        print("Unsupported action", args.action)
        arg_parser.print_help()
        sys.exit(1)
except botocore.exceptions.ClientError as e:
    if "InvalidClientTokenId" in str(e):
        logging.error("Environment does not contain a valid AWS token")
        sys.exit(2)
    else:
        # NOTE(review): other AWS client errors are silently swallowed
        # here, while the import-time handler above re-raises -- confirm
        # this asymmetry is intentional.
        pass
except Exception as e:
    # Bug fix: this was a bare ``except:`` that referenced ``e``, which is
    # unbound on this path (Python unbinds the previous handler's ``e``
    # when its block exits), so any unexpected error crashed with a
    # NameError.  Using ``except Exception`` also stops the handler from
    # swallowing the SystemExit raised by sys.exit(1) above.
    serialize_exception_for_log(e)
    sys.exit(3)
| 1,744 | 20 | 49 |
e7ef009d0fb5ad446ef872c0338222f7866a959e | 758 | py | Python | _gather_docs.py | gh640/shell-utils | 2bb23a1a4238ca812b7080ded8f687beca3bff4e | [
"MIT"
] | null | null | null | _gather_docs.py | gh640/shell-utils | 2bb23a1a4238ca812b7080ded8f687beca3bff4e | [
"MIT"
] | 6 | 2018-07-02T13:40:46.000Z | 2019-03-10T03:27:54.000Z | _gather_docs.py | gh640/shell-utils | 2bb23a1a4238ca812b7080ded8f687beca3bff4e | [
"MIT"
] | null | null | null | '''Gathers the module docs in `.py`.
'''
import importlib
from pathlib import Path
TARGET_SUFFIXES = ('.py',)
EXCLUDED_PREFIX = '_'
TEMPLATE_ITEM = '- `{}`: {}'
if __name__ == '__main__':
main()
| 20.486486 | 70 | 0.672823 | '''Gathers the module docs in `.py`.
'''
import importlib
from pathlib import Path

# Only these extensions are scanned, and names with this prefix (like
# this helper script itself) are excluded.
TARGET_SUFFIXES = ('.py',)
EXCLUDED_PREFIX = '_'
TEMPLATE_ITEM = '- `{}`: {}'


def main():
    """Print a doc bullet for every eligible script next to this file."""
    # The original wrapped the generator in a redundant ``x for x in ...``
    # expression; pass it straight through.
    print_module_doc(get_entries_in_script_dir(is_target))


def get_entries_in_script_dir(rule):
    """Return a generator of this script's directory entries passing *rule*."""
    path = Path(__file__).resolve().parent
    return (entry for entry in path.iterdir() if rule(entry))


def is_target(path):
    """True for regular ``.py`` files whose name lacks the excluded prefix."""
    return (
        path.is_file()
        and path.suffix in TARGET_SUFFIXES
        and not path.name.startswith(EXCLUDED_PREFIX)
    )


def print_module_doc(paths):
    """Import each path as a module and print its formatted docstring.

    :param paths: iterable of Path objects whose stems are importable
    """
    for path in paths:
        module = importlib.import_module(path.stem)
        # Robustness fix: modules without a docstring have __doc__ ==
        # None; the original crashed on .strip() in that case.
        doc = (module.__doc__ or '').strip()
        print(TEMPLATE_ITEM.format(path.name, doc))
if __name__ == '__main__':
main()
| 459 | 0 | 92 |
33243a156837ff8e81b84b33b3fe60babe6f38c8 | 1,132 | py | Python | seriouslylib/iterable.py | Mego/Seriously | 07b256e4f35f5efec3b01434300f9ccc551b1c3e | [
"MIT"
] | 104 | 2015-11-02T00:08:32.000Z | 2022-02-17T23:17:14.000Z | seriouslylib/iterable.py | Mego/Seriously | 07b256e4f35f5efec3b01434300f9ccc551b1c3e | [
"MIT"
] | 68 | 2015-11-09T05:33:24.000Z | 2020-04-10T06:46:54.000Z | seriouslylib/iterable.py | Mego/Seriously | 07b256e4f35f5efec3b01434300f9ccc551b1c3e | [
"MIT"
] | 25 | 2015-11-19T05:34:09.000Z | 2021-07-20T13:54:03.000Z | #!/usr/bin/env python3
from collections import deque as _deque
from collections import Iterable
from itertools import islice, zip_longest as izip
| 31.444444 | 78 | 0.603357 | #!/usr/bin/env python3
from collections import deque as _deque
from collections import Iterable
from itertools import islice, zip_longest as izip
def as_list(val, wrap=True):
    """Recursively convert nested iterables to nested lists.

    Strings are treated as scalars: a string is iterable "all the way
    down" (each character is again an iterable string), so descending
    into one would recurse forever.

    :param val: value or (possibly nested) iterable
    :param wrap: when True, wrap a non-iterable value in a one-item list
    """
    treat_as_scalar = isinstance(val, str) or not isinstance(val, Iterable)
    if treat_as_scalar:
        return [val] if wrap else val
    return [as_list(item, wrap=False) for item in val]
class deque(_deque):
    """collections.deque extended with slicing and a reversed-copy helper."""

    def copy(self):
        """Shallow copy; falls back to manual copying on old Pythons."""
        if hasattr(_deque, 'copy'):
            return _deque.copy(self)
        return deque(item for item in self)

    def __getitem__(self, key):
        """Index access that also supports slices (returned as a list)."""
        if not isinstance(key, slice):
            return _deque.__getitem__(self, key)
        # Slices materialize the deque; deques have no native slicing.
        return list(self)[key]

    def reversed(self):
        """Return a reversed copy, leaving this deque untouched."""
        mirror = self.copy()
        mirror.reverse()
        return mirror
def zip_longest(*iterables):
    """Like itertools.zip_longest, but each yielded group is a filter
    object that drops the None fill values.

    Note: genuine None elements of the inputs are dropped as well, since
    they are indistinguishable from fill values.
    """
    for group in izip(*iterables):
        yield filter(lambda item: item is not None, group)
54cdc86a431feab1e8839c566bd4ed3eb49fab86 | 513 | py | Python | visualizeXY.py | Myunghee13/DSCI560_HW2 | b951a10f3fa0ad5807e980c3f172f8f66d7d3796 | [
"CC0-1.0"
] | null | null | null | visualizeXY.py | Myunghee13/DSCI560_HW2 | b951a10f3fa0ad5807e980c3f172f8f66d7d3796 | [
"CC0-1.0"
] | null | null | null | visualizeXY.py | Myunghee13/DSCI560_HW2 | b951a10f3fa0ad5807e980c3f172f8f66d7d3796 | [
"CC0-1.0"
] | null | null | null | # 1. c. Visualize the results
from pathlib import Path
import matplotlib.pyplot as plt
output_folder = Path("output")
# read intermediate results
with open(output_folder / "xNumbers.txt",'r') as f:
xNum = [int(ele.strip()) for ele in f.readlines()]
with open(output_folder / "yNumbers.txt",'r') as f:
yNum = [int(ele.strip()) for ele in f.readlines()]
# visualize graph
fig = plt.figure()
plt.scatter(xNum, yNum)
plt.xlabel('x')
plt.ylabel('y')
plt.show()
fig.savefig(output_folder / 'xyGraph.png')
| 22.304348 | 54 | 0.695906 | # 1. c. Visualize the results
from pathlib import Path
import matplotlib.pyplot as plt

output_folder = Path("output")

# read intermediate results
# Each file holds one integer per line, produced by the earlier step.
with open(output_folder / "xNumbers.txt",'r') as f:
    xNum = [int(ele.strip()) for ele in f.readlines()]
with open(output_folder / "yNumbers.txt",'r') as f:
    yNum = [int(ele.strip()) for ele in f.readlines()]

# visualize graph
# Scatter-plot the (x, y) pairs, show them interactively, then save the
# same figure to output/xyGraph.png.
fig = plt.figure()
plt.scatter(xNum, yNum)
plt.xlabel('x')
plt.ylabel('y')
plt.show()
fig.savefig(output_folder / 'xyGraph.png')
| 0 | 0 | 0 |
8c47558185e3e4480a104cedaaf10616e8918a93 | 1,159 | py | Python | env/Lib/site-packages/pylint/__init__.py | aammjian/cotton | f72b814f795f79a4054688e465c8b0ae5560f3b7 | [
"Apache-2.0"
] | 33 | 2020-10-05T01:04:55.000Z | 2021-06-24T01:52:31.000Z | env/Lib/site-packages/pylint/__init__.py | aammjian/cotton | f72b814f795f79a4054688e465c8b0ae5560f3b7 | [
"Apache-2.0"
] | 14 | 2020-10-07T03:15:12.000Z | 2021-01-15T11:53:29.000Z | env/Lib/site-packages/pylint/__init__.py | aammjian/cotton | f72b814f795f79a4054688e465c8b0ae5560f3b7 | [
"Apache-2.0"
] | 11 | 2020-07-31T08:20:43.000Z | 2020-08-21T04:08:29.000Z | # Copyright (c) 2008, 2012 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2014, 2016-2020 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Arun Persaud <arun@nubati.net>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2018 Nick Drozd <nicholasdrozd@gmail.com>
# Copyright (c) 2020 Pierre Sassoulas <pierre.sassoulas@gmail.com>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
import sys
from pylint.__pkginfo__ import version as __version__
# pylint: disable=import-outside-toplevel
def run_pyreverse():
"""run pyreverse"""
from pylint.pyreverse.main import Run as PyreverseRun
PyreverseRun(sys.argv[1:])
def run_symilar():
"""run symilar"""
from pylint.checkers.similar import Run as SimilarRun
SimilarRun(sys.argv[1:])
| 25.755556 | 80 | 0.719586 | # Copyright (c) 2008, 2012 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2014, 2016-2020 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Arun Persaud <arun@nubati.net>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2018 Nick Drozd <nicholasdrozd@gmail.com>
# Copyright (c) 2020 Pierre Sassoulas <pierre.sassoulas@gmail.com>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
import sys
from pylint.__pkginfo__ import version as __version__
# pylint: disable=import-outside-toplevel
def run_pylint():
    """run pylint"""
    from pylint.lint import Run as PylintRun
    try:
        PylintRun(sys.argv[1:])
    except KeyboardInterrupt:
        # Exit with status 1 (quietly, no traceback) when the user aborts.
        sys.exit(1)


def run_epylint():
    """run epylint"""
    from pylint.epylint import Run as EpylintRun
    EpylintRun()


def run_pyreverse():
    """run pyreverse"""
    from pylint.pyreverse.main import Run as PyreverseRun
    PyreverseRun(sys.argv[1:])


def run_symilar():
    """run symilar"""
    from pylint.checkers.similar import Run as SimilarRun
    SimilarRun(sys.argv[1:])
| 197 | 0 | 46 |
8f8d1f53882f5c349d916c1ca9e5d20ba3603757 | 3,357 | py | Python | tests/test_conf.py | hero0926/bottery | 1c724b867fa16708d59a3dbba5dd2c3de85147a9 | [
"MIT"
] | 250 | 2017-09-16T14:40:51.000Z | 2021-05-25T12:27:47.000Z | tests/test_conf.py | hero0926/bottery | 1c724b867fa16708d59a3dbba5dd2c3de85147a9 | [
"MIT"
] | 135 | 2017-09-16T14:48:53.000Z | 2019-07-25T12:10:46.000Z | tests/test_conf.py | hero0926/bottery | 1c724b867fa16708d59a3dbba5dd2c3de85147a9 | [
"MIT"
] | 78 | 2017-09-28T23:34:23.000Z | 2021-08-03T15:24:38.000Z | from unittest import mock
import pytest
from bottery.conf import (LazySettings, Settings, UserSettingsHolder,
lazy_obj_method)
@mock.patch('bottery.conf.Settings')
@mock.patch('bottery.conf.sys')
@mock.patch('bottery.conf.import_module')
@mock.patch('bottery.conf.os.getcwd', return_value='test_settings')
@mock.patch('bottery.conf.import_module')
| 25.431818 | 76 | 0.705392 | from unittest import mock
import pytest
from bottery.conf import (LazySettings, Settings, UserSettingsHolder,
lazy_obj_method)
def test_lazy_obj_method():
    # A lazy_obj_method-wrapped dunder must trigger _setup() before
    # delegating to the wrapped builtin (here: __dir__ via dir()).
    class Settings:
        _wrapped = None
        _setup = mock.Mock()
        __dir__ = lazy_obj_method(dir)

    settings = Settings()
    dir(settings)
    assert settings._setup.called is True


@mock.patch('bottery.conf.Settings')
def test_lazysettings_setup(mock_settings):
    # _setup() must instantiate Settings and call configure() on it.
    lazy_settings = LazySettings()
    lazy_settings._setup()
    assert mock_settings.called is True
    assert lazy_settings._wrapped.configure.called is True


def test_lazysettings_set_wrapped():
    # _wrapped is a plain settable attribute.
    lazy_settings = LazySettings()
    lazy_settings._wrapped = 'test'
    assert lazy_settings._wrapped == 'test'


def test_lazysettings_setattr():
    # Attribute writes are forwarded to the wrapped settings object.
    lazy_settings = LazySettings()
    lazy_settings._wrapped = type('_wrapped', (), {})
    lazy_settings.attr = 'value'
    assert lazy_settings._wrapped.attr == 'value'


def test_lazysettings_configure():
    lazy_settings = LazySettings()
    lazy_settings.configure(attr='value')

    # Default settings
    assert lazy_settings.TEMPLATES == []
    assert lazy_settings.PLATFORMS == {}
    assert lazy_settings.MIDDLEWARES == []

    # Settings by params
    assert lazy_settings.attr == 'value'


def test_lazysettings_already_configured():
    # Configuring an already-wrapped LazySettings is an error.
    lazy_settings = LazySettings()
    lazy_settings._wrapped = 'settings'
    with pytest.raises(RuntimeError):
        lazy_settings.configure()


def test_settings_configure():
    # configure() delegates to global_settings() then import_settings().
    settings = Settings()
    settings.global_settings = mock.Mock()
    settings.import_settings = mock.Mock()
    settings.configure()
    assert settings.global_settings.called is True
    assert settings.import_settings.called is True


def test_settings_global_settings():
    # global_settings() installs the framework defaults.
    settings = Settings()
    settings.global_settings()
    assert settings.TEMPLATES == []
    assert settings.PLATFORMS == {}
    assert settings.MIDDLEWARES == []


@mock.patch('bottery.conf.sys')
@mock.patch('bottery.conf.import_module')
@mock.patch('bottery.conf.os.getcwd', return_value='test_settings')
def test_settings_local_settings(mock_getcwd, mock_import_module, mock_sys):
    # local_settings() must prepend the cwd to sys.path and import the
    # user's settings module from there.
    mock_sys.path = []
    settings = Settings()
    settings.local_settings()
    assert mock_sys.path[0] == 'test_settings'
    assert mock_import_module.called is True
    assert mock_getcwd.called is True


def test_settings_setattr_module():
    # Only UPPERCASE names are copied from a settings module (asserted
    # below: VALID is kept, lowercase 'invalid' is dropped).
    mod = type('Settings', (), {'VALID': True, 'invalid': False})
    settings = Settings()
    settings.setattr_module(mod)
    assert settings.VALID
    assert not hasattr(settings, 'invalid')


@mock.patch('bottery.conf.import_module')
def test_settings_import_settings(mock_import_module):
    # import_settings() pulls attributes from the local settings module.
    mod = type('Settings', (), {
        'DEBUG': True,
        'anotherconf': True,
    })
    settings = Settings()
    settings.local_settings = mock.Mock(return_value=mod)
    settings.import_settings()
    assert settings.DEBUG


def test_usersettingsholder():
    # The holder copies (not aliases) default values and keeps only
    # UPPERCASE names -- both properties asserted below.
    templates = []
    default_settings = type('Settings', (), {
        'TEMPLATES': templates,
        'anotherconf': True,
    })
    settings = UserSettingsHolder(default_settings)
    assert settings.TEMPLATES == templates
    assert id(settings.TEMPLATES) != id(templates)
    assert not hasattr(settings, 'anotherconf')
| 2,693 | 0 | 273 |
95a47d457bca06a93badfbde56f90233f91eb246 | 1,172 | py | Python | scripts/tiffs_from_h5.py | glemaitre/hexrd | b68b1ba72e0f480d29bdaae2adbd6c6e2380cc7c | [
"BSD-3-Clause"
] | 27 | 2020-02-18T12:15:08.000Z | 2022-03-24T17:53:46.000Z | scripts/tiffs_from_h5.py | glemaitre/hexrd | b68b1ba72e0f480d29bdaae2adbd6c6e2380cc7c | [
"BSD-3-Clause"
] | 259 | 2020-02-02T22:18:29.000Z | 2022-03-30T19:59:58.000Z | scripts/tiffs_from_h5.py | glemaitre/hexrd | b68b1ba72e0f480d29bdaae2adbd6c6e2380cc7c | [
"BSD-3-Clause"
] | 11 | 2020-02-18T12:14:44.000Z | 2022-03-04T16:19:11.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 31 18:30:53 2019

Export each detector panel of a hexrd HDF5 imageseries as a TIFF image,
writing a matching YAML image-series descriptor next to it.

@author: bernier2
"""
import os

import numpy as np

from hexrd import imageseries
from skimage import io

# dirs
working_dir = '/Users/Shared/APS/PUP_AFRL_Feb19'
image_dir = os.path.join(working_dir, 'image_data')
samp_name = 'ceria_cal'
scan_number = 0
tif_file_template = samp_name + '_%06d-%s.tif'
raw_data_dir_template = os.path.join(
    image_dir,
    'raw_images_%s_%06d-%s.yml'
)
# YAML descriptor written once per detector panel.
yml_string = """
image-files:
  directory: %s
  files: "%s"
options:
  empty-frames: 0
  max-frames: 0
meta:
  panel: %s
"""
ims = imageseries.open(
    os.path.join(image_dir, 'ceria_cal.h5'),
    'hdf5',
    path='/imageseries'
)
metadata = ims.metadata
det_keys = np.array(metadata['panels'], dtype=str)
for i, det_key in enumerate(det_keys):
    tiff_fname = tif_file_template % (scan_number, det_key)
    # BUGFIX: the YAML handle was opened with a bare open() and never
    # closed; a context manager guarantees it is flushed and released.
    with open(raw_data_dir_template % (samp_name, scan_number, det_key),
              'w') as yml_file:
        print(yml_string % (image_dir, tiff_fname, det_key),
              file=yml_file)
    io.imsave(
        os.path.join(image_dir, tiff_fname),
        ims[i]
    )
| 17.757576 | 66 | 0.66041 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 31 18:30:53 2019

Export each detector panel of a hexrd HDF5 imageseries as a TIFF image,
writing a matching YAML image-series descriptor next to it.

@author: bernier2
"""
import os

import numpy as np

from hexrd import imageseries
from skimage import io

# dirs
working_dir = '/Users/Shared/APS/PUP_AFRL_Feb19'
image_dir = os.path.join(working_dir, 'image_data')
samp_name = 'ceria_cal'
scan_number = 0
tif_file_template = samp_name + '_%06d-%s.tif'
raw_data_dir_template = os.path.join(
    image_dir,
    'raw_images_%s_%06d-%s.yml'
)
# YAML descriptor written once per detector panel.
yml_string = """
image-files:
  directory: %s
  files: "%s"
options:
  empty-frames: 0
  max-frames: 0
meta:
  panel: %s
"""
ims = imageseries.open(
    os.path.join(image_dir, 'ceria_cal.h5'),
    'hdf5',
    path='/imageseries'
)
metadata = ims.metadata
det_keys = np.array(metadata['panels'], dtype=str)
for i, det_key in enumerate(det_keys):
    tiff_fname = tif_file_template % (scan_number, det_key)
    # BUGFIX: the YAML handle was opened with a bare open() and never
    # closed; a context manager guarantees it is flushed and released.
    with open(raw_data_dir_template % (samp_name, scan_number, det_key),
              'w') as yml_file:
        print(yml_string % (image_dir, tiff_fname, det_key),
              file=yml_file)
    io.imsave(
        os.path.join(image_dir, tiff_fname),
        ims[i]
    )
| 0 | 0 | 0 |
785bac4558f57f6693632e4bc7ad5d2d9a110e05 | 45 | py | Python | fHDHR/__init__.py | crackers8199/fHDHR_Locast | cad9cc0bf64f70bbcd2e702a938794d4eacad6cf | [
"WTFPL"
] | null | null | null | fHDHR/__init__.py | crackers8199/fHDHR_Locast | cad9cc0bf64f70bbcd2e702a938794d4eacad6cf | [
"WTFPL"
] | null | null | null | fHDHR/__init__.py | crackers8199/fHDHR_Locast | cad9cc0bf64f70bbcd2e702a938794d4eacad6cf | [
"WTFPL"
] | null | null | null | # coding=utf-8
fHDHR_VERSION = "v0.3.0-beta"
| 15 | 29 | 0.688889 | # coding=utf-8
fHDHR_VERSION = "v0.3.0-beta"
| 0 | 0 | 0 |
5d52d54afc4948ea4b7febdfa8d2714599820780 | 2,781 | py | Python | test/test_compare_license_template_script.py | anshuldutt21/spdx_python_licensematching | a409d7e1d024bc64d13c831989e61e0e3355eea1 | [
"Apache-2.0"
] | 1 | 2021-05-31T03:09:12.000Z | 2021-05-31T03:09:12.000Z | test/test_compare_license_template_script.py | anshuldutt21/spdx_python_licensematching | a409d7e1d024bc64d13c831989e61e0e3355eea1 | [
"Apache-2.0"
] | 5 | 2020-09-17T14:41:48.000Z | 2020-10-07T07:24:11.000Z | test/test_compare_license_template_script.py | anshuldutt21/spdx_python_licensematching | a409d7e1d024bc64d13c831989e61e0e3355eea1 | [
"Apache-2.0"
] | null | null | null | import unittest
import os
from pathlib import Path
from normalize_license_text.normalize_class import NormalizeText
from configuration.config import PACKAGE_PATH
from compare_template_text.normalize_template_text import NormalizeTemplate
from compare_template_text.compare_normalized_files import CompareNormalizedFiles
input_text = str(Path(PACKAGE_PATH + "\\test\\data\\OBSD.txt"))
input_text = input_text.replace('\\',os.sep)
input_text_mismatch = str(Path(PACKAGE_PATH + "\\test\\data\\OBSD3.txt"))
input_text_mismatch = input_text_mismatch.replace('\\',os.sep)
input_template = str(Path(PACKAGE_PATH + "\\test\\data\\OBSD_template.txt"))
input_template = input_template.replace('\\',os.sep)
if __name__ == '__main__':
unittest.main()
| 40.304348 | 106 | 0.701546 | import unittest
import os
from pathlib import Path
from normalize_license_text.normalize_class import NormalizeText
from configuration.config import PACKAGE_PATH
from compare_template_text.normalize_template_text import NormalizeTemplate
from compare_template_text.compare_normalized_files import CompareNormalizedFiles
input_text = str(Path(PACKAGE_PATH + "\\test\\data\\OBSD.txt"))
input_text = input_text.replace('\\',os.sep)
input_text_mismatch = str(Path(PACKAGE_PATH + "\\test\\data\\OBSD3.txt"))
input_text_mismatch = input_text_mismatch.replace('\\',os.sep)
input_template = str(Path(PACKAGE_PATH + "\\test\\data\\OBSD_template.txt"))
input_template = input_template.replace('\\',os.sep)
class TestAllTexts(unittest.TestCase):
    """End-to-end normalization and template-comparison tests for OBSD."""

    def _normalized_pair(self, text_path):
        """Normalize the license text at *text_path* together with the OBSD
        template and return ``(normalized_template, normalized_text)``.

        Extracted helper: both tests previously duplicated this pipeline
        line for line (and called ``close()`` redundantly inside ``with``).
        """
        with open(text_path, 'r') as text_file:
            raw_text = text_file.read()
        normalized_text = NormalizeText(raw_text).returnfinalstring_for_template()
        with open(input_template, 'r') as template_file:
            raw_template = template_file.read()
        normalized_template_input = NormalizeText(raw_template).returnfinalstring_for_template()
        normalizer = NormalizeTemplate(normalized_text, normalized_template_input)
        normalizer.normalize_template()
        return (normalizer.return_normalized_template(),
                normalizer.return_normalized_text())

    def test_template_match(self):
        """A license text that matches the template must compare equal."""
        template_string, text_string = self._normalized_pair(input_text)
        self.assertEqual(True, CompareNormalizedFiles(template_string, text_string))

    def test_template_mismatch(self):
        """A license text that does not match must not compare equal."""
        template_string, text_string = self._normalized_pair(input_text_mismatch)
        self.assertEqual(False, CompareNormalizedFiles(template_string, text_string))
if __name__ == '__main__':
unittest.main()
| 1,923 | 17 | 84 |
379dd518a9f7ab68255775b31015a88d60d5cd9d | 5,881 | py | Python | voipms/entities/clientsget.py | 4doom4/python-voipms | 3159ccfaf1ed9f5fef431fa3d2fdd54b9d3b1b3c | [
"MIT"
] | 14 | 2017-06-26T16:22:59.000Z | 2022-03-10T13:22:49.000Z | voipms/entities/clientsget.py | judahpaul16/python-voipms | 4e1eb51f927b9e0924091f7bbf25ccc2193c3bac | [
"MIT"
] | 8 | 2018-02-15T18:25:48.000Z | 2022-03-29T06:17:00.000Z | voipms/entities/clientsget.py | judahpaul16/python-voipms | 4e1eb51f927b9e0924091f7bbf25ccc2193c3bac | [
"MIT"
] | 8 | 2019-02-22T00:42:25.000Z | 2022-02-14T19:50:41.000Z | # coding=utf-8
"""
The Clients API endpoint get
Documentation: https://voip.ms/m/apidocs.php
"""
from voipms.baseapi import BaseApi
class ClientsGet(BaseApi):
    """
    Get for the Clients endpoint.

    Each method wraps one read-only call of the voip.ms "clients" API and
    returns the decoded response from ``self._voipms_client._get``.
    """

    def __init__(self, *args, **kwargs):
        """
        Initialize the endpoint
        """
        super(ClientsGet, self).__init__(*args, **kwargs)
        self.endpoint = 'clients'

    def balance_management(self, balance_management=None):
        """
        Retrieves a list of Balance Management Options if no additional parameter is provided

        - Retrieves a specific Balance Management Option if a code is provided

        :param balance_management: Code for a specific Balance Management Setting (Example: 1)
        :type balance_management: :py:class:`int`
        :returns: :py:class:`dict`
        """
        method = "getBalanceManagement"
        parameters = {}
        # NOTE(review): a falsy value (e.g. 0) skips validation and is not
        # sent at all — confirm 0 is never a valid code.
        if balance_management:
            if not isinstance(balance_management, int):
                raise ValueError("Code for a specific Balance Management Setting needs to be an int (Example: 1)")
            parameters["balance_management"] = balance_management
        return self._voipms_client._get(method, parameters)

    def charges(self, client):
        """
        Retrieves Charges made to a specific Reseller Client

        :param client: [Required] ID for a specific Reseller Client (Example: 561115)
        :type client: :py:class:`int`
        :returns: :py:class:`dict`
        """
        method = "getCharges"
        parameters = {}
        if client:
            if not isinstance(client, int):
                raise ValueError("ID for a specific Reseller Client needs to be an int (Example: 561115)")
            parameters["client"] = client
        return self._voipms_client._get(method, parameters)

    def client_packages(self, client):
        """
        Retrieves a list of Packages for a specific Reseller Client

        :param client: [Required] ID for a specific Reseller Client (Example: 561115)
        :type client: :py:class:`int`
        :returns: :py:class:`dict`
        """
        method = "getClientPackages"
        parameters = {}
        if client:
            if not isinstance(client, int):
                raise ValueError("ID for a specific Reseller Client needs to be an int (Example: 561115)")
            parameters["client"] = client
        return self._voipms_client._get(method, parameters)

    def clients(self, client=None):
        """
        Retrieves a list of all Clients if no additional parameter is provided

        - Retrieves a specific Reseller Client if a Reseller Client ID is provided
        - Retrieves a specific Reseller Client if a Reseller Client e-mail is provided

        :param client: Parameter could have the following values:
                            * Empty Value [Not Required]
                            * Specific Reseller Client ID (Example: 561115)
                            * Specific Reseller Client e-mail (Example: 'john.doe@mydomain.com')
        :type client: :py:class:`int` or `str` or ``
        :returns: :py:class:`dict`
        """
        method = "getClients"
        # Unlike the other methods, an absent client is sent as "" so the
        # API returns the full client list.
        if not client:
            client = ""
        parameters = {
            "client": client,
        }
        return self._voipms_client._get(method, parameters)

    def client_threshold(self, client):
        """
        Retrieves the Threshold Information for a specific Reseller Client

        :param client: [Required] ID for a specific Reseller Client (Example: 561115)
        :type client: :py:class:`int`
        :returns: :py:class:`dict`
        """
        method = "getClientThreshold"
        parameters = {}
        if client:
            if not isinstance(client, int):
                raise ValueError("ID for a specific Reseller Client needs to be an int (Example: 561115)")
            parameters["client"] = client
        return self._voipms_client._get(method, parameters)

    def deposits(self, client):
        """
        Retrieves Deposits made for a specific Reseller Client

        :param client: [Required] ID for a specific Reseller Client (Example: 561115)
        :type client: :py:class:`int`
        :returns: :py:class:`dict`
        """
        method = "getDeposits"
        parameters = {}
        if client:
            if not isinstance(client, int):
                raise ValueError("ID for a specific Reseller Client needs to be an int (Example: 561115)")
            parameters["client"] = client
        return self._voipms_client._get(method, parameters)

    def packages(self, package=None):
        """
        Retrieves a list of Packages if no additional parameter is provided

        - Retrieves a specific Package if a code is provided

        :param package: Code for a specific Package (Example: 8378)
        :type package: :py:class:`int`
        :returns: :py:class:`dict`
        """
        method = "getPackages"
        parameters = {}
        if package:
            if not isinstance(package, int):
                raise ValueError("Code for a specific Package needs to be an int (Example: 8378)")
            parameters["package"] = package
        return self._voipms_client._get(method, parameters)

    def reseller_balance(self, client):
        """
        Retrieves Balance and Calls Statistics for a specific Reseller Client for the last 30 days and current day

        :param client: [Required] ID for a specific Reseller Client (Example: 561115)
        :type client: :py:class:`int`
        :returns: :py:class:`dict`
        """
        method = "getResellerBalance"
        parameters = {}
        if client:
            if not isinstance(client, int):
                raise ValueError("ID for a specific Reseller Client needs to be an int (Example: 561115)")
            parameters["client"] = client
        return self._voipms_client._get(method, parameters)
| 33.99422 | 114 | 0.606019 | # coding=utf-8
"""
The Clients API endpoint get
Documentation: https://voip.ms/m/apidocs.php
"""
from voipms.baseapi import BaseApi
class ClientsGet(BaseApi):
    """
    Get for the Clients endpoint.

    Each method wraps one read-only call of the voip.ms "clients" API and
    returns the decoded response from ``self._voipms_client._get``.
    """

    def __init__(self, *args, **kwargs):
        """
        Initialize the endpoint
        """
        super(ClientsGet, self).__init__(*args, **kwargs)
        self.endpoint = 'clients'

    def balance_management(self, balance_management=None):
        """
        Retrieves a list of Balance Management Options if no additional parameter is provided

        - Retrieves a specific Balance Management Option if a code is provided

        :param balance_management: Code for a specific Balance Management Setting (Example: 1)
        :type balance_management: :py:class:`int`
        :returns: :py:class:`dict`
        """
        method = "getBalanceManagement"
        parameters = {}
        # NOTE(review): a falsy value (e.g. 0) skips validation and is not
        # sent at all — confirm 0 is never a valid code.
        if balance_management:
            if not isinstance(balance_management, int):
                raise ValueError("Code for a specific Balance Management Setting needs to be an int (Example: 1)")
            parameters["balance_management"] = balance_management
        return self._voipms_client._get(method, parameters)

    def charges(self, client):
        """
        Retrieves Charges made to a specific Reseller Client

        :param client: [Required] ID for a specific Reseller Client (Example: 561115)
        :type client: :py:class:`int`
        :returns: :py:class:`dict`
        """
        method = "getCharges"
        parameters = {}
        if client:
            if not isinstance(client, int):
                raise ValueError("ID for a specific Reseller Client needs to be an int (Example: 561115)")
            parameters["client"] = client
        return self._voipms_client._get(method, parameters)

    def client_packages(self, client):
        """
        Retrieves a list of Packages for a specific Reseller Client

        :param client: [Required] ID for a specific Reseller Client (Example: 561115)
        :type client: :py:class:`int`
        :returns: :py:class:`dict`
        """
        method = "getClientPackages"
        parameters = {}
        if client:
            if not isinstance(client, int):
                raise ValueError("ID for a specific Reseller Client needs to be an int (Example: 561115)")
            parameters["client"] = client
        return self._voipms_client._get(method, parameters)

    def clients(self, client=None):
        """
        Retrieves a list of all Clients if no additional parameter is provided

        - Retrieves a specific Reseller Client if a Reseller Client ID is provided
        - Retrieves a specific Reseller Client if a Reseller Client e-mail is provided

        :param client: Parameter could have the following values:
                            * Empty Value [Not Required]
                            * Specific Reseller Client ID (Example: 561115)
                            * Specific Reseller Client e-mail (Example: 'john.doe@mydomain.com')
        :type client: :py:class:`int` or `str` or ``
        :returns: :py:class:`dict`
        """
        method = "getClients"
        # Unlike the other methods, an absent client is sent as "" so the
        # API returns the full client list.
        if not client:
            client = ""
        parameters = {
            "client": client,
        }
        return self._voipms_client._get(method, parameters)

    def client_threshold(self, client):
        """
        Retrieves the Threshold Information for a specific Reseller Client

        :param client: [Required] ID for a specific Reseller Client (Example: 561115)
        :type client: :py:class:`int`
        :returns: :py:class:`dict`
        """
        method = "getClientThreshold"
        parameters = {}
        if client:
            if not isinstance(client, int):
                raise ValueError("ID for a specific Reseller Client needs to be an int (Example: 561115)")
            parameters["client"] = client
        return self._voipms_client._get(method, parameters)

    def deposits(self, client):
        """
        Retrieves Deposits made for a specific Reseller Client

        :param client: [Required] ID for a specific Reseller Client (Example: 561115)
        :type client: :py:class:`int`
        :returns: :py:class:`dict`
        """
        method = "getDeposits"
        parameters = {}
        if client:
            if not isinstance(client, int):
                raise ValueError("ID for a specific Reseller Client needs to be an int (Example: 561115)")
            parameters["client"] = client
        return self._voipms_client._get(method, parameters)

    def packages(self, package=None):
        """
        Retrieves a list of Packages if no additional parameter is provided

        - Retrieves a specific Package if a code is provided

        :param package: Code for a specific Package (Example: 8378)
        :type package: :py:class:`int`
        :returns: :py:class:`dict`
        """
        method = "getPackages"
        parameters = {}
        if package:
            if not isinstance(package, int):
                raise ValueError("Code for a specific Package needs to be an int (Example: 8378)")
            parameters["package"] = package
        return self._voipms_client._get(method, parameters)

    def reseller_balance(self, client):
        """
        Retrieves Balance and Calls Statistics for a specific Reseller Client for the last 30 days and current day

        :param client: [Required] ID for a specific Reseller Client (Example: 561115)
        :type client: :py:class:`int`
        :returns: :py:class:`dict`
        """
        method = "getResellerBalance"
        parameters = {}
        if client:
            if not isinstance(client, int):
                raise ValueError("ID for a specific Reseller Client needs to be an int (Example: 561115)")
            parameters["client"] = client
        return self._voipms_client._get(method, parameters)
| 0 | 0 | 0 |
c021a1ce6f4526c4f085e79c8722a08cd4e00528 | 560 | py | Python | altair/display/__init__.py | jakevdp/altair2 | 46d391034c5b72867c9e4d01f3a7c7c536533add | [
"BSD-3-Clause"
] | 2 | 2018-02-03T05:35:52.000Z | 2018-02-05T21:00:18.000Z | altair/display/__init__.py | jakevdp/altair2 | 46d391034c5b72867c9e4d01f3a7c7c536533add | [
"BSD-3-Clause"
] | null | null | null | altair/display/__init__.py | jakevdp/altair2 | 46d391034c5b72867c9e4d01f3a7c7c536533add | [
"BSD-3-Clause"
] | null | null | null | from .mixins import VegaDisplayMixin
from . import utils
| 24.347826 | 64 | 0.651786 | from .mixins import VegaDisplayMixin
from . import utils
class VegaLite(VegaDisplayMixin):
    """Displayable wrapper around a Vega-Lite spec and optional data."""

    def __init__(self, spec, data=None):
        self.spec = spec
        self.data = data

    def _get_spec_info(self):
        # Merge the raw spec with the attached data before rendering.
        prepared_spec = utils.prepare_vegalite_spec(self.spec, self.data)
        return (prepared_spec, 'vega-lite')
class Vega(VegaDisplayMixin):
    """Displayable wrapper around a Vega spec and optional data."""

    def __init__(self, spec, data=None):
        self.spec = spec
        self.data = data

    def _get_spec_info(self):
        # Merge the raw spec with the attached data before rendering.
        prepared_spec = utils.prepare_vega_spec(self.spec, self.data)
        return (prepared_spec, 'vega')
| 329 | 20 | 152 |
13b429b81010ca853bf0a7c55e57c20f41b5c98f | 1,540 | py | Python | matrix_traversal/utils.py | SiberiaMan/Avitotech | 0f17bedd157973ad3f5a3fa748a4892eb1a42204 | [
"MIT"
] | null | null | null | matrix_traversal/utils.py | SiberiaMan/Avitotech | 0f17bedd157973ad3f5a3fa748a4892eb1a42204 | [
"MIT"
] | null | null | null | matrix_traversal/utils.py | SiberiaMan/Avitotech | 0f17bedd157973ad3f5a3fa748a4892eb1a42204 | [
"MIT"
] | null | null | null | import validators
import aiohttp
from typing import List
from typing import Optional
def check_url(url: str) -> bool:
    """
    This function checks whether the given URL is syntactically valid.

    :param url: URL
    :return: True if is valid, else False
    """
    # validators.url() returns True on success and a falsy ValidationFailure
    # object on failure, so coerce to a plain bool. The previous
    # Optional[bool] annotation was wrong: None was never returned.
    return bool(validators.url(url))
async def get_formatted_matrix(resp: aiohttp.client.ClientResponse) -> List[List[int]]:
    """
    This function creates and returns a formatted matrix from the server's response

    The body is expected to alternate border lines with data rows; every
    second line (index 1, 3, ...) carries integers separated by '|' and
    whitespace.

    :param resp: received response from server
    :return: formatted [int] matrix
    """
    chunk_size = 1024
    # BUGFIX: buffer the payload in memory instead of spooling it through a
    # shared hard-coded '.matrix.txt' file, which raced between concurrent
    # calls and left a stray file on disk.
    payload = bytearray()
    while True:
        chunk = await resp.content.read(chunk_size)
        if not chunk:
            break
        payload.extend(chunk)
    data_rows = payload.decode().splitlines()[1::2]
    return [
        [int(token) for token in row.strip().replace('|', '').split()]
        for row in data_rows
    ]
def traverse_matrix_counterclockwise(matrix: List[List[int]]) -> List[int]:
    """
    This function traverses the matrix counterclockwise and returns a list

    Transposing first makes the leading row of the working matrix equal to
    the first column of the original; repeatedly taking that row and
    rotating the remainder walks one spiral layer at a time.

    :param matrix: formatted matrix
    :return: a list obtained by traversing the matrix counterclockwise
    """
    working = list(zip(*matrix))  # rows -> columns, columns -> rows
    traversal = []
    while working:
        traversal.extend(working[0])
        working = list(zip(*working[1:]))[::-1]
    return traversal
| 29.056604 | 87 | 0.625974 | import validators
import aiohttp
from typing import List
from typing import Optional
def check_url(url: str) -> bool:
    """
    This function checks whether the given URL is syntactically valid.

    :param url: URL
    :return: True if is valid, else False
    """
    # validators.url() returns True on success and a falsy ValidationFailure
    # object on failure, so coerce to a plain bool. The previous
    # Optional[bool] annotation was wrong: None was never returned.
    return bool(validators.url(url))
async def get_formatted_matrix(resp: aiohttp.client.ClientResponse) -> List[List[int]]:
    """
    This function creates and returns a formatted matrix from the server's response

    The body is expected to alternate border lines with data rows; every
    second line (index 1, 3, ...) carries integers separated by '|' and
    whitespace.

    :param resp: received response from server
    :return: formatted [int] matrix
    """
    chunk_size = 1024
    # BUGFIX: buffer the payload in memory instead of spooling it through a
    # shared hard-coded '.matrix.txt' file, which raced between concurrent
    # calls and left a stray file on disk.
    payload = bytearray()
    while True:
        chunk = await resp.content.read(chunk_size)
        if not chunk:
            break
        payload.extend(chunk)
    data_rows = payload.decode().splitlines()[1::2]
    return [
        [int(token) for token in row.strip().replace('|', '').split()]
        for row in data_rows
    ]
def traverse_matrix_counterclockwise(matrix: List[List[int]]) -> List[int]:
    """
    This function traverses the matrix counterclockwise and returns a list

    Transposing first makes the leading row of the working matrix equal to
    the first column of the original; repeatedly taking that row and
    rotating the remainder walks one spiral layer at a time.

    :param matrix: formatted matrix
    :return: a list obtained by traversing the matrix counterclockwise
    """
    working = list(zip(*matrix))  # rows -> columns, columns -> rows
    traversal = []
    while working:
        traversal.extend(working[0])
        working = list(zip(*working[1:]))[::-1]
    return traversal
| 0 | 0 | 0 |
342069d7d55981fcab3a1c5635c7a9b5c1fab879 | 5,886 | py | Python | cats/v2/server/server.py | Cifrazia/cats-python | de75b8b5b6ab60d7e250cb4c041c1515aa749d79 | [
"MIT"
] | 2 | 2021-10-04T05:39:03.000Z | 2021-10-07T06:44:19.000Z | cats/v2/server/server.py | Cifrazia/cats-python | de75b8b5b6ab60d7e250cb4c041c1515aa749d79 | [
"MIT"
] | 3 | 2021-10-07T07:07:48.000Z | 2021-12-27T14:04:51.000Z | cats/v2/server/server.py | Cifrazia/cats-python | de75b8b5b6ab60d7e250cb4c041c1515aa749d79 | [
"MIT"
] | 2 | 2021-10-01T20:58:25.000Z | 2021-10-04T05:40:35.000Z | import asyncio
import socket
import ssl
from contextlib import asynccontextmanager
from logging import getLogger
from typing import Callable
from tornado.iostream import IOStream
from tornado.tcpserver import TCPServer
from tornado.testing import bind_unused_port
from cats.errors import CatsError
from cats.utils import as_uint, to_uint
from cats.v2.connection import ConnType, Connection
from cats.v2.server.application import Application
from cats.v2.server.connection import Connection as ServerConnection
from cats.v2.server.proxy import handle_with_proxy
__all__ = [
'Server',
]
logging = getLogger('CATS.Server')
| 31.142857 | 89 | 0.579001 | import asyncio
import socket
import ssl
from contextlib import asynccontextmanager
from logging import getLogger
from typing import Callable
from tornado.iostream import IOStream
from tornado.tcpserver import TCPServer
from tornado.testing import bind_unused_port
from cats.errors import CatsError
from cats.utils import as_uint, to_uint
from cats.v2.connection import ConnType, Connection
from cats.v2.server.application import Application
from cats.v2.server.connection import Connection as ServerConnection
from cats.v2.server.proxy import handle_with_proxy
__all__ = [
'Server',
]
logging = getLogger('CATS.Server')
class Server(TCPServer):
    """CATS TCP server built on tornado's TCPServer.

    Every constructed Server registers itself in ``instances`` (see
    ``__new__``/``__del__``), and every live client connection is tracked
    in ``connections`` so broadcasts can reach all channels on all
    running servers.
    """
    __slots__ = ('app', 'port', 'connections')
    # Inclusive (min, max) range of supported CATS protocol versions.
    protocols: tuple[int, int] = 2, 2
    # Class-level registry of all constructed servers.
    instances: list['Server'] = []

    def __init__(
            self, app: Application,
            ssl_options: dict[str] | ssl.SSLContext | None = None,
            max_buffer_size: int | None = None,
            read_chunk_size: int | None = None
    ):
        """Bind the server to its Application; port is set later by
        bind()/listen()/bind_unused_port()."""
        self.app: Application = app
        self.port: int | None = None
        self.connections: list[Connection] = []
        super().__init__(
            ssl_options=ssl_options,
            max_buffer_size=max_buffer_size,
            read_chunk_size=read_chunk_size
        )

    @classmethod
    async def broadcast(
            cls,
            channel: str,
            handler_id: int,
            data=None,
            message_id: int = None,
            compression: int = None,
            *, headers=None, status: int = None
    ):
        """Send a message to every connection subscribed to *channel*
        across all currently running servers."""
        return await asyncio.gather(
            *(
                conn.send(
                    handler_id,
                    data,
                    message_id,
                    compression,
                    headers=headers,
                    status=status
                )
                for server in cls.running_servers()
                for conn in server.app.channel(channel)
            )
        )

    @classmethod
    async def conditional_broadcast(
            cls,
            channel: str,
            _filter: Callable[['Server', Connection], bool],
            handler_id: int,
            data=None,
            message_id: int = None,
            compression: int = None,
            *, headers=None, status: int = None
    ):
        """Like broadcast(), but only sends to connections for which
        ``_filter(server, conn)`` returns True."""
        return await asyncio.gather(
            *(
                conn.send(
                    handler_id,
                    data,
                    message_id,
                    compression,
                    headers=headers,
                    status=status
                )
                for server in cls.running_servers()
                for conn in server.app.channel(channel)
                if _filter(server, conn)
            )
        )

    @handle_with_proxy
    async def handle_stream(self, stream: IOStream, address: tuple[str, int]) -> None:
        """Perform the protocol-version handshake, then run the
        connection's lifecycle.

        Reads a 4-byte version from the client; if unsupported, replies
        with the highest supported version and closes. Otherwise replies
        with 4 zero bytes and drives init()/start() on the connection.
        """
        try:
            protocol_version = as_uint(await stream.read_bytes(4))
            if not self.protocols[0] <= protocol_version <= self.protocols[1]:
                await stream.write(to_uint(self.protocols[1], 4))
                stream.close(CatsError('Unsupported protocol version'))
                return

            # Four zero bytes acknowledge the negotiated version.
            await stream.write(bytes(4))
            async with self.create_connection(stream, address, protocol_version) as conn:
                conn: ServerConnection
                conn.debug(f'[INIT {address}]')
                await conn.init()
                await conn.start()
                conn.debug(f'[STOP {address}]')
        except self.app.config.stream_errors:
            pass

    @asynccontextmanager
    async def create_connection(
            self,
            stream: IOStream,
            address: tuple[str, int],
            protocol: int,
    ) -> ConnType:
        """Async context manager: build a connection (app-provided class or
        the default ServerConnection), register it with the server and the
        '__all__' channel, and always deregister it on exit."""
        conn_class = self.app.ConnectionClass or ServerConnection
        conn = conn_class(stream, address, protocol, self.app.config, self.app)
        try:
            self.connections.append(conn)
            self.app.attach_conn_to_channel(conn, '__all__')
            async with conn:
                yield conn
        except (KeyboardInterrupt, asyncio.CancelledError, asyncio.TimeoutError):
            # Cancellation/shutdown signals must propagate.
            raise
        except self.app.config.ignore_errors:
            pass
        finally:
            self.app.remove_conn_from_channels(conn)
            try:
                self.connections.remove(conn)
            except ValueError:
                # Already removed (e.g. never fully registered).
                pass

    @classmethod
    def running_servers(cls) -> list['Server']:
        """Return every registered server that is currently running."""
        return [server for server in cls.instances if server.is_running]

    @property
    def is_running(self) -> bool:
        # NOTE(review): relies on TCPServer's private _started/_stopped
        # attributes — verify against the pinned tornado version.
        return self._started and not self._stopped

    async def shutdown(self, exc=None):
        """Close every connection (propagating *exc*), clear channels and
        stop accepting new connections."""
        for conn in self.connections:
            conn.close(exc=exc)
        self.app.clear_all_channels()
        self.connections.clear()
        logging.info('Shutting down TCP Server')
        self.stop()

    def start(self, num_processes: int = 1, max_restarts: int = None) -> None:
        # Thin passthrough to TCPServer.start.
        super().start(num_processes, max_restarts)

    def bind_unused_port(self):
        """Bind to an OS-assigned free port (handy for tests) and record it."""
        sock, port = bind_unused_port()
        self.add_socket(sock)
        self.port = port
        logging.info(f'Starting server at 127.0.0.1:{port}')

    def bind(
            self, port: int, address: str = None,
            family: socket.AddressFamily = socket.AF_UNSPEC,
            backlog: int = 128, reuse_port: bool = False
    ) -> None:
        """Bind to *port* like TCPServer.bind, additionally recording the
        port and logging the address."""
        super().bind(port, address, family, backlog, reuse_port)
        self.port = port
        logging.info(f'Starting server at {address}:{port}')

    def listen(self, port: int, address: str = "") -> None:
        """Listen on *port* like TCPServer.listen, additionally recording
        the port and logging the address."""
        super().listen(port, address)
        self.port = port
        logging.info(f'Starting server at {address}:{port}')

    def __new__(cls, *args, **kwargs):
        # Register each new server in the class-level instance list.
        obj = super().__new__(cls)
        cls.instances.append(obj)
        return obj

    def __del__(self):
        # Deregister on garbage collection.
        self.instances.remove(self)
| 4,619 | 614 | 23 |
66ee06f9edf175407996d4f1645346491097442e | 3,900 | py | Python | activity/prepare_data.py | gorgitko/MI-PDD_2016 | 6aabf0d588ee62814cd625526795cacd2810058a | [
"MIT"
] | null | null | null | activity/prepare_data.py | gorgitko/MI-PDD_2016 | 6aabf0d588ee62814cd625526795cacd2810058a | [
"MIT"
] | null | null | null | activity/prepare_data.py | gorgitko/MI-PDD_2016 | 6aabf0d588ee62814cd625526795cacd2810058a | [
"MIT"
] | null | null | null | import sys
from pathlib import Path
sys.path.append(str(Path('.').absolute().parent))
from helper_functions import encode_smiles, save_smiles_charcodes, canonize_smiles
import pandas as pd
import numpy as np
def save_active_compounds(input_file, output_file, encode=True, longest_smiles=0, smiles_charcodes_file="data/smiles_charcodes.npy",
                          smiles_col="CANONICAL_SMILES", delimiter="\t"):
    """
    Save X_active compounds from ChEMBL CSV file.

    Reads the CSV, canonizes every SMILES string, drops entries that could
    not be canonized, optionally one-hot encodes the rest and saves the
    result with ``np.save``.

    Parameters
    ----------
    input_file
        Path of the ChEMBL CSV export to read.
    output_file
        Destination path of the saved numpy array (``.npy``).
    encode
        If True, encode SMILES to one-hot matrices.
    longest_smiles
        How long should be longest SMILES. If len(smiles) < longest_smiles, it gets padded with 0's (resp. its ASCII charcodes).
    smiles_charcodes_file
        .npy file containing list of all possible ASCII charcodes of SMILES.
    smiles_col
        Name of the CSV column holding the SMILES strings.
    delimiter
        Field delimiter of the CSV file.

    Returns
    -------
    numpy.array
        If encode, contains one-hot matrices (scipy.sparse.csr_matrix) of SMILES. Otherwise array of SMILES strings.
    """
    compounds = pd.read_csv(input_file, delimiter=delimiter)
    # Previously used activity filters, kept commented out for reference:
    #compounds = compounds[compounds["STANDARD_UNITS"].isin(["nM", "uM"])]
    #compounds = compounds[compounds["STANDARD_TYPE"].isin(["Kd", "Potency"])]
    compounds = compounds[smiles_col]
    compounds = compounds.astype("str")
    compounds = compounds.apply(canonize_smiles)
    # Drop entries flagged "invalid" (presumably canonize_smiles' failure
    # marker — confirm in helper_functions).
    compounds = compounds[compounds != "invalid"]
    if encode:
        compounds = encode_smiles(compounds, np.load(smiles_charcodes_file), longest_smiles=longest_smiles)
    np.save(output_file, compounds)
    return compounds
def save_inactive_compounds(input_file, output_file, n_compounds, n_files=17, encode=True, longest_smiles=0,
                            smiles_charcodes_file="data/smiles_charcodes.npy"):
    """
    Save X_inactive compounds from multiple files containing SMILES from ZINC.

    Samples ``n_compounds // n_files`` lines from each part file,
    canonizes them, drops entries that could not be canonized, optionally
    one-hot encodes the rest and saves the result with ``np.save``.

    Parameters
    ----------
    input_file
        Part-file path template with two ``{}`` placeholders for the two
        digits of the part number (e.g. ``".../zinc.smiles.part{}{}"``).
    output_file
        Destination path of the saved numpy array (``.npy``).
    n_compounds
        Total number of compounds to sample across all part files (integer
        division means up to ``n_files - 1`` may be dropped overall).
    n_files
        Number of part files to read.
    encode
        If True, encode SMILES to one-hot matrices.
    longest_smiles
        How long should be longest SMILES. If len(smiles) < longest_smiles, it gets padded with 0's (resp. its ASCII charcodes).
    smiles_charcodes_file
        .npy file containing list of all possible ASCII charcodes of SMILES.

    Returns
    -------
    numpy.array
        If encode, contains one-hot matrices (scipy.sparse.csr_matrix) of SMILES. Otherwise array of SMILES strings.
    """
    n_per_file = n_compounds // n_files
    compounds = pd.Series()
    # NOTE(review): pd.Series.append (used below) was removed in pandas 2.0;
    # switch to pd.concat when upgrading pandas.
    for i in range(n_files):
        # Part numbers are zero-padded to two digits (part00 ... part16).
        if i < 10:
            file_path_part = input_file.format(0, i)
        else:
            file_path_part = input_file.format("", i)
        print("Processing input_file {}/{}:".format(i + 1, n_files), file_path_part)
        with open(file_path_part, mode="r") as f:
            data_part = [x.strip() for x in f.readlines()]
            data_part = pd.Series(data_part)
            compounds = compounds.append(data_part.sample(n=n_per_file))
    compounds = compounds.apply(canonize_smiles)
    # Drop entries flagged "invalid" (presumably canonize_smiles' failure
    # marker — confirm in helper_functions).
    compounds = compounds[compounds != "invalid"]
    if encode:
        compounds = encode_smiles(compounds, np.load(smiles_charcodes_file), longest_smiles=longest_smiles)
    np.save(output_file, compounds)
    return compounds
if __name__ == "__main__":
#save_active_compounds("data/dna_pol_iota-X_active-117k.csv", "data/dna_pol_iota-X_active-117k-encoded", longest_smiles=150, encode=True)
#save_inactive_compounds("/home/jirka/temp/zinc/smiles/zinc.smiles.part{}{}", "data/zinc-X_inactive-117k-encoded", 116723, longest_smiles=150, encode=True)
#save_active_compounds("data/dna_pol_iota-active-117k.csv", "data/dna_pol_iota-active-117k-smiles", longest_smiles=150, encode=False)
#save_inactive_compounds("/home/jirka/temp/zinc/smiles/zinc.smiles.part{}{}", "data/zinc-inactive-117k-smiles", 116723, longest_smiles=150, encode=False)
pass
| 39.393939 | 159 | 0.698974 | import sys
from pathlib import Path
sys.path.append(str(Path('.').absolute().parent))
from helper_functions import encode_smiles, save_smiles_charcodes, canonize_smiles
import pandas as pd
import numpy as np
def save_active_compounds(input_file, output_file, encode=True, longest_smiles=0, smiles_charcodes_file="data/smiles_charcodes.npy",
                          smiles_col="CANONICAL_SMILES", delimiter="\t"):
    """
    Save X_active compounds from ChEMBL CSV file.

    Reads the CSV, canonizes every SMILES string, drops entries that could
    not be canonized, optionally one-hot encodes the rest and saves the
    result with ``np.save``.

    Parameters
    ----------
    input_file
        Path of the ChEMBL CSV export to read.
    output_file
        Destination path of the saved numpy array (``.npy``).
    encode
        If True, encode SMILES to one-hot matrices.
    longest_smiles
        How long should be longest SMILES. If len(smiles) < longest_smiles, it gets padded with 0's (resp. its ASCII charcodes).
    smiles_charcodes_file
        .npy file containing list of all possible ASCII charcodes of SMILES.
    smiles_col
        Name of the CSV column holding the SMILES strings.
    delimiter
        Field delimiter of the CSV file.

    Returns
    -------
    numpy.array
        If encode, contains one-hot matrices (scipy.sparse.csr_matrix) of SMILES. Otherwise array of SMILES strings.
    """
    compounds = pd.read_csv(input_file, delimiter=delimiter)
    # Previously used activity filters, kept commented out for reference:
    #compounds = compounds[compounds["STANDARD_UNITS"].isin(["nM", "uM"])]
    #compounds = compounds[compounds["STANDARD_TYPE"].isin(["Kd", "Potency"])]
    compounds = compounds[smiles_col]
    compounds = compounds.astype("str")
    compounds = compounds.apply(canonize_smiles)
    # Drop entries flagged "invalid" (presumably canonize_smiles' failure
    # marker — confirm in helper_functions).
    compounds = compounds[compounds != "invalid"]
    if encode:
        compounds = encode_smiles(compounds, np.load(smiles_charcodes_file), longest_smiles=longest_smiles)
    np.save(output_file, compounds)
    return compounds
def save_inactive_compounds(input_file, output_file, n_compounds, n_files=17, encode=True, longest_smiles=0,
                            smiles_charcodes_file="data/smiles_charcodes.npy"):
    """
    Sample presumed-inactive compounds from multiple ZINC SMILES part files
    and persist them with numpy.

    Parameters
    ----------
    input_file
        Path template with two ``{}`` placeholders for the part index,
        formatted as 00..09 for the first ten files and 10, 11, ... afterwards.
    output_file
        Destination file passed to ``numpy.save``.
    n_compounds
        Total number of compounds to sample, split evenly across the files.
    n_files
        Number of part files to read.
    encode
        If True, encode SMILES to one-hot matrices.
    longest_smiles
        How long should be longest SMILES. If len(smiles) < longest_smiles, it gets padded with 0's (resp. its ASCII charcodes).
    smiles_charcodes_file
        .npy file containing list of all possible ASCII charcodes of SMILES.

    Returns
    -------
    numpy.array
        If encode, contains one-hot matrices (scipy.sparse.csr_matrix) of SMILES. Otherwise array of SMILES strings.
    """
    n_per_file = n_compounds // n_files
    compounds = pd.Series()
    for i in range(n_files):
        # Part files 0-9 carry a leading zero in the filename (part00..part09).
        if i < 10:
            file_path_part = input_file.format(0, i)
        else:
            file_path_part = input_file.format("", i)
        print("Processing input_file {}/{}:".format(i + 1, n_files), file_path_part)
        with open(file_path_part, mode="r") as f:
            data_part = [x.strip() for x in f.readlines()]
        data_part = pd.Series(data_part)
        # Unseeded random sample of n_per_file SMILES from this part file,
        # appended with the sampled (non-sequential) index preserved.
        compounds = compounds.append(data_part.sample(n=n_per_file))
    compounds = compounds.apply(canonize_smiles)
    # Drop rows that failed canonicalization (marked "invalid" by the helper).
    compounds = compounds[compounds != "invalid"]
    if encode:
        compounds = encode_smiles(compounds, np.load(smiles_charcodes_file), longest_smiles=longest_smiles)
    np.save(output_file, compounds)
    return compounds
if __name__ == "__main__":
    # Example invocations kept for reference; uncomment the desired dataset build.
    #save_active_compounds("data/dna_pol_iota-X_active-117k.csv", "data/dna_pol_iota-X_active-117k-encoded", longest_smiles=150, encode=True)
    #save_inactive_compounds("/home/jirka/temp/zinc/smiles/zinc.smiles.part{}{}", "data/zinc-X_inactive-117k-encoded", 116723, longest_smiles=150, encode=True)
    #save_active_compounds("data/dna_pol_iota-active-117k.csv", "data/dna_pol_iota-active-117k-smiles", longest_smiles=150, encode=False)
    #save_inactive_compounds("/home/jirka/temp/zinc/smiles/zinc.smiles.part{}{}", "data/zinc-inactive-117k-smiles", 116723, longest_smiles=150, encode=False)
    pass
| 0 | 0 | 0 |
154e90498cf56f92c3e1e5d17dc97bd08d50c31d | 438 | py | Python | src/apetest/version.py | boxingbeetle/apetest | c6dd7aaca014c64eec4bde7e755c4a3dec72404a | [
"BSD-3-Clause"
] | 6 | 2019-04-01T09:42:31.000Z | 2020-05-20T15:23:17.000Z | src/apetest/version.py | boxingbeetle/apetest | c6dd7aaca014c64eec4bde7e755c4a3dec72404a | [
"BSD-3-Clause"
] | 31 | 2019-02-04T11:38:32.000Z | 2022-03-03T02:51:15.000Z | src/apetest/version.py | boxingbeetle/apetest | c6dd7aaca014c64eec4bde7e755c4a3dec72404a | [
"BSD-3-Clause"
] | null | null | null | # SPDX-License-Identifier: BSD-3-Clause
"""Package version info."""
from typing import TYPE_CHECKING
# On Python 3.8+, use importlib.metadata from the standard library.
# On older versions, a compatibility package can be installed from PyPI.
try:
if not TYPE_CHECKING:
import importlib.metadata as importlib_metadata
except ImportError:
import importlib_metadata
VERSION_STRING = importlib_metadata.version("apetest")
| 27.375 | 72 | 0.773973 | # SPDX-License-Identifier: BSD-3-Clause
"""Package version info."""
from typing import TYPE_CHECKING
# On Python 3.8+, use importlib.metadata from the standard library.
# On older versions, a compatibility package can be installed from PyPI.
try:
if not TYPE_CHECKING:
import importlib.metadata as importlib_metadata
except ImportError:
import importlib_metadata
VERSION_STRING = importlib_metadata.version("apetest")
| 0 | 0 | 0 |
c50e420a81401ab570df899e6cf676f21d07adb7 | 2,476 | py | Python | data/dataset/wider_mafa_face.py | donnyyou/centerX | 6e381cb669a6014d02e31a43915271237690531c | [
"Apache-2.0"
] | 350 | 2020-12-01T09:55:16.000Z | 2020-12-23T13:47:43.000Z | data/dataset/wider_mafa_face.py | powerlic/centerX | 1073753533f26483c3ab053a7d8753708fcacde7 | [
"Apache-2.0"
] | 39 | 2020-12-24T13:42:29.000Z | 2022-02-10T01:09:56.000Z | data/dataset/wider_mafa_face.py | powerlic/centerX | 1073753533f26483c3ab053a7d8753708fcacde7 | [
"Apache-2.0"
] | 49 | 2020-12-01T11:39:14.000Z | 2020-12-21T01:45:39.000Z | import os
import json
import cv2
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.structures import BoxMode
import pickle
import xml.etree.ElementTree as ET
from typing import List, Tuple, Union
from fvcore.common.file_io import PathManager
import logging
__all__ = ["load_face_instances", "register_face"]
# fmt: off
CLASS_NAMES = ("face",)
# fmt: on
def load_face_instances(txt, annotation_dirname, image_root, class_names):
    """
    Load face detection annotations (Pascal-VOC-style XML) into Detectron2's
    dataset-dict format.

    Args:
        txt: text file listing one file id (filename stem) per line.
        annotation_dirname: directory containing "<fileid>.xml" annotations.
        image_root: directory containing "<fileid>.jpg" images.
        class_names: ordered class names; an object's "name" tag is mapped to
            its index in this sequence.

    Returns:
        list[dict]: one record per image with "file_name", "image_id",
        "height", "width" and "annotations" (boxes in XYXY_ABS mode).
    """
    # Needs to read many small annotation files. Makes sense at local
    # NOTE(review): the handle from open(txt) is never closed explicitly.
    lines = open(txt).readlines()
    dicts = []
    for line in lines:
        fileid = line.strip()
        jpeg_file = os.path.join(image_root, fileid + ".jpg")
        anno_file = os.path.join(annotation_dirname, fileid + ".xml")
        with PathManager.open(anno_file) as f:
            tree = ET.parse(f)
        r = {
            "file_name": jpeg_file,
            "image_id": fileid,
            "height": int(tree.findall("./size/height")[0].text),
            "width": int(tree.findall("./size/width")[0].text),
        }
        instances = []
        for obj in tree.findall("object"):
            cls = obj.find("name").text
            # We include "difficult" samples in training.
            # Based on limited experiments, they don't hurt accuracy.
            # difficult = int(obj.find("difficult").text)
            # if difficult == 1:
            # continue
            bbox = obj.find("bndbox")
            bbox = [float(bbox.find(x).text) for x in ["xmin", "ymin", "xmax", "ymax"]]
            # Original annotations are integers in the range [1, W or H]
            # Assuming they mean 1-based pixel indices (inclusive),
            # a box with annotation (xmin=1, xmax=W) covers the whole image.
            # In coordinate space this is represented by (xmin=0, xmax=W)
            bbox[0] -= 1.0
            bbox[1] -= 1.0
            instances.append(
                {"category_id": class_names.index(cls), "bbox": bbox, "bbox_mode": BoxMode.XYXY_ABS}
            )
        r["annotations"] = instances
        dicts.append(r)
    return dicts
| 33.013333 | 112 | 0.620759 | import os
import json
import cv2
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.structures import BoxMode
import pickle
import xml.etree.ElementTree as ET
from typing import List, Tuple, Union
from fvcore.common.file_io import PathManager
import logging
__all__ = ["load_face_instances", "register_face"]
# fmt: off
CLASS_NAMES = ("face",)
# fmt: on
def load_face_instances(txt, annotation_dirname, image_root, class_names):
    """
    Load face detection annotations (Pascal-VOC-style XML) into Detectron2's
    dataset-dict format.

    Args:
        txt: text file listing one file id (filename stem) per line.
        annotation_dirname: directory containing "<fileid>.xml" annotations.
        image_root: directory containing "<fileid>.jpg" images.
        class_names: ordered class names; an object's "name" tag is mapped to
            its index in this sequence.

    Returns:
        list[dict]: one record per image with "file_name", "image_id",
        "height", "width" and "annotations" (boxes in XYXY_ABS mode).
    """
    # Needs to read many small annotation files. Makes sense at local
    # NOTE(review): the handle from open(txt) is never closed explicitly.
    lines = open(txt).readlines()
    dicts = []
    for line in lines:
        fileid = line.strip()
        jpeg_file = os.path.join(image_root, fileid + ".jpg")
        anno_file = os.path.join(annotation_dirname, fileid + ".xml")
        with PathManager.open(anno_file) as f:
            tree = ET.parse(f)
        r = {
            "file_name": jpeg_file,
            "image_id": fileid,
            "height": int(tree.findall("./size/height")[0].text),
            "width": int(tree.findall("./size/width")[0].text),
        }
        instances = []
        for obj in tree.findall("object"):
            cls = obj.find("name").text
            # We include "difficult" samples in training.
            # Based on limited experiments, they don't hurt accuracy.
            # difficult = int(obj.find("difficult").text)
            # if difficult == 1:
            # continue
            bbox = obj.find("bndbox")
            bbox = [float(bbox.find(x).text) for x in ["xmin", "ymin", "xmax", "ymax"]]
            # Original annotations are integers in the range [1, W or H]
            # Assuming they mean 1-based pixel indices (inclusive),
            # a box with annotation (xmin=1, xmax=W) covers the whole image.
            # In coordinate space this is represented by (xmin=0, xmax=W)
            bbox[0] -= 1.0
            bbox[1] -= 1.0
            instances.append(
                {"category_id": class_names.index(cls), "bbox": bbox, "bbox_mode": BoxMode.XYXY_ABS}
            )
        r["annotations"] = instances
        dicts.append(r)
    return dicts
def register_face(name, txt, annotation_dirname, image_root, class_names=CLASS_NAMES):
    """Register the face dataset under *name* and attach its class metadata."""
    def _loader():
        # Deferred so annotation files are only parsed when the dataset is used.
        return load_face_instances(txt, annotation_dirname, image_root, class_names)

    DatasetCatalog.register(name, _loader)
    MetadataCatalog.get(name).set(thing_classes=list(class_names))
| 259 | 0 | 23 |
bfc4f24e4344f53688f6ad04ec5eda7354d3977a | 191 | py | Python | datasets/__init__.py | marsggbo/CovidNet3D | 0aeca91a775f938a0e568dd88d8162473dacf3ce | [
"MIT"
] | 5 | 2021-02-23T06:43:31.000Z | 2021-07-05T15:24:05.000Z | datasets/__init__.py | etherx-dev/CovidNet3D | b107d7d965cad07f1890ee492857273f3468cc01 | [
"MIT"
] | 1 | 2021-06-08T21:06:10.000Z | 2021-06-08T21:06:10.000Z | datasets/__init__.py | etherx-dev/CovidNet3D | b107d7d965cad07f1890ee492857273f3468cc01 | [
"MIT"
] | 4 | 2021-02-01T03:29:16.000Z | 2021-08-05T09:13:37.000Z | from .build import *
from .common_datasets import *
from .transforms import *
from .albumentations_transforms import *
from .ct_data import *
from .ct_transforms import *
from .utils import * | 27.285714 | 40 | 0.78534 | from .build import *
from .common_datasets import *
from .transforms import *
from .albumentations_transforms import *
from .ct_data import *
from .ct_transforms import *
from .utils import * | 0 | 0 | 0 |
3285f565ffab54ab3ae9b30830d036649c61a32d | 5,697 | py | Python | pyaugmecon/model.py | vishalbelsare/pyaugmecon | b9b6310b66007d1be7035f50a7e2691e7669f74e | [
"MIT"
] | 5 | 2021-05-29T20:18:06.000Z | 2022-01-20T08:56:26.000Z | pyaugmecon/model.py | vishalbelsare/pyaugmecon | b9b6310b66007d1be7035f50a7e2691e7669f74e | [
"MIT"
] | null | null | null | pyaugmecon/model.py | vishalbelsare/pyaugmecon | b9b6310b66007d1be7035f50a7e2691e7669f74e | [
"MIT"
] | 3 | 2021-08-20T19:27:28.000Z | 2022-01-21T13:42:49.000Z | import os
import logging
import cloudpickle
import numpy as np
import pyomo.environ as pyo
from pyaugmecon.options import Options
from pyaugmecon.helper import Counter, ProgressBar
from pyomo.core.base import (
Var,
ConstraintList,
maximize,
minimize,
Set,
Param,
NonNegativeReals,
Any,
)
| 31.475138 | 88 | 0.582236 | import os
import logging
import cloudpickle
import numpy as np
import pyomo.environ as pyo
from pyaugmecon.options import Options
from pyaugmecon.helper import Counter, ProgressBar
from pyomo.core.base import (
Var,
ConstraintList,
maximize,
minimize,
Set,
Param,
NonNegativeReals,
Any,
)
class Model(object):
    """Wrapper around a Pyomo model used by the AUGMECON algorithm.

    Holds the model plus solver bookkeeping: the payoff table, objective
    ranges and gridpoints, progress counters, and (un)pickling of the model
    to/from disk.
    """

    def __init__(self, model: pyo.ConcreteModel, opts: Options):
        """Store the model and options and set up progress bookkeeping.

        Args:
            model: Pyomo model exposing an ``obj_list`` objective list.
            opts: algorithm/solver options.

        Raises:
            ValueError: if fewer than two objective functions are present.
        """
        self.model = model
        self.opts = opts
        self.logger = logging.getLogger(opts.log_name)

        self.n_obj = len(self.model.obj_list)
        self.iter_obj = range(self.n_obj)
        self.iter_obj2 = range(self.n_obj - 1)

        # Setup progress bar: gp^(p-1) grid models plus p^2 payoff-table solves
        self.to_solve = self.opts.gp ** (self.n_obj - 1) + self.n_obj ** 2
        self.progress = ProgressBar(Counter(), self.to_solve)
        self.models_solved = Counter()
        self.infeasibilities = Counter()

        if self.n_obj < 2:
            # ValueError is a subclass of Exception, so existing
            # "except Exception" handlers still catch it.
            raise ValueError("Too few objective functions provided")

    # --- accessors; objective i is 0-based here, 1-based in the Pyomo list ---

    def obj(self, i):
        """Return objective function i."""
        return self.model.obj_list[i + 1]

    def obj_val(self, i):
        """Return the current value of objective i."""
        return self.obj(i)()

    def obj_expr(self, i):
        """Return the expression of objective i."""
        return self.obj(i).expr

    def obj_sense(self, i):
        """Return the optimization sense (minimize/maximize) of objective i."""
        return self.obj(i).sense

    def slack_val(self, i):
        """Return the value of the slack variable attached to objective i."""
        return self.model.Slack[i + 1].value

    def obj_activate(self, i):
        """Activate objective i."""
        self.obj(i).activate()

    def obj_deactivate(self, i):
        """Deactivate objective i."""
        self.obj(i).deactivate()

    def solve(self):
        """Solve the model, recording result, termination condition and status."""
        opt = pyo.SolverFactory(self.opts.solver_name, solver_io=self.opts.solver_io)
        opt.options.update(self.opts.solver_opts)
        self.result = opt.solve(self.model)
        self.term = self.result.solver.termination_condition
        self.status = self.result.solver.status

    def pickle(self):
        """Serialize the model to opts.model_fn and drop the in-memory copy."""
        # "with" closes the handle; the original leaked an open file object.
        with open(self.opts.model_fn, "wb") as model_file:
            cloudpickle.dump(self.model, model_file)
        del self.model

    def unpickle(self):
        """Restore the model previously serialized by pickle()."""
        with open(self.opts.model_fn, "rb") as model_file:
            self.model = cloudpickle.load(model_file)

    def clean(self):
        """Delete the pickled model file, if present."""
        if os.path.exists(self.opts.model_fn):
            os.remove(self.opts.model_fn)

    def is_optimal(self):
        """Return True if the last solve terminated at an optimal solution."""
        return (
            self.status == pyo.SolverStatus.ok
            and self.term == pyo.TerminationCondition.optimal
        )

    def is_infeasible(self):
        """Return True if the last solve found the model infeasible."""
        return (
            self.term == pyo.TerminationCondition.infeasible
            or self.term == pyo.TerminationCondition.infeasibleOrUnbounded
        )

    def min_to_max(self):
        """Convert all minimization objectives to equivalent maximizations.

        Records the original goal direction per objective in self.obj_goal
        (-1 for former minimizations, 1 otherwise).
        """
        self.obj_goal = [
            -1 if self.obj_sense(o) == minimize else 1 for o in self.iter_obj
        ]
        for o in self.iter_obj:
            if self.obj_sense(o) == minimize:
                self.model.obj_list[o + 1].sense = maximize
                self.model.obj_list[o + 1].expr = -1 * self.model.obj_list[o + 1].expr

    def construct_payoff(self):
        """Build the n_obj x n_obj payoff table by lexicographic optimization."""
        self.logger.info("Constructing payoff")
        self.progress.set_message("constructing payoff")

        def set_payoff(i, j):
            # Solve for objective j and store its value in payoff row i.
            self.obj_activate(j)
            self.solve()
            self.progress.increment()
            self.payoff[i, j] = self.obj_val(j)
            self.obj_deactivate(j)

        self.payoff = np.full((self.n_obj, self.n_obj), np.inf)
        self.model.pcon_list = ConstraintList()

        # Independently optimize each objective function (diagonal elements)
        for i in self.iter_obj:
            for j in self.iter_obj:
                if i == j:
                    set_payoff(i, j)

        # Optimize j having all the i as constraints (off-diagonal elements)
        for i in self.iter_obj:
            self.model.pcon_list.add(expr=self.obj_expr(i) == self.payoff[i, i])
            for j in self.iter_obj:
                if i != j:
                    set_payoff(i, j)
                    self.model.pcon_list.add(expr=self.obj_expr(j) == self.payoff[i, j])
            self.model.pcon_list.clear()

    def find_obj_range(self):
        """Compute each constrained objective's range and gridpoints (self.e)."""
        self.logger.info("Finding objective function range")

        # Gridpoints of p-1 objective functions that are used as constraints
        self.e = np.zeros((self.n_obj - 1, self.opts.gp))
        self.obj_range = np.zeros(self.n_obj - 1)

        for i in self.iter_obj2:
            # Locals renamed from min/max to avoid shadowing the builtins.
            if self.opts.nadir_p:
                obj_min = self.opts.nadir_p[i]
            else:
                obj_min = self.opts.nadir_r * np.min(self.payoff[:, i + 1], 0)
            obj_max = np.max(self.payoff[:, i + 1], 0)
            self.obj_range[i] = obj_max - obj_min
            self.e[i] = [
                obj_min + j * (self.obj_range[i] / (self.opts.gp - 1))
                for j in range(0, self.opts.gp)
            ]

    def convert_prob(self):
        """Augment the model AUGMECON-style: objectives 2..p become constraints.

        Adds slack variables and mutable RHS parameters, and extends the first
        objective with the scaled slack terms.
        """
        self.logger.info("Converting optimization problem")
        self.model.con_list = ConstraintList()

        # Set of objective functions
        self.model.Os = Set(ordered=True, initialize=[o + 2 for o in self.iter_obj2])

        # Slack for objectives introduced as constraints
        self.model.Slack = Var(self.model.Os, within=NonNegativeReals)
        self.model.e = Param(
            self.model.Os,
            initialize=[np.nan for _ in self.model.Os],
            within=Any,
            mutable=True,
        )  # RHS of constraints

        # Add p-1 objective functions as constraints
        for o in range(1, self.n_obj):
            self.model.obj_list[1].expr += self.opts.eps * (
                10 ** (-1 * (o - 1)) * self.model.Slack[o + 1] / self.obj_range[o - 1]
            )
            self.model.con_list.add(
                expr=self.model.obj_list[o + 1].expr - self.model.Slack[o + 1]
                == self.model.e[o + 1]
            )
| 4,868 | -1 | 508 |
c4d42648c717a474389b6ca6d39d2e3139ef1739 | 649 | py | Python | server/websocket.py | ikiler/MagicRobot | 5e0764060b61aa155082b3387c033430bd0ec8b6 | [
"MIT"
] | null | null | null | server/websocket.py | ikiler/MagicRobot | 5e0764060b61aa155082b3387c033430bd0ec8b6 | [
"MIT"
] | null | null | null | server/websocket.py | ikiler/MagicRobot | 5e0764060b61aa155082b3387c033430bd0ec8b6 | [
"MIT"
] | null | null | null | from tornado.websocket import WebSocketHandler
| 24.037037 | 50 | 0.620955 | from tornado.websocket import WebSocketHandler
class SocketHandler(WebSocketHandler):
def __init__(self):
print("")
users = set() # 用来存放在线用户的容器
def open(self):
self.users.add(self) # 建立连接后添加用户到容器中
for u in self.users: # 向已在线用户发送消息
u.write_message("hello")
def on_message(self, message):
for u in self.users: # 向在线用户广播消息
u.write_message(u"hello2")
def on_close(self):
self.users.remove(self) # 用户关闭连接后从容器中移除用户
for u in self.users:
u.write_message("ffffff")
def check_origin(self, origin):
return True # 允许WebSocket的跨域请求
| 500 | 208 | 23 |
f52617a12608ce90e4e74a77b0a04560f750f451 | 1,365 | py | Python | setup.py | kgaughan/uwhoisd | 0b781e2eb6f6230ac5e64a79985b9d119e495164 | [
"MIT"
] | 32 | 2015-05-13T11:02:29.000Z | 2021-12-24T08:17:16.000Z | setup.py | kgaughan/uwhoisd | 0b781e2eb6f6230ac5e64a79985b9d119e495164 | [
"MIT"
] | 15 | 2015-11-25T18:58:08.000Z | 2020-03-24T09:48:51.000Z | setup.py | kgaughan/uwhoisd | 0b781e2eb6f6230ac5e64a79985b9d119e495164 | [
"MIT"
] | 3 | 2015-02-01T14:43:34.000Z | 2018-08-27T10:10:23.000Z | #!/usr/bin/env python3
import os.path
from setuptools import find_packages, setup
def read(filename):
"""Read files relative to this file."""
full_path = os.path.join(os.path.dirname(__file__), filename)
with open(full_path, "r") as fh:
return fh.read()
setup(
name="uwhoisd",
version="0.0.7",
description="Universal domain WHOIS proxy server.",
long_description=read("README") + "\n\n" + read("ChangeLog"),
url="https://github.com/kgaughan/uwhoisd/",
license="MIT",
packages=find_packages(exclude=["tests"]),
include_package_data=True,
zip_safe=True,
setup_requires=["setuptools", "wheel"],
install_requires=["tornado", "netaddr==0.7.18"],
extras_require={"scraper": ["beautifulsoup4", "requests"]},
entry_points={
"console_scripts": ("uwhoisd = uwhoisd:main",),
"uwhoisd.cache": ("lfu = uwhoisd.caching:LFU",),
},
classifiers=(
"Development Status :: 2 - Pre-Alpha",
"Environment :: No Input/Output (Daemon)",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Internet",
"Topic :: System :: Networking",
),
author="Keith Gaughan",
author_email="k@stereochro.me",
)
| 30.333333 | 65 | 0.625641 | #!/usr/bin/env python3
import os.path
from setuptools import find_packages, setup
def read(filename):
"""Read files relative to this file."""
full_path = os.path.join(os.path.dirname(__file__), filename)
with open(full_path, "r") as fh:
return fh.read()
setup(
name="uwhoisd",
version="0.0.7",
description="Universal domain WHOIS proxy server.",
long_description=read("README") + "\n\n" + read("ChangeLog"),
url="https://github.com/kgaughan/uwhoisd/",
license="MIT",
packages=find_packages(exclude=["tests"]),
include_package_data=True,
zip_safe=True,
setup_requires=["setuptools", "wheel"],
install_requires=["tornado", "netaddr==0.7.18"],
extras_require={"scraper": ["beautifulsoup4", "requests"]},
entry_points={
"console_scripts": ("uwhoisd = uwhoisd:main",),
"uwhoisd.cache": ("lfu = uwhoisd.caching:LFU",),
},
classifiers=(
"Development Status :: 2 - Pre-Alpha",
"Environment :: No Input/Output (Daemon)",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Internet",
"Topic :: System :: Networking",
),
author="Keith Gaughan",
author_email="k@stereochro.me",
)
| 0 | 0 | 0 |
202f6e0f854d82bdb8312b85ad45d6d42e389d12 | 1,115 | py | Python | infra/controller.py | tukeJonny/NTPAmpMitigator | 91abcfb107166b411596b26678a03a037165f188 | [
"MIT"
] | 1 | 2020-06-20T04:21:15.000Z | 2020-06-20T04:21:15.000Z | infra/controller.py | tukeJonny/NTPAmpMitigator | 91abcfb107166b411596b26678a03a037165f188 | [
"MIT"
] | null | null | null | infra/controller.py | tukeJonny/NTPAmpMitigator | 91abcfb107166b411596b26678a03a037165f188 | [
"MIT"
] | null | null | null | #-*- coding: utf-8 -*-
#from ryu.app import simple_switch_13
#import mitigate_switch_13#my custom simple_switch_13
from ryu.controller import ofp_event
from ryu.controller.handler import (
MAIN_DISPATCHER,
DEAD_DISPATCHER
)
from ryu.controller.handler import set_ev_cls
MITIGATE_MODE_ON = False
if MITIGATE_MODE_ON:
import mitigate_switch_13
super_class = mitigate_switch_13.MitigateSwitch13
else:
from ryu.app import simple_switch_13
super_class = simple_switch_13.SimpleSwitch13
| 30.972222 | 82 | 0.715695 | #-*- coding: utf-8 -*-
#from ryu.app import simple_switch_13
#import mitigate_switch_13#my custom simple_switch_13
from ryu.controller import ofp_event
from ryu.controller.handler import (
MAIN_DISPATCHER,
DEAD_DISPATCHER
)
from ryu.controller.handler import set_ev_cls
MITIGATE_MODE_ON = False
if MITIGATE_MODE_ON:
import mitigate_switch_13
super_class = mitigate_switch_13.MitigateSwitch13
else:
from ryu.app import simple_switch_13
super_class = simple_switch_13.SimpleSwitch13
class NTPAmpMitigator(super_class):
    """Ryu application that tracks connected datapaths (switches).

    Inherits from either the plain simple_switch_13 app or the mitigating
    variant, depending on the module-level MITIGATE_MODE_ON flag.
    """

    def __init__(self, *args, **kwargs):
        super(NTPAmpMitigator, self).__init__(*args, **kwargs)
        self.datapaths = {}  # dpid -> datapath object for connected switches

    @set_ev_cls(ofp_event.EventOFPStateChange, [MAIN_DISPATCHER, DEAD_DISPATCHER])
    def _state_change_handler(self, ev):
        """Keep self.datapaths in sync with switch connect/disconnect events."""
        datapath = ev.datapath
        if ev.state == MAIN_DISPATCHER:
            # Idiom fix: "x not in y" instead of "not x in y".
            if datapath.id not in self.datapaths:
                self.datapaths[datapath.id] = datapath
        elif ev.state == DEAD_DISPATCHER:
            if datapath.id in self.datapaths:
                del self.datapaths[datapath.id]
| 434 | 151 | 23 |
501edd92a20a95745c9193c57c47f62a89b7c69c | 2,888 | py | Python | module/pages/gas_info.py | medivhXu/AT-M | e1c215ae95085d1be24a7566fd365eb6bfae5e53 | [
"Apache-2.0"
] | 1 | 2019-06-05T08:53:47.000Z | 2019-06-05T08:53:47.000Z | module/pages/gas_info.py | medivhXu/AT-M | e1c215ae95085d1be24a7566fd365eb6bfae5e53 | [
"Apache-2.0"
] | null | null | null | module/pages/gas_info.py | medivhXu/AT-M | e1c215ae95085d1be24a7566fd365eb6bfae5e53 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# encoding: utf-8
"""
@author: Medivh Xu
@file: gas.py
@time: 2020-03-30 15:29
"""
from base.element_manager import *
class GasInfo(BasePage):
"""油站详情"""
gun_name_btn = (By.ID, 'com.xxx.xxx:id/gunName')
money_input_box = (By.ID, 'com.xxx.xxx:id/et_input_money')
money_btn = (By.ID, 'com.xxx.xxx:id/tv_money_{}')
next_btn = (By.ID, 'com.xxx.xxx:id/bt_confirm')
continue_card = (By.ID, 'android:id/content')
continue_next_btn = (By.ID, 'com.xxx.xxx:id/tv_pay')
reset_next_btn = (By.ID, 'com.xxx.xxx:id/reset_btn')
oil_no_btn = (MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().resourceId("com.xxx.xxx:id/oilName").text("{}")')
price = (By.ID, 'com.xxx.xxx:id/xxxPrice')
@logged
def click_gun_no(self):
"""点击油枪"""
self.find_element(*self.gun_name_btn).click()
@logged
def get_gun_no_text(self):
"""获取枪号"""
return self.find_element(*self.gun_name_btn).get_attribute('text')
@logged
def click_oil_no_text(self, oil_no):
"""点击油号"""
by, el = self.oil_no_btn
return self.android_uiautomator(*(by, el.format(oil_no))).click()
@logged
def click_money_btn(self, btn_num=1):
"""点击金额"""
by, el = self.money_btn
el = el.format(btn_num)
self.hide_keyboard()
if self.find_element(*(by, el)).get_attribute('clickable'):
# 定位到金额1按钮
self.find_element(*(by, el)).click()
else:
logger.info("弹出系统键盘了")
# self.hide_keyboard()
self.find_element(*(by, el)).click()
@logged
def get_input_money_text(self):
"""获取输入金额"""
text = self.find_element(*self.money_input_box).get_attribute('text')
return text
@logged
def click_next_btn(self):
"""点击下一步"""
self.hide_keyboard()
self.find_element(*self.next_btn).click()
@logged
def click_continue_next_btn(self, switch=True):
"""点击继续支付"""
if self.find_element(*self.continue_card):
if switch:
self.find_element(*self.continue_next_btn).click()
else:
self.find_element(*self.reset_next_btn).click()
else:
self.click_next_btn()
@logged
def input_money(self, money):
"""输入金额"""
self.find_element(*self.money_input_box).clear()
self.find_element(*self.money_input_box).send_keys(money)
@logged
def check_oil_no(self, check_oil_no_text):
""" TODO 目前这个方法有问题,需要图像识别重写
检车油号是否选择
"""
by, el = self.oil_no_btn
el = el.format(check_oil_no_text)
text = self.android_uiautomator(*(by, el))
return text
@logged
def get_price_text(self):
"""获取当前油号价格"""
price = self.find_element(*self.price).get_attribute('text')
return price
| 29.469388 | 115 | 0.601108 | #!/usr/bin/env python3
# encoding: utf-8
"""
@author: Medivh Xu
@file: gas.py
@time: 2020-03-30 15:29
"""
from base.element_manager import *
class GasInfo(BasePage):
"""油站详情"""
gun_name_btn = (By.ID, 'com.xxx.xxx:id/gunName')
money_input_box = (By.ID, 'com.xxx.xxx:id/et_input_money')
money_btn = (By.ID, 'com.xxx.xxx:id/tv_money_{}')
next_btn = (By.ID, 'com.xxx.xxx:id/bt_confirm')
continue_card = (By.ID, 'android:id/content')
continue_next_btn = (By.ID, 'com.xxx.xxx:id/tv_pay')
reset_next_btn = (By.ID, 'com.xxx.xxx:id/reset_btn')
oil_no_btn = (MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().resourceId("com.xxx.xxx:id/oilName").text("{}")')
price = (By.ID, 'com.xxx.xxx:id/xxxPrice')
@logged
def click_gun_no(self):
"""点击油枪"""
self.find_element(*self.gun_name_btn).click()
@logged
def get_gun_no_text(self):
"""获取枪号"""
return self.find_element(*self.gun_name_btn).get_attribute('text')
@logged
def click_oil_no_text(self, oil_no):
"""点击油号"""
by, el = self.oil_no_btn
return self.android_uiautomator(*(by, el.format(oil_no))).click()
@logged
def click_money_btn(self, btn_num=1):
"""点击金额"""
by, el = self.money_btn
el = el.format(btn_num)
self.hide_keyboard()
if self.find_element(*(by, el)).get_attribute('clickable'):
# 定位到金额1按钮
self.find_element(*(by, el)).click()
else:
logger.info("弹出系统键盘了")
# self.hide_keyboard()
self.find_element(*(by, el)).click()
@logged
def get_input_money_text(self):
"""获取输入金额"""
text = self.find_element(*self.money_input_box).get_attribute('text')
return text
@logged
def click_next_btn(self):
"""点击下一步"""
self.hide_keyboard()
self.find_element(*self.next_btn).click()
@logged
def click_continue_next_btn(self, switch=True):
"""点击继续支付"""
if self.find_element(*self.continue_card):
if switch:
self.find_element(*self.continue_next_btn).click()
else:
self.find_element(*self.reset_next_btn).click()
else:
self.click_next_btn()
@logged
def input_money(self, money):
"""输入金额"""
self.find_element(*self.money_input_box).clear()
self.find_element(*self.money_input_box).send_keys(money)
@logged
def check_oil_no(self, check_oil_no_text):
""" TODO 目前这个方法有问题,需要图像识别重写
检车油号是否选择
"""
by, el = self.oil_no_btn
el = el.format(check_oil_no_text)
text = self.android_uiautomator(*(by, el))
return text
@logged
def get_price_text(self):
"""获取当前油号价格"""
price = self.find_element(*self.price).get_attribute('text')
return price
| 0 | 0 | 0 |
5df2e1d7267e97e10be3b5bc51334ba0353d8569 | 1,169 | py | Python | grafana_backup/save.py | suhlig/grafana-backup-tool | 3e1e280756efedd2530d5240dc2ec6d3f37d65c9 | [
"MIT"
] | null | null | null | grafana_backup/save.py | suhlig/grafana-backup-tool | 3e1e280756efedd2530d5240dc2ec6d3f37d65c9 | [
"MIT"
] | null | null | null | grafana_backup/save.py | suhlig/grafana-backup-tool | 3e1e280756efedd2530d5240dc2ec6d3f37d65c9 | [
"MIT"
] | null | null | null | from grafana_backup.save_dashboards import main as save_dashboards
from grafana_backup.save_datasources import main as save_datasources
from grafana_backup.save_folders import main as save_folders
from grafana_backup.save_alert_channels import main as save_alert_channels
from grafana_backup.archive import main as archive
| 40.310345 | 74 | 0.706587 | from grafana_backup.save_dashboards import main as save_dashboards
from grafana_backup.save_datasources import main as save_datasources
from grafana_backup.save_folders import main as save_folders
from grafana_backup.save_alert_channels import main as save_alert_channels
from grafana_backup.archive import main as archive
def main(args, settings):
arg_components = args.get('--components', False)
arg_no_archive = args.get('--no-archive', False)
backup_functions = { 'dashboards': save_dashboards,
'datasources': save_datasources,
'folders': save_folders,
'alert-channels': save_alert_channels }
if arg_components:
arg_components_list = arg_components.split(',')
# Backup only the components that provided via an argument
for backup_function in arg_components_list:
backup_functions[backup_function](args, settings)
else:
# Backup every component
for backup_function in backup_functions.keys():
backup_functions[backup_function](args, settings)
if not arg_no_archive:
archive(args, settings)
| 822 | 0 | 23 |
bd3d12d49d8826b3181b830e13cb243a0218dffb | 14,182 | py | Python | main.py | ks-tec/Hydroponic | d9347f82698841d85c0a45908e8671b36c50ffce | [
"MIT"
] | 1 | 2021-05-27T13:32:45.000Z | 2021-05-27T13:32:45.000Z | main.py | ks-tec/Hydroponic | d9347f82698841d85c0a45908e8671b36c50ffce | [
"MIT"
] | null | null | null | main.py | ks-tec/Hydroponic | d9347f82698841d85c0a45908e8671b36c50ffce | [
"MIT"
] | null | null | null | # This is Hydroponic project in MicroPython with the ESP32 board.
# Using devices are SSD1306 OLED, DS18B20, BME280, and Touch Pin.
#
# Copyright (c) 2020 ks-tec
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to dealin the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sellcopies of the Software, and to permit persons to whom the Software
# is furnished to do so, subject to the following conditions:
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE NOT LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS INTHE SOFTWARE.
from machine import I2C, Pin, TouchPad
import os, sys, machine, onewire, ubinascii, ujson, utime, _thread
from lib import ssd1306, bme280, ds18, relay, waterlevel, util
from resource import splashicon
# application setting file (JSON; parsed by load_settings at startup)
CONFIG_FILE = "hydroponic.json"
# ==================== Main Functions ====================
def main():
  """
  Main function for Hydroponic system.

  Shows the splash screen, waits, runs the platform check, then starts the
  two worker threads: the OLED display refresh loop and the automatic water
  supply control loop.
  """
  splash_screen()
  utime.sleep_ms(DISPLAY_WAITING_SPLASH)
  check_platform()
  utime.sleep_ms(DISPLAY_WAITING_PLATFORM)
  # thread start: each tuple supplies the callback's positional arguments
  _thread.start_new_thread(display_callback, (1, OLED_INTERVAL - ds18.reading_wait))
  _thread.start_new_thread(wsupply_callback, (2, WATER_SUPPLY_ON_INTERVAL, WATER_SUPPLY_OFF_INTERVAL))
# ==================== Callback Functions ====================
def display_callback(id, interval_ms):
  """
  Callback function to read values from BME280, DS18x20 and the water level
  detector, and show those values on the OLED.

  Args:
    id          : thread id
    interval_ms : interval time to repeat this function
  """
  while True:
    oled.fill(0)
    oled.text("[air]", 0, 0)                # [air]
    oled.text("T=" + bme.values[0], 0, 10)  # - temperature
    oled.text("H=" + bme.values[2], 64, 10) # - humidity
    oled.text("P=" + bme.values[1], 0, 20)  # - pressure
    oled.text("[water]", 0, 30)             # [water]
    oled.text("W=" + ds18.values[0], 0, 40) # - temperature
    if wlevel is not None:
      oled.text("L=" + get_wlevel(), 64, 40)  # - water level
    oled.show()
    # Sleep in 1-second ticks (capped at one hour), drawing a progress dot per
    # tick, until interval_ms has elapsed.
    for cnt in range(3600):                 # max waiting 1hour = 60min = 3600sec
      utime.sleep_ms(1000)
      oled.text(".", 8*cnt, 55)             # NOTE(review): dots pass x=128 after 16 ticks — confirm display width
      oled.show()
      waiting = (cnt + 1) * 1000
      if interval_ms <= waiting:            # waiting limit has exceeded interval_ms
        break
      # BUG FIX: the original ended this loop body with "cnt += 1", but the
      # for statement reassigns cnt on every iteration, so the increment was
      # a dead (and misleading) statement; it has been removed.
def wsupply_callback(id, interval_on_ms, interval_off_ms):
  """
  Callback function for water supply relay control.
  The water supply relay switches to ON when the water level falls below the
  supply-start level, and back to OFF once the level reaches the
  supply-finish level.
  The loop exits immediately when the relay is disabled, and also exits (with
  a warning) when the relay is enabled but water level detection is off,
  since automatic control is impossible without level readings.

  Args:
    id              : thread id
    interval_on_ms  : interval time to detect the water level and turn on the relay
    interval_off_ms : interval time to detect the water level and turn off the relay
  """
  while True:
    # thread loop is finish, because water supply is off in setting
    if wsupply is None:
      break

    # thread loop is finish, because water level dection is off in setting
    if wlevel is None:
      print("=" * 20)
      print("Warning @{}".format(wsupply_callback.__name__))
      print("  The thread for automatic water relay control is terminated because water level dection is off.")
      print("=" * 20)
      break

    # when the detected water level is under the water supply start level
    value = get_wlevel(False)
    if value < wsupply.supply_start:
      print("water supply swith to ON. (L={:3.1f})".format(value))
      wsupply.on()

      # continue water supply until water supply finish level
      while value < wsupply.supply_finish:
        utime.sleep_ms(interval_off_ms)
        value = get_wlevel(False)
        # print("L=({})".format(value))

      # when the detected water level is over the water supply finish level
      wsupply.off()
      print("water supply swith to OFF. (L={:3.1f})".format(value))

    utime.sleep_ms(interval_on_ms)
def conv_temperature(value, unit):
"""
"""
if type(unit) is str and unit.upper() in ["C", "F"]:
raise TypeError("the type of paramter unit must be string.")
if unit.upper() == "C":
pass
elif unit.upper() == "F":
value = value * 1.8 + 32
else:
raise ValueError("")
return value
# ==================== Configuration Functions ====================
def load_settings(filename):
"""
Load application setting values from specified file.
The contents of the file must be in json format, and keywords are fixed.
The read value is converted once as string, and then re-converted to the required data type and held in each global variables.
Args:
filename : file name of setting file
Raises:
ValueError : A filename of settings is not specified.
OSError : A setting file is not exists.
"""
global DISPLAY_SPLASH_ICON, DISPLAY_WAITING_SPLASH, DISPLAY_WAITING_PLATFORM, DISPLAY_TEMPERATURE_UNIT
global OLED_PIN_SCL, OLED_PIN_SDA, OLED_ADDRESS, OLED_WIDTH, OLED_HEIGHT, OLED_INTERVAL
global BME280_PIN_SCL, BME280_PIN_SDA, BME280_ADDRESS
global DS18_PIN_DQ, DS18_ADDRESS, DS18_READING_WAIT
global WATER_LEVEL_ENABLE, WATER_LEVEL_PIN, WATER_LEVEL_SENSE_MAX, WATER_LEVEL_SENSE_MIN
global WATER_SUPPLY_ENABLE, WATER_SUPPLY_PIN, WATER_SUPPLY_START, WATER_SUPPLY_FINISH, WATER_SUPPLY_ON_INTERVAL, WATER_SUPPLY_OFF_INTERVAL
if filename is None or len(filename) == 0:
raise ValueError("An application setting file is required.")
elif filename not in os.listdir():
raise OSError("An application setting file is NOT exists.")
with open(filename) as f:
settings = ujson.load(f)
# COMMON settings
DISPLAY_SPLASH_ICON = str(settings["COMMON"]["SPLASH_ICON"]).lower()
DISPLAY_WAITING_SPLASH = int(str(settings["COMMON"]["SPLASH_WAITING"]))
DISPLAY_WAITING_PLATFORM = int(str(settings["COMMON"]["PLATFORM_WAITING"]))
DISPLAY_TEMPERATURE_UNIT = str(settings["COMMON"]["TEMPERATURE_UNIT"])
# OLED settings
OLED_PIN_SCL = int(str(settings["OLED"]["PIN_SCL"]))
OLED_PIN_SDA = int(str(settings["OLED"]["PIN_SDA"]))
OLED_ADDRESS = int(str(settings["OLED"]["ADDRESS"]))
OLED_WIDTH = int(str(settings["OLED"]["WIDTH"]))
OLED_HEIGHT = int(str(settings["OLED"]["HEIGHT"]))
OLED_INTERVAL = int(str(settings["OLED"]["DISPLAY_INTERVAL"]))
# BME280 settings
BME280_PIN_SCL = int(str(settings["BME280"]["PIN_SCL"]))
BME280_PIN_SDA = int(str(settings["BME280"]["PIN_SDA"]))
BME280_ADDRESS = int(str(settings["BME280"]["ADDRESS"]))
# DS18B20 settinsgs
DS18_PIN_DQ = int(str(settings["DS18X20"]["PIN_DQ"]))
DS18_ADDRESS = [int(str(addr)) for addr in settings["DS18X20"]["ADDRESS"]]
DS18_READING_WAIT = int(str(settings["DS18X20"]["READING_WAIT"]))
# WATER LEVEL SENSOR settings
WATER_LEVEL_ENABLE = util.strtobool(str(settings["WATER_LEVEL"]["IS_ENABLE"]))
WATER_LEVEL_PIN = int(str(settings["WATER_LEVEL"]["PIN_DQ"]))
WATER_LEVEL_SENSE_MAX = int(str(settings["WATER_LEVEL"]["SENSE_MAX"]))
WATER_LEVEL_SENSE_MIN = int(str(settings["WATER_LEVEL"]["SENSE_MIN"]))
# WATER SUPPLY RELAY settings
WATER_SUPPLY_ENABLE = util.strtobool(str(settings["WATER_SUPPLY"]["IS_ENABLE"]))
WATER_SUPPLY_PIN = int(str(settings["WATER_SUPPLY"]["PIN_DQ"]))
WATER_SUPPLY_START = float(str(settings["WATER_SUPPLY"]["SUPPLY_START"]))
WATER_SUPPLY_FINISH = float(str(settings["WATER_SUPPLY"]["SUPPLY_FINISH"]))
WATER_SUPPLY_ON_INTERVAL = int(str(settings["WATER_SUPPLY"]["DETECT_INTERVAL_ON"]))
WATER_SUPPLY_OFF_INTERVAL = int(str(settings["WATER_SUPPLY"]["DETECT_INTERVAL_OFF"]))
# ==================== I2C device Functions ====================
def detect_i2c_device(i2c=None, device=None, address=None):
"""
I2C device scan and it was found or else, show message.
Args:
i2c : machine.I2C object
device : name of I2C device to display
address : address of I2C device
Raises:
ValueError : One of the paramters is not specified.
"""
if i2c is None:
raise ValueError("An I2C object is required.")
if address is None:
raise ValueError("A device address is required.")
if device is None or len(device) == 0:
raise ValueError("A device name is required.")
print("Detecting {} ...".format(device))
i2cDevs = i2c.scan()
for idx, dev in enumerate(i2cDevs):
if dev == address:
print(" Found {} device: ['{}']".format(device, hex(dev)))
break
else:
print(" NOT Found I2C device, check wiring of device !")
# ==================== SPI device Functions ====================
def detect_ow_device(ow=None, device=None, address=None):
"""
1-Wire device scan and it was found, show message.
Args:
ow : machine.OneWire object
device : name of 1-Wire device to display
address : list of address for 1-Wire deviece address
Raises:
ValueError : One of the paramters is not specified.
"""
if ow is None:
raise ValueError("An ow object is required.")
if address is None:
raise ValueError("A device address is required.")
if device is None or len(device) == 0:
raise ValueError("A device name is required.")
print("Detecting {} ...".format(device))
owDevs = ow.scan()
for idx, dev in enumerate(owDevs):
addr_int = [int(r) for r in dev]
if addr_int == address:
print(" Found {} device: {}".format(device, [hex(r) for r in dev]))
break
else:
print(" NOT Found 1-Wire device, check wiring of device !")
# ==================== Platform Functions ====================
def check_platform():
"""
Check running platform, and show result to OLED.
Raises:
OSError : The running platform is not ESP32 board.
"""
platform = sys.platform
chip_id = str(ubinascii.hexlify(machine.unique_id()))[2:14]
pclk = machine.freq() // (1000 ** 2)
supported = " Supported"
if platform != "esp32":
raise OSError("Platform is esp32 board required.")
oled.fill(0)
oled.show()
oled.text(platform, 0, 0)
oled.text(supported, 0, 10)
oled.text("UID {}".format(chip_id), 0, 20)
oled.text("PCLK {}MHz".format(pclk) , 0, 30)
oled.show()
print("-" * 20)
print("PLATFORM : {}".format(platform))
print("CHIP UID : {}".format(chip_id))
print("PERIPHERAL CLOCK : {} MHz".format(pclk))
print("-" * 20)
# ==================== OLED Functions ====================
def splash_screen():
"""
Splash logo image to OLED from binary array.
Raises:
ValueError : The parameter value is not in "v" "vertical" "h" "horizontal".
"""
icon = None
if DISPLAY_SPLASH_ICON in ["vertical", "v"]:
icon = splashicon.SplashIcon.logo_v()
elif DISPLAY_SPLASH_ICON in ["horizontal", "h"]:
icon = splashicon.SplashIcon.logo_h()
else:
raise ValueError("The value of 'DISPLAY_SPLASH_ICON' can specify 'v' or 'h' only.")
dx = (oled.width - icon.logo_width) // 2
dy = (oled.height - icon.logo_height) // 2
oled.fill(0)
oled.show()
for y, fila in enumerate(icon.logo_icon):
for x, c in enumerate(fila):
oled.pixel(x + dx, y + dy, c)
oled.show()
# ==================== Water Level Functions ====================
def get_wlevel(with_unit=True):
"""
Remove units from the tuple head index value returned by WaterLevelSensor.
And returns it as a float value.
Also, it uses a lock object because it is called from within the thread.
Args:
with_unit : False is remove units, True does nothing. True is default value.
Retun:
The value part of the tuple head index value returned by WaterLevelSensor.
"""
if wlevel is None:
raise OSError("The water level dection setting is off, must be on.")
with lock:
ret_value = wlevel.values[0]
if with_unit == False:
ret_value = float(ret_value[:len(ret_value)-2])
return ret_value
# ==================== Entry Point ====================
if __name__ == "__main__":
"""
Entry point at functional execution.
"""
try:
# load configuration values
load_settings(CONFIG_FILE)
# gobal devices initialization (I2C OLED SSD1306)
i2c = I2C(scl=Pin(OLED_PIN_SCL), sda=Pin(OLED_PIN_SDA))
oled = ssd1306.SSD1306_I2C(width=OLED_WIDTH, height=OLED_HEIGHT, i2c=i2c)
detect_i2c_device(i2c, "SSD1306", OLED_ADDRESS)
# gobal devices initialization (I2C BME280)
i2c = I2C(scl=Pin(BME280_PIN_SCL), sda=Pin(BME280_PIN_SDA))
bme = bme280.BME280(i2c=i2c, unit=DISPLAY_TEMPERATURE_UNIT)
detect_i2c_device(i2c, "BME280", BME280_ADDRESS)
# gobal devices initialization (1-Wire DS18B20)
ow = onewire.OneWire(pin=Pin(DS18_PIN_DQ))
ds18 = ds18.DS18(ow=ow, reading_wait=DS18_READING_WAIT, unit=DISPLAY_TEMPERATURE_UNIT)
detect_ow_device(ds18, "DS18X20", DS18_ADDRESS)
# global devices initialization (Water Level Capacitive Sensor)
wlevel = None
if WATER_LEVEL_ENABLE == True:
tp = TouchPad(Pin(WATER_LEVEL_PIN))
wlevel = waterlevel.WaterLevelSensor(tp=tp, sense_max=WATER_LEVEL_SENSE_MAX, sense_min=WATER_LEVEL_SENSE_MIN)
# global devices initialization (Water Supply Relay)
wsupply = None
if WATER_SUPPLY_ENABLE == True:
wsupply = relay.Relay(pin=Pin(WATER_SUPPLY_PIN, mode=Pin.OUT), supply_start=WATER_SUPPLY_START, supply_finish=WATER_SUPPLY_FINISH)
wsupply.off()
# call main routine
lock = _thread.allocate_lock()
main()
except Exception as e:
print("\nAn error has occured !")
print("-" * 20)
sys.print_exception(e)
print("-" * 20)
| 34.506083 | 140 | 0.675152 | # This is Hydroponic project in MicroPython with the ESP32 board.
# Using devices are SSD1306 OLED, DS18B20, BME280, and Touch Pin.
#
# Copyright (c) 2020 ks-tec
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to dealin the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sellcopies of the Software, and to permit persons to whom the Software
# is furnished to do so, subject to the following conditions:
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE NOT LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS INTHE SOFTWARE.
from machine import I2C, Pin, TouchPad
import os, sys, machine, onewire, ubinascii, ujson, utime, _thread
from lib import ssd1306, bme280, ds18, relay, waterlevel, util
from resource import splashicon
# application setting file
CONFIG_FILE = "hydroponic.json"
# ==================== Main Functions ====================
def main():
"""
Main function for Hydroponic system.
"""
splash_screen()
utime.sleep_ms(DISPLAY_WAITING_SPLASH)
check_platform()
utime.sleep_ms(DISPLAY_WAITING_PLATFORM)
# thread start
_thread.start_new_thread(display_callback, (1, OLED_INTERVAL - ds18.reading_wait))
_thread.start_new_thread(wsupply_callback, (2, WATER_SUPPLY_ON_INTERVAL, WATER_SUPPLY_OFF_INTERVAL))
# ==================== Callback Functions ====================
def display_callback(id, interval_ms):
"""
Callback function for read values from BME280 and DS18x20, water level detector.
After that, bellow showing values to OLED.
Args:
id : thread id
interval_ms : interval time to repeat this function
"""
while True:
oled.fill(0)
oled.text("[air]", 0, 0) # [air]
oled.text("T=" + bme.values[0], 0, 10) # - temperature
oled.text("H=" + bme.values[2], 64, 10) # - humidity
oled.text("P=" + bme.values[1], 0, 20) # - pressure
oled.text("[water]", 0, 30) # [water]
oled.text("W=" + ds18.values[0], 0, 40) # - temperature
if wlevel is not None:
oled.text("L=" + get_wlevel(), 64, 40) # - water level
oled.show()
for cnt in range(3600): # max waiting 1hour = 60min = 3600sec
utime.sleep_ms(1000)
oled.text(".", 8*cnt, 55)
oled.show()
waiting = (cnt + 1) * 1000
if interval_ms <= waiting: # waiting limit has exceeded interval_ms
break
cnt += 1
def wsupply_callback(id, interval_on_ms, interval_off_ms):
"""
Callback function for water supply relay control.
The water supply relay switch to ON when the water level is under water supply start level.
The water supply relay switch to OFF when the water level is over the water supply funish level.
The thread loop can not start and it is terminated, if the water supply is on and the water level detection is off.
Args:
id : thread id
interval_on_ms : interval time to detect the water level and turn on the relay
interval_off_ms : interval time to detect the water level and turn off the relay
"""
while True:
# thread loop is finish, because water supply is off in setting
if wsupply is None:
break
# thread loop is finish, because water level dection is off in setting
if wlevel is None:
print("=" * 20)
print("Warning @{}".format(wsupply_callback.__name__))
print(" The thread for automatic water relay control is terminated because water level dection is off.")
print("=" * 20)
break
# when the detected water level is under the water supply start level
value = get_wlevel(False)
if value < wsupply.supply_start:
print("water supply swith to ON. (L={:3.1f})".format(value))
wsupply.on()
# continue water supply until water supply finish level
while value < wsupply.supply_finish:
utime.sleep_ms(interval_off_ms)
value = get_wlevel(False)
# print("L=({})".format(value))
# when the detected water level is over the water supply finish level
wsupply.off()
print("water supply swith to OFF. (L={:3.1f})".format(value))
utime.sleep_ms(interval_on_ms)
def conv_temperature(value, unit):
"""
"""
if type(unit) is str and unit.upper() in ["C", "F"]:
raise TypeError("the type of paramter unit must be string.")
if unit.upper() == "C":
pass
elif unit.upper() == "F":
value = value * 1.8 + 32
else:
raise ValueError("")
return value
# ==================== Configuration Functions ====================
def load_settings(filename):
"""
Load application setting values from specified file.
The contents of the file must be in json format, and keywords are fixed.
The read value is converted once as string, and then re-converted to the required data type and held in each global variables.
Args:
filename : file name of setting file
Raises:
ValueError : A filename of settings is not specified.
OSError : A setting file is not exists.
"""
global DISPLAY_SPLASH_ICON, DISPLAY_WAITING_SPLASH, DISPLAY_WAITING_PLATFORM, DISPLAY_TEMPERATURE_UNIT
global OLED_PIN_SCL, OLED_PIN_SDA, OLED_ADDRESS, OLED_WIDTH, OLED_HEIGHT, OLED_INTERVAL
global BME280_PIN_SCL, BME280_PIN_SDA, BME280_ADDRESS
global DS18_PIN_DQ, DS18_ADDRESS, DS18_READING_WAIT
global WATER_LEVEL_ENABLE, WATER_LEVEL_PIN, WATER_LEVEL_SENSE_MAX, WATER_LEVEL_SENSE_MIN
global WATER_SUPPLY_ENABLE, WATER_SUPPLY_PIN, WATER_SUPPLY_START, WATER_SUPPLY_FINISH, WATER_SUPPLY_ON_INTERVAL, WATER_SUPPLY_OFF_INTERVAL
if filename is None or len(filename) == 0:
raise ValueError("An application setting file is required.")
elif filename not in os.listdir():
raise OSError("An application setting file is NOT exists.")
with open(filename) as f:
settings = ujson.load(f)
# COMMON settings
DISPLAY_SPLASH_ICON = str(settings["COMMON"]["SPLASH_ICON"]).lower()
DISPLAY_WAITING_SPLASH = int(str(settings["COMMON"]["SPLASH_WAITING"]))
DISPLAY_WAITING_PLATFORM = int(str(settings["COMMON"]["PLATFORM_WAITING"]))
DISPLAY_TEMPERATURE_UNIT = str(settings["COMMON"]["TEMPERATURE_UNIT"])
# OLED settings
OLED_PIN_SCL = int(str(settings["OLED"]["PIN_SCL"]))
OLED_PIN_SDA = int(str(settings["OLED"]["PIN_SDA"]))
OLED_ADDRESS = int(str(settings["OLED"]["ADDRESS"]))
OLED_WIDTH = int(str(settings["OLED"]["WIDTH"]))
OLED_HEIGHT = int(str(settings["OLED"]["HEIGHT"]))
OLED_INTERVAL = int(str(settings["OLED"]["DISPLAY_INTERVAL"]))
# BME280 settings
BME280_PIN_SCL = int(str(settings["BME280"]["PIN_SCL"]))
BME280_PIN_SDA = int(str(settings["BME280"]["PIN_SDA"]))
BME280_ADDRESS = int(str(settings["BME280"]["ADDRESS"]))
# DS18B20 settinsgs
DS18_PIN_DQ = int(str(settings["DS18X20"]["PIN_DQ"]))
DS18_ADDRESS = [int(str(addr)) for addr in settings["DS18X20"]["ADDRESS"]]
DS18_READING_WAIT = int(str(settings["DS18X20"]["READING_WAIT"]))
# WATER LEVEL SENSOR settings
WATER_LEVEL_ENABLE = util.strtobool(str(settings["WATER_LEVEL"]["IS_ENABLE"]))
WATER_LEVEL_PIN = int(str(settings["WATER_LEVEL"]["PIN_DQ"]))
WATER_LEVEL_SENSE_MAX = int(str(settings["WATER_LEVEL"]["SENSE_MAX"]))
WATER_LEVEL_SENSE_MIN = int(str(settings["WATER_LEVEL"]["SENSE_MIN"]))
# WATER SUPPLY RELAY settings
WATER_SUPPLY_ENABLE = util.strtobool(str(settings["WATER_SUPPLY"]["IS_ENABLE"]))
WATER_SUPPLY_PIN = int(str(settings["WATER_SUPPLY"]["PIN_DQ"]))
WATER_SUPPLY_START = float(str(settings["WATER_SUPPLY"]["SUPPLY_START"]))
WATER_SUPPLY_FINISH = float(str(settings["WATER_SUPPLY"]["SUPPLY_FINISH"]))
WATER_SUPPLY_ON_INTERVAL = int(str(settings["WATER_SUPPLY"]["DETECT_INTERVAL_ON"]))
WATER_SUPPLY_OFF_INTERVAL = int(str(settings["WATER_SUPPLY"]["DETECT_INTERVAL_OFF"]))
# ==================== I2C device Functions ====================
def detect_i2c_device(i2c=None, device=None, address=None):
"""
I2C device scan and it was found or else, show message.
Args:
i2c : machine.I2C object
device : name of I2C device to display
address : address of I2C device
Raises:
ValueError : One of the paramters is not specified.
"""
if i2c is None:
raise ValueError("An I2C object is required.")
if address is None:
raise ValueError("A device address is required.")
if device is None or len(device) == 0:
raise ValueError("A device name is required.")
print("Detecting {} ...".format(device))
i2cDevs = i2c.scan()
for idx, dev in enumerate(i2cDevs):
if dev == address:
print(" Found {} device: ['{}']".format(device, hex(dev)))
break
else:
print(" NOT Found I2C device, check wiring of device !")
# ==================== SPI device Functions ====================
def detect_ow_device(ow=None, device=None, address=None):
"""
1-Wire device scan and it was found, show message.
Args:
ow : machine.OneWire object
device : name of 1-Wire device to display
address : list of address for 1-Wire deviece address
Raises:
ValueError : One of the paramters is not specified.
"""
if ow is None:
raise ValueError("An ow object is required.")
if address is None:
raise ValueError("A device address is required.")
if device is None or len(device) == 0:
raise ValueError("A device name is required.")
print("Detecting {} ...".format(device))
owDevs = ow.scan()
for idx, dev in enumerate(owDevs):
addr_int = [int(r) for r in dev]
if addr_int == address:
print(" Found {} device: {}".format(device, [hex(r) for r in dev]))
break
else:
print(" NOT Found 1-Wire device, check wiring of device !")
# ==================== Platform Functions ====================
def check_platform():
"""
Check running platform, and show result to OLED.
Raises:
OSError : The running platform is not ESP32 board.
"""
platform = sys.platform
chip_id = str(ubinascii.hexlify(machine.unique_id()))[2:14]
pclk = machine.freq() // (1000 ** 2)
supported = " Supported"
if platform != "esp32":
raise OSError("Platform is esp32 board required.")
oled.fill(0)
oled.show()
oled.text(platform, 0, 0)
oled.text(supported, 0, 10)
oled.text("UID {}".format(chip_id), 0, 20)
oled.text("PCLK {}MHz".format(pclk) , 0, 30)
oled.show()
print("-" * 20)
print("PLATFORM : {}".format(platform))
print("CHIP UID : {}".format(chip_id))
print("PERIPHERAL CLOCK : {} MHz".format(pclk))
print("-" * 20)
# ==================== OLED Functions ====================
def splash_screen():
"""
Splash logo image to OLED from binary array.
Raises:
ValueError : The parameter value is not in "v" "vertical" "h" "horizontal".
"""
icon = None
if DISPLAY_SPLASH_ICON in ["vertical", "v"]:
icon = splashicon.SplashIcon.logo_v()
elif DISPLAY_SPLASH_ICON in ["horizontal", "h"]:
icon = splashicon.SplashIcon.logo_h()
else:
raise ValueError("The value of 'DISPLAY_SPLASH_ICON' can specify 'v' or 'h' only.")
dx = (oled.width - icon.logo_width) // 2
dy = (oled.height - icon.logo_height) // 2
oled.fill(0)
oled.show()
for y, fila in enumerate(icon.logo_icon):
for x, c in enumerate(fila):
oled.pixel(x + dx, y + dy, c)
oled.show()
# ==================== Water Level Functions ====================
def get_wlevel(with_unit=True):
"""
Remove units from the tuple head index value returned by WaterLevelSensor.
And returns it as a float value.
Also, it uses a lock object because it is called from within the thread.
Args:
with_unit : False is remove units, True does nothing. True is default value.
Retun:
The value part of the tuple head index value returned by WaterLevelSensor.
"""
if wlevel is None:
raise OSError("The water level dection setting is off, must be on.")
with lock:
ret_value = wlevel.values[0]
if with_unit == False:
ret_value = float(ret_value[:len(ret_value)-2])
return ret_value
# ==================== Entry Point ====================
if __name__ == "__main__":
"""
Entry point at functional execution.
"""
try:
# load configuration values
load_settings(CONFIG_FILE)
# gobal devices initialization (I2C OLED SSD1306)
i2c = I2C(scl=Pin(OLED_PIN_SCL), sda=Pin(OLED_PIN_SDA))
oled = ssd1306.SSD1306_I2C(width=OLED_WIDTH, height=OLED_HEIGHT, i2c=i2c)
detect_i2c_device(i2c, "SSD1306", OLED_ADDRESS)
# gobal devices initialization (I2C BME280)
i2c = I2C(scl=Pin(BME280_PIN_SCL), sda=Pin(BME280_PIN_SDA))
bme = bme280.BME280(i2c=i2c, unit=DISPLAY_TEMPERATURE_UNIT)
detect_i2c_device(i2c, "BME280", BME280_ADDRESS)
# gobal devices initialization (1-Wire DS18B20)
ow = onewire.OneWire(pin=Pin(DS18_PIN_DQ))
ds18 = ds18.DS18(ow=ow, reading_wait=DS18_READING_WAIT, unit=DISPLAY_TEMPERATURE_UNIT)
detect_ow_device(ds18, "DS18X20", DS18_ADDRESS)
# global devices initialization (Water Level Capacitive Sensor)
wlevel = None
if WATER_LEVEL_ENABLE == True:
tp = TouchPad(Pin(WATER_LEVEL_PIN))
wlevel = waterlevel.WaterLevelSensor(tp=tp, sense_max=WATER_LEVEL_SENSE_MAX, sense_min=WATER_LEVEL_SENSE_MIN)
# global devices initialization (Water Supply Relay)
wsupply = None
if WATER_SUPPLY_ENABLE == True:
wsupply = relay.Relay(pin=Pin(WATER_SUPPLY_PIN, mode=Pin.OUT), supply_start=WATER_SUPPLY_START, supply_finish=WATER_SUPPLY_FINISH)
wsupply.off()
# call main routine
lock = _thread.allocate_lock()
main()
except Exception as e:
print("\nAn error has occured !")
print("-" * 20)
sys.print_exception(e)
print("-" * 20)
| 0 | 0 | 0 |
f19d270dc48a4d8e462331fd511b2c2742b7e7b5 | 689 | py | Python | stats_job/database_test.py | arxcruz/tripleo-stats-backend | c4cfb971bbc8e67825d357df2dc9214fda81f2fa | [
"Apache-2.0"
] | null | null | null | stats_job/database_test.py | arxcruz/tripleo-stats-backend | c4cfb971bbc8e67825d357df2dc9214fda81f2fa | [
"Apache-2.0"
] | null | null | null | stats_job/database_test.py | arxcruz/tripleo-stats-backend | c4cfb971bbc8e67825d357df2dc9214fda81f2fa | [
"Apache-2.0"
] | null | null | null | import datetime
from sqlalchemy import Date, func, cast
from sqlalchemy.orm import sessionmaker
from database.model import engine
from database.model import JobRun
if __name__ == '__main__':
show_data()
| 28.708333 | 107 | 0.692308 | import datetime
from sqlalchemy import Date, func, cast
from sqlalchemy.orm import sessionmaker
from database.model import engine
from database.model import JobRun
def show_data():
Session = sessionmaker(engine)
session = Session()
query = session.query(func.count().label('count'), JobRun.failure_type).group_by(
JobRun.failure_type).all()
# .filter(func.date(JobRun.date) == func.date((datetime.datetime.today()-datetime.timedelta(4)))).all()
# .filter(JobRun.date == datetime.datetime.now()-datetime.timedelta(3) ).all()
for row in query:
print('{} - {}'.format(row.count, row.failure_type))
if __name__ == '__main__':
show_data()
| 456 | 0 | 23 |
30b3a8a367927c57abd32d63596d445e1516ee84 | 9,496 | py | Python | qlknn/dataset/data_io.py | Karel-van-de-Plassche/QLKNN-develop | f2d29be625c2ddbddad6c1e98e5c03a43cf2797f | [
"MIT"
] | null | null | null | qlknn/dataset/data_io.py | Karel-van-de-Plassche/QLKNN-develop | f2d29be625c2ddbddad6c1e98e5c03a43cf2797f | [
"MIT"
] | null | null | null | qlknn/dataset/data_io.py | Karel-van-de-Plassche/QLKNN-develop | f2d29be625c2ddbddad6c1e98e5c03a43cf2797f | [
"MIT"
] | 2 | 2018-02-28T14:18:43.000Z | 2018-11-26T11:06:08.000Z | import gc
from collections import OrderedDict
import warnings
import re
import pandas as pd
import numpy as np
from IPython import embed
try:
import dask.dataframe as dd
has_dask = True
except ImportError:
warnings.warn('Dask not found')
has_dask = False
try:
profile
except NameError:
from qlknn.misc.tools import profile
from qlknn.misc.analyse_names import heat_vars, particle_vars, particle_diffusion_vars, momentum_vars, is_flux, is_growth
from qlknn.misc.tools import first
store_format = 'fixed'
sep_prefix = '/output/'
@profile
@profile
| 40.931034 | 165 | 0.542544 | import gc
from collections import OrderedDict
import warnings
import re
import pandas as pd
import numpy as np
from IPython import embed
try:
import dask.dataframe as dd
has_dask = True
except ImportError:
warnings.warn('Dask not found')
has_dask = False
try:
profile
except NameError:
from qlknn.misc.tools import profile
from qlknn.misc.analyse_names import heat_vars, particle_vars, particle_diffusion_vars, momentum_vars, is_flux, is_growth
from qlknn.misc.tools import first
store_format = 'fixed'
sep_prefix = '/output/'
@profile
def convert_nustar(input_df):
# Nustar relates to the targets with a log
try:
input_df['logNustar'] = np.log10(input_df['Nustar'])
del input_df['Nustar']
except KeyError:
print('No Nustar in dataset')
return input_df
def put_to_store_or_df(store_or_df, name, var, store_prefix=sep_prefix):
if isinstance(store_or_df, pd.HDFStore):
store_or_df.put(''.join([store_prefix, name]),
var, format=store_format)
else:
store_or_df[name] = var
def separate_to_store(data, store, save_flux=True, save_growth=True, save_all=False, verbose=False, **put_kwargs):
for col in data:
key = ''.join([sep_prefix, col])
splitted = re.compile('(?=.*)(.)(|ITG|ETG|TEM)_(GB|SI|cm)').split(col)
if ((is_flux(col) and save_flux) or
(is_growth(col) and save_growth) or
save_all):
if verbose:
print('Saving', col)
store.put(key, data[col].dropna(), format=store_format, **put_kwargs)
else:
if verbose:
print('Do not save', col)
def save_to_store(input, data, const, store_name, style='both', zip=False, prefix='/'):
if zip is True:
kwargs = {'complevel': 1,
'complib': 'zlib'}
store_name += '.1'
else:
kwargs = {}
store = pd.HDFStore(store_name)
if style == 'sep' or style == 'both':
separate_to_store(data, store, save_all=True, **kwargs)
if style == 'flat' or style == 'both':
if len(data) > 0:
store.put('flattened', data, format=store_format, **kwargs)
else:
store.put('flattened', data, format='fixed', **kwargs)
store.put(prefix + 'input', input, format=store_format, **kwargs)
with warnings.catch_warnings():
warnings.simplefilter("ignore", pd.errors.PerformanceWarning)
store.put(prefix + 'constants', const)
store.close()
@profile
def load_from_store(store_name=None, store=None, fast=True, mode='bare', how='left', columns=None, prefix='', load_input=True, nustar_to_lognustar=True, dask=False):
if isinstance(columns, str):
columns = [columns]
elif isinstance(columns, pd.Series):
columns = columns.values
if store_name is not None and store is not None:
raise Exception('Specified both store and store name!')
if dask and not has_dask:
raise Exception('Requested dask, but dask import failed')
if store is None:
store = pd.HDFStore(store_name, 'r')
elif dask:
raise ValueError('store cannot be passed if dask=True')
is_legacy = lambda store: all(['megarun' in name for name in store.keys()])
if is_legacy(store):
warnings.warn('Using legacy datafile!')
prefix = '/megarun1/'
has_flattened = lambda store: any(['flattened' in group for group in store.keys()])
have_sep = lambda columns: columns is None or (len(names) == len(columns))
return_all = lambda columns: columns is None
return_no = lambda columns: columns is False
names = store.keys()
# Associate 'nice' name with 'ugly' HDF5 node path
names = [(name, name.replace(prefix + sep_prefix, '', 1))
for name in names
if (('input' not in name) and
('constants' not in name) and
('flattened' not in name))]
# Only return columns the user asked for
if not return_all(columns):
names = [(varname, name) for (varname, name) in names if name in columns]
names = OrderedDict(names)
# Load input and constants
if load_input:
if dask:
store.close()
input = dd.read_hdf(store_name, prefix + 'input')
else:
input = store[prefix + 'input']
if nustar_to_lognustar:
input = convert_nustar(input)
else:
input = pd.DataFrame()
try:
store.open()
const = store[prefix + 'constants']
store.close()
except ValueError as ee:
# If pickled with a too new version, old python version cannot read it
warnings.warn('Could not load const.. Skipping for now')
const = pd.Series()
store.open()
if has_flattened(store) and (return_all(columns) or not have_sep(columns)):
#print('Taking "old" code path')
if return_all(columns):
if dask:
data = dd.read_hdf(store_name, prefix + 'flattened', chunksize=8192*10)
else:
data = store.select(prefix + 'flattened')
elif return_no(columns):
data = pd.DataFrame(index=input.index)
else:
if dask:
data = dd.read_hdf(store_name, prefix + 'flattened', columns=columns)
else:
storer = store.get_storer(prefix + 'flattened')
if storer.format_type == 'fixed':
data = store.select(prefix + 'flattened')
not_in_flattened = [col not in data.columns for col in columns]
if any(not_in_flattened):
raise Exception('Could not find {!s} in store {!s}'.format([col for not_in, col in zip(not_in_flattened, columns) if not_in], store))
else:
print("Not implemented yet, but shouldn't happen anyway.. Contact Karel")
from IPython import embed
embed()
else:
data = store.select(prefix + 'flattened', columns=columns)
else: #If no flattened
#print('Taking "new" code path')
if not have_sep(columns):
raise Exception('Could not find {!s} in store {!s}'.format(columns, store))
if not return_no(columns):
if dask:
data = dd.read_hdf(store_name, '/output/*', columns=columns, chunksize=8192*10)
elif fast:
output = []
for varname, name in names.items():
var = store[varname]
var.name = name
output.append(var)
data = pd.concat(output, axis=1)
del output
else:
if (mode != 'update') and (mode != 'bare'):
data = store[first(names)[0]].to_frame()
elif mode == 'update':
df = store[first(names)[0]]
data = pd.DataFrame(columns=names.values(), index=df.index)
df.name = first(names)[1]
data.update(df, raise_conflict=True)
elif mode == 'bare':
if not load_input:
raise Exception('Need to load input for mode {!s}'.format(mode))
raw_data = np.empty([len(input), len(names)])
ii = 0
varname = first(names)[0]
df = store[varname]
if df.index.equals(input.index):
raw_data[:, ii] = df.values
else:
raise Exception('Nonmatching index on {!s}!'.format(varname))
for ii, (varname, name) in enumerate(names.items()):
if ii == 0:
continue
if ('input' not in varname) and ('constants' not in varname):
if mode == 'join':
data = data.join(store[varname], how=how)
elif mode == 'concat':
data = pd.concat([data, store[varname]], axis=1, join='outer', copy=False)
elif mode == 'merge':
data = data.merge(store[varname].to_frame(), left_index=True, right_index=True,
how=how, copy=False)
elif mode == 'assign':
data = data.assign(**{name: store[varname]})
elif mode == 'update':
df = store[varname]
df.name = name
data.update(df, raise_conflict=True)
elif mode == 'bare':
df = store[varname].reindex(index=input.index)
if df.index.equals(input.index):
raw_data[:, ii] = df.values
else:
raise Exception('Nonmatching index on {!s}!'.format(varname))
del df
gc.collect()
if mode == 'bare':
data = pd.DataFrame(raw_data, columns=names.values(), index=input.index)
else: #Don't return any data
data = pd.DataFrame(index=input.index)
store.close()
gc.collect()
return input, data, const
| 8,807 | 0 | 113 |
d9329a55db13e5baab08945f156f37224d82a09e | 9,688 | py | Python | src/nn.py | mountain/planetarium | 14c5a75f9ac0be36f28d059c7bf7a77635d617da | [
"MIT"
] | 1 | 2018-03-03T18:58:01.000Z | 2018-03-03T18:58:01.000Z | src/nn.py | mountain/planetarium | 14c5a75f9ac0be36f28d059c7bf7a77635d617da | [
"MIT"
] | null | null | null | src/nn.py | mountain/planetarium | 14c5a75f9ac0be36f28d059c7bf7a77635d617da | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import matplotlib
matplotlib.use('Agg')
import sys
import time
import numpy as np
from physics import ode, hamilton, nbody
import unit.au as au
from os import environ
xp = np
if environ.get('CUDA_HOME') is not None:
xp = np
import torch as th
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from flare.learner import StandardLearner, cast
from flare.nn.nri import MLPEncoder, MLPDecoder, get_tril_offdiag_indices, get_triu_offdiag_indices
from flare.nn.nri import gumbel_softmax, my_softmax, encode_onehot, nll_gaussian, kl_categorical_uniform
from flare.dataset.decorators import attributes, segment, divid, sequential, shuffle, data, rebatch
epsilon = 0.00000001
MSCALE = 10
VSCALE = 100.0
SCALE = 10.0
BATCH = 5
REPEAT = 12
SIZE = 8
BODYCOUNT = 3
lr = 1e-5
mass = None
sun = None
lasttime = time.time()
@rebatch(repeat=REPEAT)
@shuffle(shufflefn, repeat=REPEAT)
@data()
@sequential(['xs.d'], ['ys.d'], layout_in=[SIZE, BATCH, BODYCOUNT, 8], layout_out=[3 * SIZE, BATCH, BODYCOUNT, 8])
@divid(lengths=[SIZE, 3 * SIZE], names=['xs', 'ys'])
@segment(segment_size = 4 * SIZE)
@attributes('yr', 'd')
mse = nn.MSELoss()
model = Model(bsize=BATCH)
optimizer = optim.Adam(model.parameters(), lr=1e-4, weight_decay=1e-8)
counter = 0
triu_indices = get_triu_offdiag_indices(8 * SIZE * BODYCOUNT)
tril_indices = get_tril_offdiag_indices(8 * SIZE * BODYCOUNT)
if th.cuda.is_available():
triu_indices = triu_indices.cuda()
tril_indices = tril_indices.cuda()
def set_aspect_equal_3d(ax):
    """Fix equal aspect bug for 3D plots.

    Matplotlib 3D axes ignore ``set_aspect('equal')``; work around it by
    expanding all three axes to the same half-width, each centred on the
    midpoint of its current limits.
    """
    from numpy import mean

    limits = (ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d())
    centers = [mean(lim) for lim in limits]

    # Largest distance from any axis midpoint to either end of that axis.
    plot_radius = max(
        abs(bound - center)
        for lim, center in zip(limits, centers)
        for bound in lim
    )

    ax.set_xlim3d([centers[0] - plot_radius, centers[0] + plot_radius])
    ax.set_ylim3d([centers[1] - plot_radius, centers[1] + plot_radius])
    ax.set_zlim3d([centers[2] - plot_radius, centers[2] + plot_radius])
learner = StandardLearner(model, predict, loss, optimizer, batch=BATCH * REPEAT)
if __name__ == '__main__':
for epoch in range(10000):
print('.')
learner.learn(dataset(), dataset())
print('--------------------------------')
errsum = 0.0
for epoch in range(1000):
err = learner.test(dataset())
print(err)
errsum += err
print('--------------------------------')
print(errsum / 1000)
| 29.357576 | 130 | 0.54872 | # -*- coding: utf-8 -*-
import matplotlib
matplotlib.use('Agg')
import sys
import time
import numpy as np
from physics import ode, hamilton, nbody
import unit.au as au
from os import environ
xp = np
if environ.get('CUDA_HOME') is not None:
xp = np
import torch as th
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from flare.learner import StandardLearner, cast
from flare.nn.nri import MLPEncoder, MLPDecoder, get_tril_offdiag_indices, get_triu_offdiag_indices
from flare.nn.nri import gumbel_softmax, my_softmax, encode_onehot, nll_gaussian, kl_categorical_uniform
from flare.dataset.decorators import attributes, segment, divid, sequential, shuffle, data, rebatch
epsilon = 0.00000001
MSCALE = 10
VSCALE = 100.0
SCALE = 10.0
BATCH = 5
REPEAT = 12
SIZE = 8
BODYCOUNT = 3
lr = 1e-5
mass = None
sun = None
lasttime = time.time()
def msize(x):
    """Map a normalized mass ``x`` to an integer matplotlib marker size."""
    return int(6 * x + 1)
def shufflefn(xs, ys):
    """Randomly permute a (xs, ys) sample pair for data augmentation.

    Shuffles the body axis (axis -2) of inputs and targets
    independently, then shuffles the three spatial dimensions with the
    *same* permutation applied to the position channels (2:5) and the
    velocity channels (5:8) of both arrays, so positions and velocities
    stay aligned.

    NOTE(review): the order of the three ``np.random.shuffle`` calls is
    part of the behavior under a fixed seed; keep it unchanged.
    """
    # permute bodies (axis -2) of the input sequence
    perm = np.arange(xs.shape[-2])
    np.random.shuffle(perm)
    xs = xs[:, :, :, perm, :]
    # permute bodies of the target sequence, independently of xs
    perm = np.arange(ys.shape[-2])
    np.random.shuffle(perm)
    ys = ys[:, :, :, perm, :]
    # permute spatial dims: channel layout is [m, dh, p(3), v(3)], so the
    # same spatial permutation must hit channels 2:5 and 5:8 together
    seg = np.arange(2, 5, 1)
    np.random.shuffle(seg)
    perm = np.concatenate((np.array([0, 1]), seg, seg + 3))
    xs = xs[:, :, :, :, perm]
    ys = ys[:, :, :, :, perm]
    return xs, ys
def generator(sz, yrs, btch):
    """Yield (year, features) samples from a fresh n-body simulation.

    Integrates ``btch`` independent systems of ``sz`` bodies with random
    masses and positions (started at rest) for ``yrs`` simulated years,
    emitting one flattened feature vector per system every 10 years.
    Per-body channels are [mass, tanh(dH), tanh(position), tanh(velocity)]
    (8 values), so each yielded array has shape (btch, sz * 8).

    Side effects: overwrites the module globals ``mass`` and ``lasttime``.
    """
    global lasttime
    lasttime = time.time()
    global mass
    # Random masses and positions; bodies start at rest.
    mass = xp.random.rand(btch, sz) * MSCALE
    x = xp.random.rand(btch, sz, 3) * SCALE
    v = xp.zeros([btch, sz, 3])
    # Shift every system into its centre-of-mass frame.
    center = (np.sum(mass.reshape([btch, sz, 1]) * x, axis=1) / np.sum(mass, axis=1).reshape([btch, 1])).reshape([btch, 1, 3])
    x = x - center
    solver = ode.verlet(nbody.acceleration_of(au, mass))
    h = hamilton.hamiltonian(au, mass)
    lastha = h(x, v, limit=sz)
    t = 0
    lastyear = 0
    # 144 integration steps of dt=0.1 per simulated year — TODO confirm
    # the year/step relation against the unit.au module.
    for epoch in range(yrs * 144):
        t, x, v = solver(t, x, v, 0.1)
        # Re-centre after each step so positions stay bounded.
        center = (np.sum(mass.reshape([btch, sz, 1]) * x, axis=1) / np.sum(mass, axis=1).reshape([btch, 1])).reshape([btch, 1, 3])
        x = x - center
        year = int(t)
        # Emit a sample only when a new 10-year boundary is crossed.
        if 10 * int(year / 10) == lastyear + 10:
            lastyear = year
            rtp = x / SCALE
            rtv = v
            ha = h(x, v, limit=sz)
            # Hamiltonian drift since the previous emitted sample.
            dha = ha - lastha
            inputm = mass[:, :].reshape([btch, sz, 1]) / MSCALE
            inputp = xp.tanh(rtp.reshape([btch, sz, 3]))
            inputv = xp.tanh(rtv.reshape([btch, sz, 3]) * VSCALE)
            inputdh = xp.tanh(dha.reshape([btch, sz, 1]) / au.G * SCALE)
            input = np.concatenate([inputm, inputdh, inputp, inputv], axis=2).reshape([btch, sz * 8])
            yield year, input
            lastha = ha
            #print('-----------------------------')
            #print('m:', np.max(inputm), np.min(inputm))
            #print('p:', np.max(inputp), np.min(inputp))
            #print('v:', np.max(inputv), np.min(inputv))
            #print('h:', np.max(inputdh), np.min(inputdh))
            #print('-----------------------------')
            #sys.stdout.flush()
    print('gen:', time.time() - lasttime)
    sys.stdout.flush()
    lasttime = time.time()
@rebatch(repeat=REPEAT)
@shuffle(shufflefn, repeat=REPEAT)
@data()
@sequential(['xs.d'], ['ys.d'], layout_in=[SIZE, BATCH, BODYCOUNT, 8], layout_out=[3 * SIZE, BATCH, BODYCOUNT, 8])
@divid(lengths=[SIZE, 3 * SIZE], names=['xs', 'ys'])
@segment(segment_size = 4 * SIZE)
@attributes('yr', 'd')
def dataset():
    """Create one decorated data pipeline over a fresh simulation run.

    The decorator stack segments the generator stream into windows of
    4 * SIZE samples, splits each window into inputs (first SIZE) and
    targets (remaining 3 * SIZE), shuffles with ``shufflefn`` and
    rebatches REPEAT times.
    """
    return generator(BODYCOUNT, 4 * SIZE, BATCH)
class Evolve(nn.Module):
    """One NRI-style (neural relational inference) evolution step.

    Encodes pairwise interaction edges between the BODYCOUNT bodies from
    the current state, then decodes the next state; the mass channel is
    re-attached unchanged.
    """
    def __init__(self):
        super(Evolve, self).__init__()
        w = SIZE
        # 8 channels per body: [mass, dh, position(3), velocity(3)].
        c = 8
        d = c * w
        # Fully connected relation graph without self-loops.
        off_diag = np.ones([BODYCOUNT, BODYCOUNT]) - np.eye(BODYCOUNT)
        self.rel_rec = Variable(cast(np.array(encode_onehot(np.where(off_diag)[1]), dtype=np.float32)))
        self.rel_send = Variable(cast(np.array(encode_onehot(np.where(off_diag)[0]), dtype=np.float32)))
        self.encoder = MLPEncoder(d, 2048, 1)
        self.decoder = MLPDecoder(c, 1, 2048, 2048, 2048)
    def forward(self, x, w=SIZE):
        # x layout assumed (batch, channel, step, body) — TODO confirm at call sites.
        mo = x[:, 0:1, :, :]  # mass channel, passed through untouched
        out = x.permute(0, 3, 2, 1).contiguous()
        # Sample a discrete interaction graph via Gumbel-softmax.
        logits = self.encoder(out, self.rel_rec, self.rel_send)
        edges = gumbel_softmax(logits)
        self.prob = my_softmax(logits, -1)  # kept for the KL term in loss()
        out = self.decoder(out, edges, self.rel_rec, self.rel_send, w)
        out = out.permute(0, 3, 2, 1).contiguous()
        hn = out[:, 1:2, :, :]
        pn = out[:, 2:5, :, :]
        vn = out[:, 5:8, :, :]
        # Recombine with the original mass channel.
        out = th.cat([mo, hn, pn, vn], dim=1)
        print('evolvm:', th.max(mo.data), th.min(mo.data))
        print('evolvh:', th.max(hn.data), th.min(hn.data))
        print('evolvx:', th.max(pn.data), th.min(pn.data))
        print('evolvv:', th.max(vn.data), th.min(vn.data))
        sys.stdout.flush()
        return out
class Model(nn.Module):
    """Autoregressive rollout wrapper around ``Evolve``.

    Steps the single-frame evolution 4 * SIZE times and collects the
    last 3 * SIZE predicted frames as the output trajectory.
    """
    def __init__(self, bsize=1):
        super(Model, self).__init__()
        self.batch = bsize
        self.evolve = Evolve()
    def forward(self, x):
        # Fold repeat and batch dims together: (r, b, c, s, i) -> (r*b, c, s, i).
        x = x.permute(0, 2, 4, 1, 3).contiguous()
        sr, sb, sc, ss, si = tuple(x.size())
        state = x.view(sr * sb, sc, ss, si)
        result = Variable(cast(np.zeros([sr * sb, 8, 3 * SIZE, BODYCOUNT])))
        for i in range(4 * SIZE):
            state = self.evolve(state, w=1)
            # Discard the first SIZE warm-up steps; keep the remaining 3*SIZE.
            if i >= SIZE:
                result[:, :, i - SIZE, :] = state[:, :, 0, :]
        return result
mse = nn.MSELoss()
model = Model(bsize=BATCH)
optimizer = optim.Adam(model.parameters(), lr=1e-4, weight_decay=1e-8)
def predict(xs):
    """Forward pass used by the learner.

    Logs the wall time elapsed since the last timestamp (i.e. the data
    construction time) before running the model on the batch.
    """
    global lasttime
    elapsed = time.time() - lasttime
    print('cns:', elapsed)
    sys.stdout.flush()
    lasttime = time.time()
    return model(xs)
counter = 0
triu_indices = get_triu_offdiag_indices(8 * SIZE * BODYCOUNT)
tril_indices = get_tril_offdiag_indices(8 * SIZE * BODYCOUNT)
if th.cuda.is_available():
triu_indices = triu_indices.cuda()
tril_indices = tril_indices.cuda()
def set_aspect_equal_3d(ax):
    """Fix equal aspect bug for 3D plots.

    Forces all three axes to span the same width, each centred on the
    midpoint of its current limits, so a cube renders as a cube.
    """
    from numpy import mean

    xlim, ylim, zlim = ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d()
    xmid, ymid, zmid = mean(xlim), mean(ylim), mean(zlim)

    # Radius of the bounding cube: the furthest any current axis bound
    # lies from its own axis midpoint.
    radius = max(
        max(abs(lo - mid), abs(hi - mid))
        for (lo, hi), mid in ((xlim, xmid), (ylim, ymid), (zlim, zmid))
    )

    ax.set_xlim3d([xmid - radius, xmid + radius])
    ax.set_ylim3d([ymid - radius, ymid + radius])
    ax.set_zlim3d([zmid - radius, zmid + radius])
def loss(xs, ys, result):
    """NRI training loss: reconstruction NLL + KL of the edge posterior.

    Also logs per-channel errors, step-decays the learning rate every
    100 calls, and saves a 3D scatter of truth vs. prediction to
    data/3body.png.

    Fix: the plotted prediction previously sliced ``result`` channels
    0-2 (mass, dh, x) instead of the position channels; it now uses
    ``gp`` (channels 2:5) so both scatters show trajectories.
    """
    global counter, lasttime
    counter = counter + 1
    # Reshape targets to match the model output: (r*b, channel, step, body).
    ys = ys.permute(0, 2, 4, 1, 3).contiguous()
    sr, sb, sc, ss, si = tuple(ys.size())
    ys = ys.view(sr * sb, sc, ss, si)
    # Channel layout: 0 mass, 1 dh, 2:5 position, 5:8 velocity.
    ms = ys[:, 0:1, :, :]
    ps = ys[:, 2:5, :, :]
    vs = ys[:, 5:8, :, :]
    gm = result[:, 0:1, :, :]
    gp = result[:, 2:5, :, :]
    gv = result[:, 5:8, :, :]
    loss_nll = nll_gaussian(ys, result, 5e-5)
    loss_kl = kl_categorical_uniform(model.evolve.prob, BODYCOUNT, 1)
    print('-----------------------------')
    print('dur:', time.time() - lasttime)
    print('per:', th.mean(th.sqrt((ps - gp) * (ps - gp)).data))
    print('ver:', th.mean(th.sqrt((vs - gv) * (vs - gv)).data))
    print('mer:', th.mean(th.sqrt((ms - gm) * (ms - gm)).data))
    print('lss:', th.mean(loss_nll.data))
    print('lkl:', th.mean(loss_kl.data))
    print('-----------------------------')
    sys.stdout.flush()
    lasttime = time.time()
    # Step-decay the learning rate by 10x every 100 loss evaluations.
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr * (0.1 ** (counter // 100))
    # NOTE(review): `counter % 1 == 0` is always true, so a plot is saved
    # on every call; raise the modulus to throttle plotting.
    if counter % 1 == 0:
        if th.cuda.is_available():
            truth = ps.data.cpu().numpy()[0, :, :, :]
            guess = gp.data.cpu().numpy()[0, :, :, :]
            gmass = gm[0, 0, 0, :].data.cpu().numpy()
            tmass = ms[0, 0, 0, :].data.cpu().numpy()
        else:
            truth = ps.data.numpy()[0, :, :, :]
            guess = gp.data.numpy()[0, :, :, :]
            gmass = gm[0, 0, 0, :].data.numpy()
            tmass = ms[0, 0, 0, :].data.numpy()
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.set_aspect('equal')
        # Circles: ground truth; crosses: prediction.  Marker size ~ mass.
        ax.plot(truth[0, :, 0], truth[1, :, 0], truth[2, :, 0], 'ro', markersize=msize(tmass[0]))
        ax.plot(truth[0, :, 1], truth[1, :, 1], truth[2, :, 1], 'go', markersize=msize(tmass[1]))
        ax.plot(truth[0, :, 2], truth[1, :, 2], truth[2, :, 2], 'bo', markersize=msize(tmass[2]))
        ax.plot(guess[0, :, 0], guess[1, :, 0], guess[2, :, 0], 'r+', markersize=msize(gmass[0]))
        ax.plot(guess[0, :, 1], guess[1, :, 1], guess[2, :, 1], 'g+', markersize=msize(gmass[1]))
        ax.plot(guess[0, :, 2], guess[1, :, 2], guess[2, :, 2], 'b+', markersize=msize(gmass[2]))
        set_aspect_equal_3d(ax)
        plt.savefig('data/3body.png')
        plt.close()
    return loss_nll + loss_kl
# Learner wires together the model, forward fn, loss and optimizer.
learner = StandardLearner(model, predict, loss, optimizer, batch=BATCH * REPEAT)
if __name__ == '__main__':
    # Train on freshly simulated data, then report the mean test error
    # over 1000 fresh evaluation runs.
    for epoch in range(10000):
        print('.')
        learner.learn(dataset(), dataset())
    print('--------------------------------')
    errsum = 0.0
    for epoch in range(1000):
        err = learner.test(dataset())
        print(err)
        errsum += err
    print('--------------------------------')
    print(errsum / 1000)
| 6,617 | 5 | 289 |
49ad0529acc7b30e818083fbddf61cedb7ec9149 | 1,616 | py | Python | test_question4.py | fmakawa/Practice | 7f6eaa1dde4e46088ca5dcee76de1bb56a363238 | [
"MIT"
] | null | null | null | test_question4.py | fmakawa/Practice | 7f6eaa1dde4e46088ca5dcee76de1bb56a363238 | [
"MIT"
] | null | null | null | test_question4.py | fmakawa/Practice | 7f6eaa1dde4e46088ca5dcee76de1bb56a363238 | [
"MIT"
] | null | null | null | """
Question 4
Level 1
Question:
Write a program which accepts a sequence of comma-separated numbers from console and generate a list and a tuple which contains every number.
Suppose the following input is supplied to the program:
34,67,55,33,12,98
Then, the output should be:
['34', '67', '55', '33', '12', '98']
('34', '67', '55', '33', '12', '98')
Hints:
In case of input data being supplied to the question, it should be assumed to be a console input.
tuple() method can convert list to tuple
"""
import unittest
from unittest.mock import patch
from question4 import listicle, tuplicle, listpicle
suite = unittest.TestLoader().loadTestsFromTestCase(TestDict)
unittest.TextTestRunner(verbosity=2).run(suite)
| 36.727273 | 141 | 0.61448 | """
Question 4
Level 1
Question:
Write a program which accepts a sequence of comma-separated numbers from console and generate a list and a tuple which contains every number.
Suppose the following input is supplied to the program:
34,67,55,33,12,98
Then, the output should be:
['34', '67', '55', '33', '12', '98']
('34', '67', '55', '33', '12', '98')
Hints:
In case of input data being supplied to the question, it should be assumed to be a console input.
tuple() method can convert list to tuple
"""
import unittest
from unittest.mock import patch
from question4 import listicle, tuplicle, listpicle
class TestDict(unittest.TestCase):
    """Tests for question4: comma-separated console input -> list/tuple."""
    @patch('builtins.input', lambda *args: '34,67,55,33,12,98')
    def test_list(self):
        """listicle() should split the mocked console input into a list."""
        d=listicle()
        self.assertEqual(d, ['34', '67', '55', '33', '12', '98'], "Supposed to equal ['34', '67', '55', '33', '12', '98']")
    @patch('builtins.input', lambda *args: '34,67,55,33,12,98')
    def test_tuple(self):
        """tuplicle() should split the mocked console input into a tuple."""
        d = tuplicle()
        self.assertEqual(d, ('34', '67', '55', '33', '12', '98'),"Supposed to equal ('34', '67', '55', '33', '12', '98')")
    @patch('builtins.input', lambda *args: '34,67,55,33,12,98')
    def test_listpicle(self):
        """listpicle() should return the (list, tuple) pair."""
        d = listpicle()
        print(d)
        self.assertEqual(d[0], ['34', '67', '55', '33', '12', '98'],"Supposed to equal ['34', '67', '55', '33', '12', '98']")
        self.assertEqual(d[1], ('34', '67', '55', '33', '12', '98'),"Supposed to equal ('34', '67', '55', '33', '12', '98')")
unittest.TextTestRunner(verbosity=2).run(suite)
| 587 | 289 | 23 |
938385e28f9b2ed19e39302cb8539a14a9ba38f9 | 225 | py | Python | ex007.py | EduFelix/Exercicios-Python | 4dc6a33653f8171684a8628f5629b137b4bfef94 | [
"MIT"
] | null | null | null | ex007.py | EduFelix/Exercicios-Python | 4dc6a33653f8171684a8628f5629b137b4bfef94 | [
"MIT"
] | null | null | null | ex007.py | EduFelix/Exercicios-Python | 4dc6a33653f8171684a8628f5629b137b4bfef94 | [
"MIT"
] | null | null | null | n1 = float(input('Digite a primeira nota?'))
n2 = float(input('Digite a segunda nota?'))
media = (n1 + n2)/ 2
print("Primeira nota do aluno {}, \n Segunda nota do aluno {}\n Média das notas do aluno {}".format(n1, n2, media)) | 56.25 | 115 | 0.666667 | n1 = float(input('Digite a primeira nota?'))
n2 = float(input('Digite a segunda nota?'))
media = (n1 + n2)/ 2
print("Primeira nota do aluno {}, \n Segunda nota do aluno {}\n Média das notas do aluno {}".format(n1, n2, media)) | 0 | 0 | 0 |
06256fd0dd0875fdd476dc40b3f7caf74bf649c8 | 2,224 | py | Python | algorithms_comparisons/analysis/benchmark.py | eryktrzeciakiewicz/algorithms-comparisons | 101cbb4ccf13c3dc607b0e6c192ab2237c78b13e | [
"MIT"
] | null | null | null | algorithms_comparisons/analysis/benchmark.py | eryktrzeciakiewicz/algorithms-comparisons | 101cbb4ccf13c3dc607b0e6c192ab2237c78b13e | [
"MIT"
] | null | null | null | algorithms_comparisons/analysis/benchmark.py | eryktrzeciakiewicz/algorithms-comparisons | 101cbb4ccf13c3dc607b0e6c192ab2237c78b13e | [
"MIT"
] | null | null | null | import sys
import abc
from algorithms_comparisons.utility.timer import timed
| 38.344828 | 99 | 0.692896 | import sys
import abc
from algorithms_comparisons.utility.timer import timed
class Benchmark(abc.ABC):
    """Template for timing a set of functions over a set of parameters.

    Subclasses define how times are collected
    (``measure_time_of_execution``) and how results are packaged
    (``build_output``); ``run`` is the template method tying them
    together.
    """
    @abc.abstractmethod
    def measure_time_of_execution(self,timed_functions, parameters):
        """Collect timing data from the timer-wrapped functions."""
        pass
    @abc.abstractmethod
    def build_output(self, functions, time_results, parameters):
        """Package the raw timings for presentation."""
        pass
    def run(self, functions, parameters):
        """Wrap, measure and package; returns whatever build_output makes."""
        timed_functions = self.decorate_functions_with_timer(functions)
        time_results = self.measure_time_of_execution(timed_functions, parameters)
        output = self.build_output(functions, time_results, parameters)
        return output
    def decorate_functions_with_timer(self, functions):
        # ``timed`` wraps each function; subclasses unpack its return as
        # a (result, elapsed_time) pair.
        timed_functions = []
        for function in functions:
            timed_functions.append(timed(function))
        return timed_functions
class OverallPerformanceBenchmark(Benchmark):
    """Benchmark reporting one total running time per function,
    accumulated over every parameter."""

    def measure_time_of_execution(self, timed_functions, parameters):
        """Return, per timed function, the summed wall time over all parameters."""
        totals = []
        for timed_function in timed_functions:
            elapsed = 0
            for parameter in parameters:
                _, duration = timed_function(parameter)
                elapsed += duration
            totals.append(elapsed)
        return totals

    def build_output(self, functions, time_results, parameters):
        """Pair each function's name with its total running time."""
        names = [fn.__name__ for fn in functions]
        return (names, time_results)
class ProblemSizePerformanceBenchmark(Benchmark):
    """Benchmark measuring each function separately for every problem
    size, averaging the wall time over repeated trials."""

    def measure_time_of_execution(self, timed_functions, parameters):
        """Return a (function x parameter) grid of mean wall times over 20 trials."""
        num_trials = 20
        grid = [[0] * len(parameters) for _ in timed_functions]
        for _ in range(num_trials):
            for row, timed_function in enumerate(timed_functions):
                for col, param in enumerate(parameters):
                    _, duration = timed_function(param)
                    grid[row][col] += duration / num_trials
        return grid

    def build_output(self, functions, time_results, parameters):
        """Combine function names, problem sizes and the timing grid."""
        names = [fn.__name__ for fn in functions]
        return (names, parameters, time_results)
| 1,750 | 220 | 176 |
9d3e5450887e6602ff1d30172f1a8cc5caf85669 | 78 | py | Python | main.py | BigSmokeCuba/BotTelegram | 65636ff1ce1bee27575144b21ac9bdd3c69a2735 | [
"MIT"
] | null | null | null | main.py | BigSmokeCuba/BotTelegram | 65636ff1ce1bee27575144b21ac9bdd3c69a2735 | [
"MIT"
] | null | null | null | main.py | BigSmokeCuba/BotTelegram | 65636ff1ce1bee27575144b21ac9bdd3c69a2735 | [
"MIT"
] | null | null | null | import bot.app
from threading import Thread
Thread(target=bot.app).start() | 19.5 | 30 | 0.769231 | import bot.app
from threading import Thread
Thread(target=bot.app).start() | 0 | 0 | 0 |
5bb4f222e235e9d7070a669c4bfcfbbeadb8de75 | 1,406 | py | Python | webapp/blaster/quickstart.py | 128technology/blaster | ed4f94a8d068e7ee522e246f61ba3425a68041d2 | [
"MIT"
] | null | null | null | webapp/blaster/quickstart.py | 128technology/blaster | ed4f94a8d068e7ee522e246f61ba3425a68041d2 | [
"MIT"
] | 17 | 2020-09-16T09:32:32.000Z | 2021-07-22T18:54:13.000Z | webapp/blaster/quickstart.py | 128technology/blaster | ed4f94a8d068e7ee522e246f61ba3425a68041d2 | [
"MIT"
] | null | null | null | import functools
from flask import (
current_app, Blueprint, flash, Flask, g, redirect, render_template, request, session, url_for, jsonify
)
import json
from blaster.db import get_db
from . import constants
bp = Blueprint('quickstart', __name__, url_prefix='/quickstart')
@bp.route('/<instance>')
| 33.47619 | 139 | 0.668563 | import functools
from flask import (
current_app, Blueprint, flash, Flask, g, redirect, render_template, request, session, url_for, jsonify
)
import json
from blaster.db import get_db
from . import constants
bp = Blueprint('quickstart', __name__, url_prefix='/quickstart')
@bp.route('/<instance>')
def instantiate(instance=None):
    """Return the quickstart payload for a node and mark it bootstrapped.

    Looks up the node's assigned quickstart; an unknown node or a node
    with no assignment falls back to the default quickstart.  Responds
    404 when neither exists.
    """
    db = get_db()
    node_row = db.execute('SELECT quickstart_id from node WHERE identifier = ?', (instance,)).fetchone()
    if node_row is None or node_row[0] is None:
        qs_row = db.execute('SELECT node_name, asset_id, config FROM quickstart WHERE default_quickstart > 0').fetchone()
    else:
        qs_row = db.execute('SELECT node_name, asset_id, config FROM quickstart WHERE id = ?', (node_row['quickstart_id'],)).fetchone()
    if qs_row is None:
        return jsonify(error="Could not find a specific or default quickstart"), 404
    quickstart = {
        'a': qs_row['asset_id'],
        'n': qs_row['node_name'],
        'c': qs_row['config']
    }
    response = {
        'quickstart': json.dumps(quickstart),
        'password': None
    }
    db.execute('UPDATE node SET status = ? WHERE identifier = ?', ('Bootstrapped', instance))
    db.commit()
    return jsonify(response)
| 1,076 | 0 | 22 |
251449e248fb046c8e513ed9c8761edc71196595 | 306 | py | Python | close-server.py | cyanobacterium/Minecraft-Automated-Forge-Server | 587df4dc8100415a6b0d87d4f1c144c98a88098a | [
"MIT"
] | null | null | null | close-server.py | cyanobacterium/Minecraft-Automated-Forge-Server | 587df4dc8100415a6b0d87d4f1c144c98a88098a | [
"MIT"
] | 1 | 2016-09-17T13:04:55.000Z | 2016-09-19T18:34:29.000Z | close-server.py | cyanobacterium/Minecraft-Automated-Forge-Server | 587df4dc8100415a6b0d87d4f1c144c98a88098a | [
"MIT"
] | null | null | null |
import os
local_dir = os.path.dirname(os.path.realpath(__file__))
dir_name = get_filename(local_dir)
command_file = local_dir+"/command-stack.txt"
f_out = open(command_file, "w")
f_out.write("stop\n")
f_out.close()
| 19.125 | 56 | 0.673203 |
import os
def get_filename(path):
    """Return the final path component, accepting / or \\ separators."""
    normalized = path.replace("\\", "/")
    return normalized.split("/")[-1]
# Queue a "stop" command by overwriting the command-stack file next to
# this script (presumably polled by the server wrapper — filename
# suggests a command queue; TODO confirm the consumer).
local_dir = os.path.dirname(os.path.realpath(__file__))
dir_name = get_filename(local_dir)
command_file = local_dir + "/command-stack.txt"
# Context manager guarantees the file is flushed and closed even if the
# write raises (the original left the handle open on error).
with open(command_file, "w") as f_out:
    f_out.write("stop\n")
| 49 | 0 | 25 |
31e3fc4c3daa23bcf9ebfa1f70cac64480721f36 | 405 | py | Python | app/django/photo/migrations/0004_rename_dimentions_descriptionimage_dimensions.py | Murabei-OpenSource-Codes/ai-photo-sampler--backend | c098a5cb544da89623a000d87daa18f22cfecfce | [
"BSD-3-Clause"
] | null | null | null | app/django/photo/migrations/0004_rename_dimentions_descriptionimage_dimensions.py | Murabei-OpenSource-Codes/ai-photo-sampler--backend | c098a5cb544da89623a000d87daa18f22cfecfce | [
"BSD-3-Clause"
] | null | null | null | app/django/photo/migrations/0004_rename_dimentions_descriptionimage_dimensions.py | Murabei-OpenSource-Codes/ai-photo-sampler--backend | c098a5cb544da89623a000d87daa18f22cfecfce | [
"BSD-3-Clause"
] | null | null | null | # Generated by Django 3.2.4 on 2022-04-12 02:21
from django.db import migrations
| 21.315789 | 71 | 0.62716 | # Generated by Django 3.2.4 on 2022-04-12 02:21
from django.db import migrations
class Migration(migrations.Migration):
    """Rename ``DescriptionImage.dimentions`` to ``dimensions`` (typo fix)."""
    # Must run after the migration that introduced the misspelled field.
    dependencies = [
        ('photo', '0003_rename_dimension_descriptionimage_dimentions'),
    ]
    operations = [
        migrations.RenameField(
            model_name='descriptionimage',
            old_name='dimentions',
            new_name='dimensions',
        ),
    ]
| 0 | 299 | 23 |