hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
03b7d55b2aedbf78ae254e0d5c1ac7c91794bb37 | 4,615 | py | Python | src/code/data-structures/DoublyLinkedList/DoublyLinkedList.py | angshumanHalder/discord-bot | 5b3b6042c901a1563abeee48ee8f267aa5c79fe2 | [
"MIT"
] | null | null | null | src/code/data-structures/DoublyLinkedList/DoublyLinkedList.py | angshumanHalder/discord-bot | 5b3b6042c901a1563abeee48ee8f267aa5c79fe2 | [
"MIT"
] | null | null | null | src/code/data-structures/DoublyLinkedList/DoublyLinkedList.py | angshumanHalder/discord-bot | 5b3b6042c901a1563abeee48ee8f267aa5c79fe2 | [
"MIT"
class DoublyLinkedList:
    """A doubly linked list with O(1) push/pop at both ends and O(n)
    indexed access, insertion and removal."""

    class _Node:
        """Private node holding one value plus prev/next links."""
        def __init__(self, value):
            self.value = value
            self.next = None
            self.prev = None

        def __str__(self):
            # str() so non-string values do not break __str__'s contract.
            return str(self.value)

    def __init__(self):
        self.head = None
        self.tail = None
        self.length = 0

    def push_beginning(self, value):
        """Prepend value to the list. Returns True."""
        node = self._Node(value)
        if self.length == 0:
            self.head = node
            self.tail = node
        else:
            node.next = self.head
            self.head.prev = node
            self.head = node
        self.length += 1
        return True

    def push_end(self, value):
        """Append value to the list. Returns True."""
        node = self._Node(value)
        if self.length == 0:
            self.head = node
            self.tail = node
        else:
            node.prev = self.tail
            self.tail.next = node
            self.tail = node
        self.length += 1
        return True

    def push_at_index(self, value, index):
        """Insert value at position index.

        Raises IndexError on an empty list or an out-of-range index.
        Indices >= length - 1 append at the end (original behaviour kept).
        """
        if self._is_empty():
            raise IndexError("List is empty")
        self._is_out_of_bounds(index)
        # elif chain: the original fell through and could insert twice.
        if index == 0:
            self.push_beginning(value)
        elif index >= self.length - 1:
            self.push_end(value)
        else:
            node = self._Node(value)
            before = self._node_at(index - 1)
            node.next = before.next
            before.next.prev = node
            node.prev = before
            before.next = node
            self.length += 1
        return True

    def remove_beginning(self):
        """Remove and return the first value. Raises IndexError if empty."""
        if self._is_empty():
            raise IndexError("List is empty")
        old_head = self.head
        value = old_head.value
        self.head = old_head.next
        if self.head is None:
            # List became empty: clear the tail too (the original crashed
            # here when removing from a one-element list).
            self.tail = None
        else:
            self.head.prev = None
        old_head.next = None
        self.length -= 1
        return value

    def remove_end(self):
        """Remove and return the last value. Raises IndexError if empty."""
        if self._is_empty():
            raise IndexError("List is empty")
        old_tail = self.tail
        value = old_tail.value
        self.tail = old_tail.prev
        if self.tail is None:
            self.head = None
        else:
            self.tail.next = None
        old_tail.prev = None
        self.length -= 1
        return value

    def remove_at_index(self, index):
        """Remove and return the value at index.

        Raises IndexError on an empty list or an out-of-range index.
        """
        if self._is_empty():
            raise IndexError("List is empty")
        self._is_out_of_bounds(index)
        if index == 0:
            # The original fell through after this branch (possibly removing
            # a second node) and then hit a NameError on `return value`.
            return self.remove_beginning()
        if index >= self.length - 1:
            return self.remove_end()
        before = self._node_at(index - 1)
        target = before.next
        value = target.value
        before.next = target.next
        target.next.prev = before
        target.next = None
        target.prev = None
        # The original forgot to decrement the length here.
        self.length -= 1
        return value

    def get_value_at(self, index):
        """Return the value stored at index."""
        if self._is_empty():
            raise IndexError("List is empty")
        self._is_out_of_bounds(index)
        return self._node_at(index).value

    def set_value_at(self, value, index):
        """Overwrite the value stored at index. Returns True."""
        if self._is_empty():
            raise IndexError("List is empty")
        self._is_out_of_bounds(index)
        self._node_at(index).value = value
        return True

    def reverse_list(self):
        """Reverse the list in place by swapping values from both ends."""
        front = self.head
        back = self.tail
        for _ in range(self.length // 2):
            front.value, back.value = back.value, front.value
            front = front.next
            back = back.prev
        return True

    # --- Helper methods ---
    def size(self):
        """Number of stored elements."""
        return self.length

    def _node_at(self, index):
        """Walk from the head to the node at index (assumed in range)."""
        node = self.head
        for _ in range(index):
            node = node.next
        return node

    def _is_empty(self):
        return self.length == 0

    def _is_out_of_bounds(self, idx):
        # Also reject negative indices, which the original let through.
        if idx < 0 or idx >= self.length:
            raise IndexError('Index out of bounds')

    def __str__(self):
        node = self.head
        parts = []
        while node is not None:
            parts.append(str(node.value))
            node = node.next
        return "[" + ",".join(parts) + "]"
| 27.470238 | 55 | 0.51896 | 4,612 | 0.99935 | 0 | 0 | 0 | 0 | 0 | 0 | 168 | 0.036403 |
03b930ecffd229e67ed2320a14aa54a5dedfa069 | 2,296 | py | Python | python/cuml/dask/linear_model/base.py | Pandinosaurus/cuml | 47f8577ca0c1bc621cb67f77e0b8dbcbe68b360e | [
"Apache-2.0"
] | 1 | 2021-02-01T00:01:29.000Z | 2021-02-01T00:01:29.000Z | python/cuml/dask/linear_model/base.py | mseneshen/cuml | 1c561de84739c31659acde639f1c80aedce3147c | [
"Apache-2.0"
] | null | null | null | python/cuml/dask/linear_model/base.py | mseneshen/cuml | 1c561de84739c31659acde639f1c80aedce3147c | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.dask.common import raise_exception_from_futures
from cuml.dask.common.comms import CommsContext
from cuml.dask.common.input_utils import DistributedDataHandler
from dask.distributed import wait
class BaseLinearModelSyncFitMixin(object):
    """Mixin adding a synchronous multi-worker `_fit` for dask-distributed
    cuml linear models (shared by the dask linear_model wrappers)."""

    def _fit(self, model_func, data, **kwargs):
        """Fit one model replica per worker and return them keyed by rank.

        model_func: callable invoked as model_func(sessionId, datatype,
                    **self.kwargs) on each worker to build the local model.
        data:       sequence of dask collections (presumably [X, y] --
                    TODO confirm against callers).
        Returns:    dict {worker rank: future of the fitted model}.
        """
        # NOTE(review): each iteration rebinds `d` to a persist() of the
        # *whole* `data` sequence -- `self.client.persist(d)` was likely
        # intended. `d` (its last binding) is then reused below for n_cols.
        for d in data:
            d = self.client.persist(data)
        data = DistributedDataHandler.create(data=data, client=self.client)
        self.datatype = data.datatype
        # Collective comms only (no point-to-point) between the workers
        # that actually hold parts of the data.
        comms = CommsContext(comms_p2p=False, verbose=self.verbose)
        comms.init(workers=data.workers)
        data.calculate_parts_to_sizes(comms)
        self.ranks = data.ranks
        # Feature count; assumes d[0] is the feature matrix -- TODO confirm.
        n_cols = d[0].shape[1]
        # Build one local model per worker, keyed by that worker's rank.
        lin_models = dict([(data.worker_info[wf[0]]["r"], self.client.submit(
            model_func,
            comms.sessionId,
            self.datatype,
            **self.kwargs,
            pure=False,
            workers=[wf[0]]))
            for idx, wf in enumerate(data.worker_to_parts.items())])
        # Kick off the fit on every worker against its local partitions.
        lin_fit = dict([(wf[0], self.client.submit(
            _func_fit,
            lin_models[data.worker_info[wf[0]]["r"]],
            wf[1],
            data.total_rows,
            n_cols,
            data.parts_to_sizes[data.worker_info[wf[0]]["r"]],
            data.worker_info[wf[0]]["r"],
            pure=False,
            workers=[wf[0]]))
            for idx, wf in enumerate(data.worker_to_parts.items())])
        wait(list(lin_fit.values()))
        # Re-raise any worker-side exception locally.
        raise_exception_from_futures(list(lin_fit.values()))
        comms.destroy()
        return lin_models
def _func_fit(f, data, n_rows, n_cols, partsToSizes, rank):
return f.fit(data, n_rows, n_cols, partsToSizes, rank)
| 32.8 | 77 | 0.651132 | 1,377 | 0.599739 | 0 | 0 | 0 | 0 | 0 | 0 | 588 | 0.256098 |
03bb66fae4fd3ed09ed811e675a254e039ae716a | 2,686 | py | Python | utils/plot_part_dat.py | jeremiedecock/botsim | 73262092a8769c331edb96e083e32156f33bf948 | [
"MIT"
] | 1 | 2015-06-08T13:01:24.000Z | 2015-06-08T13:01:24.000Z | utils/plot_part_dat.py | jeremiedecock/botsim | 73262092a8769c331edb96e083e32156f33bf948 | [
"MIT"
] | null | null | null | utils/plot_part_dat.py | jeremiedecock/botsim | 73262092a8769c331edb96e083e32156f33bf948 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (jd.jdhp@gmail.com)
import numpy as np
import matplotlib.pyplot as plt
import math
import argparse
def parse_part_log_file(filename):
    """Load a whitespace-separated part log and return its named columns.

    Returns a dict mapping column name -> 1-D numpy array, following the
    fixed 20-column layout written by the simulator.
    """
    column_names = (
        "time_sec",
        "position_x", "position_y", "position_z",
        "angle_x", "angle_y", "angle_z", "angle_w",
        "linear_velocity_x", "linear_velocity_y", "linear_velocity_z",
        "angular_velocity_x", "angular_velocity_y", "angular_velocity_z",
        "total_force_x", "total_force_y", "total_force_z",
        "total_torque_x", "total_torque_y", "total_torque_z",
    )
    log_data = np.loadtxt(filename)
    return {name: log_data[:, col] for col, name in enumerate(column_names)}
def main():
    """Main function: parse CLI args, overlay position_z vs. time for each
    part log listed on the command line, save a PDF and show the figure."""
    # PARSE OPTIONS ###################
    parser = argparse.ArgumentParser(description='Plot one or several part(s).')
    parser.add_argument('filenames', nargs='+', metavar='FILE', help='DAT file to read')
    parser.add_argument("--title", "-t", help="set the title of the figure", metavar="STRING")
    args = parser.parse_args()
    title = args.title
    # PLOT DATA #######################
    fig = plt.figure(figsize=(16.0, 10.0))
    #fig = plt.figure()
    ax = fig.add_subplot(111)
    #ax.grid(True)
    for index, filename in enumerate(args.filenames):
        print(index, filename)
        data_dict = parse_part_log_file(filename)
        # Only the vertical (z) position is plotted, one curve per file.
        ax.plot(data_dict["time_sec"], data_dict["position_z"], label=filename)
    # TITLE AND LABELS ################
    FONTSIZE = 26
    FONTSIZE_S = 22
    if title is None:
        title = "Parts position with respect to time."
    ax.set_title(title, fontsize=FONTSIZE)
    ax.set_xlabel("Time (sec)", fontsize=FONTSIZE)
    ax.set_ylabel("Position", fontsize=FONTSIZE)
    ax.legend(loc='best', fontsize=FONTSIZE_S)
    # SAVE FILES ######################
    # NOTE(review): the PDF is always written to the current working directory.
    fig_filename = "parts.pdf"
    plt.savefig(fig_filename)
    # PLOT ############################
    plt.show()

if __name__ == '__main__':
    main()
| 29.195652 | 94 | 0.599032 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 848 | 0.315476 |
03bbae28c2cd4fb58b49c704d23f872cee7681d3 | 3,505 | py | Python | t.py | cmsirbu/gencfg | 5f201208ca55bdd2ddd67974129465d95ebf4af4 | [
"MIT"
] | 5 | 2016-03-09T19:50:54.000Z | 2018-10-12T03:05:23.000Z | t.py | cmsirbu/gencfg | 5f201208ca55bdd2ddd67974129465d95ebf4af4 | [
"MIT"
] | null | null | null | t.py | cmsirbu/gencfg | 5f201208ca55bdd2ddd67974129465d95ebf4af4 | [
"MIT"
] | 2 | 2019-06-28T10:34:52.000Z | 2019-09-16T23:56:49.000Z | #!/usr/bin/env python
"""A script that helps generate router configuration from templates.
"""
import os
import sys
import argparse
import csv
import jinja2
from jinja2 import meta
def get_template_var_list(config_template):
    """Return the set of undeclared variable names used in a Jinja2 template."""
    env = jinja2.Environment(loader=jinja2.FileSystemLoader(searchpath='.'))
    template_source = env.loader.get_source(env, config_template)[0]
    parsed = env.parse(template_source)
    return meta.find_undeclared_variables(parsed)
def generate_csv_header(config_template):
    """Write the template's variables, sorted, as a one-row CSV header.

    The CSV is created next to the template, with a .csv extension.
    """
    header = sorted(get_template_var_list(config_template))
    pre, _ = os.path.splitext(config_template)
    with open(pre + ".csv", "w") as csv_file:
        csv.writer(csv_file).writerow(header)
    print("Header variables saved to " + pre + ".csv")
def generate_config(config_template, config_data, config_outdir):
    """Render config_template once per row of config_data (CSV) into
    config_outdir, creating the directory if necessary."""
    # Jinja2 environment rooted at the current directory.
    env = jinja2.Environment(loader=jinja2.FileSystemLoader(searchpath='.'))
    template = env.get_template(config_template)

    totalrows = 0
    with open(config_data) as csv_file:
        # Missing cells become "WARNING_VALUE_MISSING" so they are easy to
        # spot in the rendered output.
        reader = csv.DictReader(csv_file, restval="WARNING_VALUE_MISSING")
        # Abort unless every template variable has a matching CSV column.
        missing = [x for x in get_template_var_list(config_template)
                   if x not in reader.fieldnames]
        if missing:
            sys.exit('Not all variables in {} are found in {}'.format(config_template, config_data))
        # The output directory lives next to the template.
        out_directory = os.path.join(os.path.dirname(config_template), config_outdir)
        if not os.path.exists(out_directory):
            os.makedirs(out_directory)
        for row in reader:
            # One rendered file per data row, numbered from 1.
            rendered = template.render(row)
            out_filename = os.path.join(out_directory, "cfg-" + str(reader.line_num - 1))
            with open(out_filename, mode="w") as out_file:
                out_file.write(rendered)
            totalrows += 1
    print("Generated {} files in {}/".format(totalrows, out_directory))
def main(arguments):
    """CLI entry point: dispatch to config generation or header extraction."""
    parser = argparse.ArgumentParser(description=__doc__,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('operation', help="gencfg, csvheader")
    parser.add_argument('-t', '--template', help="config template file (jinja2)")
    parser.add_argument('-d', '--data', help="config data file (csv)")
    parser.add_argument('-o', '--outdir', help="output directory (default=configs)", default="configs")
    args = parser.parse_args(arguments)

    if args.operation == "gencfg":
        # Both the template and the data file are mandatory here.
        if not (args.template and args.data):
            sys.exit("Template (-t) and data (-d) files must be specified.")
        generate_config(args.template, args.data, args.outdir)
    elif args.operation == "csvheader":
        if not args.template:
            sys.exit("Template (-t) file must be specified.")
        generate_csv_header(args.template)
    else:
        sys.exit("Invalid operation. Use gencfg to apply data to a template or " +
                 "csvheader to extract variables from a template.")

if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
| 38.097826 | 103 | 0.681598 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 942 | 0.268759 |
03bc9a5f0a747abf5d82393e3bb16961bae673ea | 2,607 | py | Python | examples/perf/rnn/simple_rnn.py | yuhonghong66/minpy | 2e44927ad0fbff9295e2acf6db636e588fdc5b42 | [
"Apache-2.0"
] | 1,271 | 2015-11-05T10:53:40.000Z | 2022-02-20T08:33:35.000Z | examples/perf/rnn/simple_rnn.py | yuhonghong66/minpy | 2e44927ad0fbff9295e2acf6db636e588fdc5b42 | [
"Apache-2.0"
] | 140 | 2016-04-07T02:55:19.000Z | 2019-08-02T06:01:53.000Z | examples/perf/rnn/simple_rnn.py | yuhonghong66/minpy | 2e44927ad0fbff9295e2acf6db636e588fdc5b42 | [
"Apache-2.0"
] | 144 | 2015-11-05T10:53:45.000Z | 2022-03-25T05:38:09.000Z | import sys
sys.path.insert(0, "../../python/")
import mxnet as mx
import numpy as np
from collections import namedtuple
import time
import math
# Hidden state carried between RNN time steps.
RNNState = namedtuple("RNNState", ["h"])
# Per-layer weights/biases for the input->hidden and hidden->hidden projections.
RNNParam = namedtuple("RNNParam", ["i2h_weight", "i2h_bias",
                                   "h2h_weight", "h2h_bias"])
# Bundle describing a fully built model (executor, symbol, states and params).
RNNModel = namedtuple("RNNModel", ["rnn_exec", "symbol",
                                   "init_states", "last_states",
                                   "seq_data", "seq_labels", "seq_outputs",
                                   "param_blocks"])
def rnn(num_hidden, in_data, prev_state, param, seqidx, layeridx):
    """Build one vanilla-RNN cell step as an MXNet symbol.

    num_hidden: size of the hidden state.
    in_data:    input symbol for this time step.
    prev_state: hidden symbol from the previous step (unused when seqidx == 0).
    param:      RNNParam holding the layer's weights/biases.
    seqidx/layeridx: used only to name the created symbols.
    Returns an RNNState wrapping the tanh-activated hidden symbol.
    """
    i2h = mx.sym.FullyConnected(data=in_data,
                                weight=param.i2h_weight,
                                bias=param.i2h_bias,
                                num_hidden=num_hidden,
                                name="t%d_l%d_i2h" % (seqidx, layeridx))
    if seqidx > 0:
        # From the second step on, add the recurrent contribution.
        h2h = mx.sym.FullyConnected(data=prev_state,
                                    weight=param.h2h_weight,
                                    bias=param.h2h_bias,
                                    num_hidden=num_hidden,
                                    name="t%d_l%d_h2h" % (seqidx, layeridx))
        hidden = i2h + h2h
    else:
        hidden = i2h
    hidden = mx.sym.Activation(data=hidden, act_type="tanh")
    return RNNState(h=hidden)
def rnn_unroll(num_rnn_layer, seq_len, input_size,
               num_hidden, num_label):
    """Unroll the RNN over seq_len time steps and attach a linear-regression head.

    Returns the output symbol of the unrolled network.
    (input_size is currently unused.)
    """
    cls_weight = mx.sym.Variable("cls_weight")
    cls_bias = mx.sym.Variable("cls_bias")
    param_cells = []
    for i in range(num_rnn_layer):
        param_cells.append(RNNParam(i2h_weight = mx.sym.Variable("l%d_i2h_weight" % i),
                                    i2h_bias = mx.sym.Variable("l%d_i2h_bias" % i),
                                    h2h_weight = mx.sym.Variable("l%d_h2h_weight" % i),
                                    h2h_bias = mx.sym.Variable("l%d_h2h_bias" % i)))
    loss_all = []  # NOTE(review): never used
    ori_data = mx.sym.Variable('data')
    label = mx.sym.Variable('softmax_label')
    # Split the (batch, seq_len, feature) input into one symbol per time step.
    data_timestamp = mx.sym.SliceChannel(data=ori_data, num_outputs=seq_len, squeeze_axis=1)
    hidden = None
    # NOTE(review): `i` below is left over from the layer loop above, so only
    # the *last* layer's parameters are ever used; with num_rnn_layer > 1
    # this looks like a bug -- confirm before reuse.
    for seqidx in range(seq_len):
        in_data = data_timestamp[seqidx]
        next_state = rnn(num_hidden, in_data=in_data,
                         prev_state=hidden,
                         param=param_cells[i],
                         seqidx=seqidx, layeridx=i)
        hidden = next_state.h
    # Regression head on the final hidden state only.
    fc = mx.sym.FullyConnected(data=hidden, weight=cls_weight, bias=cls_bias, num_hidden=num_label)
    reg = mx.sym.LinearRegressionOutput(data=fc, label=label)
    return reg
| 41.380952 | 99 | 0.557729 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 320 | 0.122746 |
03bce3dec0a0cfe68389b401fddaa0824f69003d | 366 | py | Python | Python/bench_2_1.py | nifty-swift/Nifty-benchmarks | 025128d6276a5dec0c89d1e464131c4e4dc22292 | [
"Apache-2.0"
] | 1 | 2018-03-28T05:51:21.000Z | 2018-03-28T05:51:21.000Z | Python/bench_2_1.py | nifty-swift/Nifty-benchmarks | 025128d6276a5dec0c89d1e464131c4e4dc22292 | [
"Apache-2.0"
] | null | null | null | Python/bench_2_1.py | nifty-swift/Nifty-benchmarks | 025128d6276a5dec0c89d1e464131c4e4dc22292 | [
"Apache-2.0"
] | null | null | null | import numpy as np
from time import time
def bench_2_1(trials=100, elements=1000000):
    """Benchmark 2.1: time generation of `elements` random ints, averaged
    over `trials` runs.

    trials/elements were hard-coded in the original; they are now keyword
    parameters with the same defaults, so existing zero-argument calls are
    unchanged.  Returns the mean trial time in milliseconds (also printed).
    The original used a Python-2 print *statement*, which is a syntax error
    under Python 3; print() below works on both.
    """
    times = []
    for _ in range(trials):
        start = time()
        np.random.randint(1, 999, size=elements)
        times.append(time() - start)
    mean_ms = np.mean(times) * 1000
    print('Python - Benchmark 2.1: Average time = {} milliseconds'.format(mean_ms))
    return mean_ms
03bd899281282039d2ef001ae52730a3c07b4cad | 685 | py | Python | utils/globals.py | Pawel095/RaidenPy | 0981a4921012f1951510a14f588645803c07010a | [
"Apache-2.0"
] | null | null | null | utils/globals.py | Pawel095/RaidenPy | 0981a4921012f1951510a14f588645803c07010a | [
"Apache-2.0"
] | null | null | null | utils/globals.py | Pawel095/RaidenPy | 0981a4921012f1951510a14f588645803c07010a | [
"Apache-2.0"
] | null | null | null | import arcade
from utils.loader import Loader
class keyFlags():
    """Mutable record of which control keys are currently held down."""
    def __init__(self):
        # Every key starts released.
        for flag in ("left", "right", "up", "down", "space"):
            setattr(self, flag, False)
# Window configuration shared across the game's modules.
TITLE = "Raiden Py"
WINDOW = None
WIDTH = 600
HEIGHT = 600
SCREEN_WIDTH = WIDTH
SCREEN_HEIGHT = HEIGHT
# NOTE(review): both lists below are rebound further down (`enemies` to an
# arcade.SpriteList), so these plain-list initialisations appear to be dead.
bullets = []
enemies = []
# Import-time side effect: all game assets are loaded when this module is
# first imported.
l = Loader()
print("load Start")
l.load()
print("load End")
# Global sprite groups used by the gameplay code.
enemyBullets = arcade.SpriteList()
playerBullets = arcade.SpriteList()
enemies = arcade.SpriteList()
explosions = arcade.SpriteList()
# Running kill counter, mutated via addOneToPlayerKills().
playerKills = 0
def getPlayerKills():
    """Return the player's current global kill count."""
    return playerKills
def addOneToPlayerKills():
    """Increment the global kill counter by one."""
    global playerKills
    playerKills += 1
| 15.568182 | 35 | 0.675912 | 171 | 0.249635 | 0 | 0 | 0 | 0 | 0 | 0 | 33 | 0.048175 |
03bdc7816fc5e2a7235f327d0fd6fc22c8a483aa | 686 | py | Python | tryme.py | haamis/Turku-neural-parser-pipeline | 7aec4aef910c7deb2590453031bff2affe61ff26 | [
"Apache-2.0"
] | 94 | 2018-08-19T11:08:33.000Z | 2022-03-15T14:37:27.000Z | tryme.py | haamis/Turku-neural-parser-pipeline | 7aec4aef910c7deb2590453031bff2affe61ff26 | [
"Apache-2.0"
] | 31 | 2018-08-09T09:31:38.000Z | 2022-02-21T14:33:56.000Z | tryme.py | haamis/Turku-neural-parser-pipeline | 7aec4aef910c7deb2590453031bff2affe61ff26 | [
"Apache-2.0"
] | 31 | 2018-09-04T18:44:54.000Z | 2021-10-20T09:54:16.000Z | from tnparser.pipeline import read_pipelines, Pipeline
# Two sample inputs; the second checks that the loaded model can be reused
# without reloading.
text1="I have a dog! Let's see what I can do with Silo.ai. :) Can I tokenize it? I think so! Heading: This is the heading And here continues a new sentence and there's no dot."
text2="Some other text, to see we can tokenize more stuff without reloading the model... :)"

# What do we have for English in models_en_ewt?
available_pipelines=read_pipelines("models_en_ewt/pipelines.yaml") # {pipeline_name -> its steps}
p=Pipeline(available_pipelines["tokenize"]) # launch the pipeline from the steps

# NOTE(review): 1000 iterations look like a smoke/throughput test; each
# iteration prints the full parse of both texts.
for _ in range(1000):
    print(p.parse(text1))
    print(p.parse(text2))
| 45.733333 | 176 | 0.690962 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 409 | 0.59621 |
03be6746a1113ef106ddc989e296ffe5f60e66cf | 3,034 | py | Python | scripts/mkuidefaults.py | dyna-mis/Hilabeling | cb7d5d4be29624a20c8a367162dbc6fd779b2b52 | [
"MIT"
] | null | null | null | scripts/mkuidefaults.py | dyna-mis/Hilabeling | cb7d5d4be29624a20c8a367162dbc6fd779b2b52 | [
"MIT"
] | null | null | null | scripts/mkuidefaults.py | dyna-mis/Hilabeling | cb7d5d4be29624a20c8a367162dbc6fd779b2b52 | [
"MIT"
] | 1 | 2021-12-25T08:40:30.000Z | 2021-12-25T08:40:30.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
***************************************************************************
mkuidefaults.py
---------------------
Date : June 2013
Copyright : (C) 2013 by Juergen E. Fischer
Email : jef at norbit dot de
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Juergen E. Fischer'
__date__ = 'June 2013'
__copyright__ = '(C) 2013, Juergen E. Fischer'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '176c06ceefb5f555205e72b20c962740cc0ec183'
import sys
import struct
from PyQt5.QtCore import QCoreApplication, QSettings
def chunks(l, n):
    """Yield successive n-sized slices of sequence l (the last may be shorter)."""
    start = 0
    while start < len(l):
        yield l[start:start + n]
        start += n
# Point QSettings at the same organization/application scope QGIS uses, so
# the keys below resolve correctly.
QCoreApplication.setOrganizationName("QGIS")
QCoreApplication.setOrganizationDomain("qgis.org")
QCoreApplication.setApplicationName("QGIS3")

if len(sys.argv) == 1:
    print("Usage: ./scripts/mkuidefaults.py \"location_to_ini\"")
    sys.exit(1)

# Read the saved UI geometry/state blobs from the given .ini settings file.
s = QSettings(sys.argv[1], QSettings.IniFormat)

ba = bytes(s.value("/UI/geometry"))

print  # NOTE(review): bare name -- prints nothing in Python 3; likely leftover

with open("src/app/ui_defaults.h", "w") as f:
    f.write("#ifndef UI_DEFAULTS_H\n#define UI_DEFAULTS_H\n" +
            "\nstatic const unsigned char defaultUIgeometry[] =\n{\n")

    # Emit each blob as a C byte array, 16 hex values per line.
    for chunk in chunks(ba, 16):
        f.write(' {},\n'.format(
            ', '.join(map(hex, struct.unpack('B' * len(chunk), chunk)))))

    f.write("};\n\nstatic const unsigned char defaultUIstate[] =\n{\n")

    ba = bytes(s.value("/UI/state"))

    for chunk in chunks(ba, 16):
        f.write(' {},\n'.format(
            ', '.join(map(hex, struct.unpack('B' * len(chunk), chunk)))))

    # The layout-designer keys may be absent, in which case s.value()
    # returns None and bytes() raises TypeError: skip those arrays.
    try:
        ba = bytes(s.value("/app/LayoutDesigner/geometry"))
        f.write("};\n\nstatic const unsigned char " +
                "defaultLayerDesignerUIgeometry[] =\n{\n")
        for chunk in chunks(ba, 16):
            f.write(' {},\n'.format(
                ', '.join(map(hex, struct.unpack('B' * len(chunk), chunk)))))
    except TypeError as ex:
        pass

    try:
        ba = bytes(s.value("/app/LayoutDesigner/state"))
        f.write("};\n\nstatic const unsigned char " +
                "defaultLayerDesignerUIstate[] =\n{\n")
        for chunk in chunks(ba, 16):
            f.write(' {},\n'.format(
                ', '.join(map(hex, struct.unpack('B' * len(chunk), chunk)))))
    except TypeError as ex:
        pass

    f.write("};\n\n#endif // UI_DEFAULTS_H\n")
| 33.340659 | 77 | 0.51648 | 0 | 0 | 76 | 0.025049 | 0 | 0 | 0 | 0 | 1,688 | 0.556361 |
03bebfed119097ef096738e58538d61c95362c67 | 31,667 | py | Python | fake_spectra/rate_network.py | xiaohanzai/fake_spectra | 170b42ac7732eb4f299617a1049cd3eabecfa3a7 | [
"MIT"
] | null | null | null | fake_spectra/rate_network.py | xiaohanzai/fake_spectra | 170b42ac7732eb4f299617a1049cd3eabecfa3a7 | [
"MIT"
] | null | null | null | fake_spectra/rate_network.py | xiaohanzai/fake_spectra | 170b42ac7732eb4f299617a1049cd3eabecfa3a7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""A rate network for neutral hydrogen following
Katz, Weinberg & Hernquist 1996, eq. 28-32."""
import os.path
import math
import numpy as np
import scipy.interpolate as interp
import scipy.optimize
class RateNetwork(object):
"""A rate network for neutral hydrogen following
Katz, Weinberg & Hernquist 1996, astro-ph/9509107, eq. 28-32.
Most internal methods are CamelCapitalized and follow a convention that
they are named like the process and then the ion they refer to.
eg:
CollisionalExciteHe0 is the neutral Helium collisional excitation rate.
RecombHp is the recombination rate for ionized hydrogen.
Externally useful methods (the API) are named like get_*.
These are:
get_temp() - gets the temperature from the density and internal energy.
get_cooling_rate() - gets the total cooling rate from density and internal energy.
get_neutral_fraction() - gets the neutral fraction from the rate network given density and internal energy.
Two useful helper functions:
get_equilib_ne() - gets the equilibrium electron density.
get_ne_by_nh() - gets the above, divided by the hydrogen density (Gadget reports this as ElectronAbundance).
Constructor arguments:
redshift - the redshift at which to evaluate the cooling. Affects the photoionization rate,
the Inverse Compton cooling and the self shielding threshold.
photo_factor - Factor by which to multiply the UVB amplitude.
f_bar - Baryon fraction. Omega_b / Omega_cdm.
converge - Tolerance to which the rate network should be converged.
selfshield - Flag to enable self-shielding following Rahmati 2013
cool - which cooling rate coefficient table to use.
Supported are: KWH (original Gadget rates)
Nyx (rates used in Nyx (Lukic 2015))
Sherwood (rates used in Sherwood simulations (Bolton 2017))
Default is Sherwood
recomb - which recombination rate table to use.
Supported are: C92 (Cen 1992, the Gadget default)
V96 (Verner & Ferland 1996, more accurate rates).
B06 (Badnell 2006 rates, current cloudy defaults. Very similar to V96).
collisional - Flag to enable collisional ionizations.
treecool_file - File to read a UV background from. Matches format used by Gadget.
"""
    def __init__(self,redshift, photo_factor = 1., f_bar = 0.17, converge = 1e-7, selfshield=True, cool="Sherwood", recomb="V96", collisional=True, treecool_file="data/TREECOOL_ep_2018p"):
        """Set up rate tables and the UV background for the given redshift;
        see the class docstring for the meaning of each argument."""
        # Recombination coefficient table (default Verner & Ferland 96).
        # NOTE(review): any unrecognised value silently falls back to Cen 92,
        # unlike `cool` below which raises.
        if recomb == "V96":
            self.recomb = RecombRatesVerner96()
        elif recomb == "B06":
            self.recomb = RecombRatesBadnell()
        else:
            self.recomb = RecombRatesCen92()
        self.photo = PhotoRates(treecool_file=treecool_file)
        self.photo_factor = photo_factor
        self.f_bar = f_bar
        # Cooling coefficient table; a bad value raises.
        if cool == "KWH":
            self.cool = CoolingRatesKWH92()
        elif cool == "Sherwood":
            self.cool = CoolingRatesSherwood()
        elif cool == "Nyx":
            self.cool = CoolingRatesNyx()
        else:
            raise ValueError("Not supported")
        #Extra helium reionization photoheating model (off by default;
        #parameters used by _he_reion_factor).
        self.hub = 0.7
        self.he_thresh = 10
        self.he_amp = 1
        self.he_exp = 0
        self.he_model_on = False
        #proton mass in g
        self.protonmass = 1.67262178e-24
        self.redshift = redshift
        self.converge = converge
        self.selfshield = selfshield
        self.collisional = collisional
        zz = [0, 1, 2, 3, 4, 5, 6, 7, 8]
        #Tables for the self-shielding correction. Note these are not well-measured for z > 5!
        gray_opac = [2.59e-18,2.37e-18,2.27e-18, 2.15e-18, 2.02e-18, 1.94e-18, 1.82e-18, 1.71e-18, 1.60e-18]
        # Gray opacity spline in redshift, consumed by _self_shield_dens.
        self.Gray_ss = interp.InterpolatedUnivariateSpline(zz, gray_opac)
def get_temp(self, density, ienergy, helium=0.24):
"""Get the equilibrium temperature at given internal energy.
density is gas density in protons/cm^3
Internal energy is in J/kg == 10^-10 ergs/g.
helium is a mass fraction"""
ne = self.get_equilib_ne(density, ienergy, helium)
nh = density * (1-helium)
return self._get_temp(ne/nh, ienergy, helium)
    def get_cooling_rate(self, density, ienergy, helium=0.24, photoheating=False):
        """Get the total cooling rate for a temperature and density. Negative means heating.

        density: protons/cm^3; ienergy: J/kg; helium: mass fraction.
        When photoheating is True, the (scaled) UVB photoheating is subtracted.
        """
        ne = self.get_equilib_ne(density, ienergy, helium)
        nh = density * (1-helium)
        temp = self._get_temp(ne/nh, ienergy, helium)
        # Species number densities at ionisation equilibrium.
        nH0 = self._nH0(nh, temp, ne)
        nHe0 = self._nHe0(nh, temp, ne)
        nHp = self._nHp(nh, temp, ne)
        nHep = self._nHep(nh, temp, ne)
        nHepp = self._nHepp(nh, temp, ne)
        #This is the collisional excitation and ionisation rate.
        LambdaCollis = ne * (self.cool.CollisionalH0(temp) * nH0 +
                             self.cool.CollisionalHe0(temp) * nHe0 +
                             self.cool.CollisionalHeP(temp) * nHep)
        LambdaRecomb = ne * (self.cool.RecombHp(temp) * nHp +
                             self.cool.RecombHeP(temp) * nHep +
                             self.cool.RecombHePP(temp) * nHepp)
        # Free-free (bremsstrahlung) from all ions, plus inverse Compton
        # cooling off the CMB (redshift dependent).
        LambdaFF = ne * (self.cool.FreeFree(temp, 1)*(nHp + nHep) + self.cool.FreeFree(temp, 2)*nHepp)
        LambdaCmptn = ne * self.cool.InverseCompton(temp, self.redshift)
        Lambda = LambdaCollis + LambdaRecomb + LambdaFF + LambdaCmptn
        Heating = 0
        if photoheating:
            Heating = nH0 * self.photo.epsH0(self.redshift)
            Heating += nHe0 * self.photo.epsHe0(self.redshift)
            Heating += nHep * self.photo.epsHep(self.redshift)
            Heating *= self.photo_factor
            # Optional density-dependent boost modelling HeII reionization.
            if self.he_model_on:
                Heating *= self._he_reion_factor(density)
        return Lambda - Heating
    def get_equilib_ne(self, density, ienergy,helium=0.24):
        """Solve the system of equations for photo-ionisation equilibrium,
        starting with ne = nH and continuing until convergence.

        density is gas density in protons/cm^3
        Internal energy is in J/kg == 10^-10 ergs/g.
        helium is a mass fraction.
        """
        #Get hydrogen number density
        nh = density * (1-helium)
        # Temperature and ionisation state depend on each other, so iterate
        # ne -> ne(T(ne)) to a fixed point from the fully-ionised guess ne=nh.
        rooted = lambda ne: self._ne(nh, self._get_temp(ne/nh, ienergy, helium=helium), ne, helium=helium)
        ne = scipy.optimize.fixed_point(rooted, nh,xtol=self.converge)
        # Sanity check: the returned point really is converged.
        assert np.all(np.abs(rooted(ne) - ne) < self.converge)
        return ne
def get_ne_by_nh(self, density, ienergy, helium=0.24):
"""Same as above, but get electrons per proton."""
return self.get_equilib_ne(density, ienergy, helium)/(density*(1-helium))
def get_neutral_fraction(self, density, ienergy, helium=0.24):
"""Get the neutral hydrogen fraction at a given temperature and density.
density is gas density in protons/cm^3
Internal energy is in J/kg == 10^-10 ergs/g.
helium is a mass fraction.
"""
ne = self.get_equilib_ne(density, ienergy, helium=helium)
nh = density * (1-helium)
temp = self._get_temp(ne/nh, ienergy, helium)
return self._nH0(nh, temp, ne) / nh
    def _nH0(self, nh, temp, ne):
        """The neutral hydrogen number density. Eq. 33 of KWH."""
        alphaHp = self.recomb.alphaHp(temp)
        # Collisional ionisation can be switched off via the constructor flag.
        GammaeH0 = self.collisional * self.recomb.GammaeH0(temp)
        # Photoionisation per electron, rescaled by photo_factor and the
        # Rahmati self-shielding correction.
        photorate = self.photo.gH0(self.redshift)/ne*self.photo_factor*self._self_shield_corr(nh, temp)
        return nh * alphaHp/ (alphaHp + GammaeH0 + photorate)
def _nHp(self, nh, temp, ne):
"""The ionised hydrogen number density. Eq. 34 of KWH."""
return nh - self._nH0(nh, temp, ne)
    def _nHep(self, nh, temp, ne):
        """The ionised helium number density, divided by the helium number fraction. Eq. 35 of KWH."""
        # Total He recombination includes the dielectronic channel.
        alphaHep = self.recomb.alphaHep(temp) + self.recomb.alphad(temp)
        alphaHepp = self.recomb.alphaHepp(temp)
        photofac = self.photo_factor*self._self_shield_corr(nh, temp)
        # Total ionisation rates (collisional + photo) out of He0 and He+.
        GammaHe0 = self.collisional * self.recomb.GammaeHe0(temp) + self.photo.gHe0(self.redshift)/ne*photofac
        GammaHep = self.collisional * self.recomb.GammaeHep(temp) + self.photo.gHep(self.redshift)/ne*photofac
        return nh / (1 + alphaHep / GammaHe0 + GammaHep/alphaHepp)
    def _nHe0(self, nh, temp, ne):
        """The neutral helium number density, divided by the helium number fraction. Eq. 36 of KWH."""
        alphaHep = self.recomb.alphaHep(temp) + self.recomb.alphad(temp)
        photofac = self.photo_factor*self._self_shield_corr(nh, temp)
        GammaHe0 = self.collisional * self.recomb.GammaeHe0(temp) + self.photo.gHe0(self.redshift)/ne*photofac
        # He0 follows from He+ via the recombination/ionisation balance.
        return self._nHep(nh, temp, ne) * alphaHep / GammaHe0
    def _nHepp(self, nh, temp, ne):
        """The doubly ionised helium number density, divided by the helium number fraction. Eq. 37 of KWH."""
        photofac = self.photo_factor*self._self_shield_corr(nh, temp)
        GammaHep = self.collisional * self.recomb.GammaeHep(temp) + self.photo.gHep(self.redshift)/ne*photofac
        alphaHepp = self.recomb.alphaHepp(temp)
        # He++ follows from He+ via the ionisation/recombination balance.
        return self._nHep(nh, temp, ne) * GammaHep / alphaHepp
def _ne(self, nh, temp, ne, helium=0.24):
"""The electron number density. Eq. 38 of KWH."""
yy = helium / 4 / (1 - helium)
return self._nHp(nh, temp, ne) + yy * self._nHep(nh, temp, ne) + 2* yy * self._nHepp(nh, temp, ne)
def _self_shield_corr(self, nh, temp):
"""Photoionisation rate as a function of density from Rahmati 2012, eq. 14.
Calculates Gamma_{Phot} / Gamma_{UVB}.
Inputs: hydrogen density, temperature
n_H
The coefficients are their best-fit from appendix A."""
if not self.selfshield:
return np.ones_like(nh)
nSSh = 1.003*self._self_shield_dens(self.redshift, temp)
return 0.98*(1+(nh/nSSh)**1.64)**-2.28+0.02*(1+nh/nSSh)**-0.84
    def _self_shield_dens(self,redshift, temp):
        """Calculate the critical self-shielding density. Rahmati 2012 eq. 13.
        gray_opac is a parameter of the UVB used.
        gray_opac is in cm^2 (2.49e-18 is HM01 at z=3)
        temp is particle temperature in K
        f_bar is the baryon fraction. 0.17 is roughly 0.045/0.265
        Returns density in atoms/cm^3"""
        #Temperature in units of 10^4 K.
        T4 = temp/1e4
        #Photoionisation rate in units of 10^-12 s^-1.
        G12 = self.photo.gH0(redshift)/1e-12
        return 6.73e-3 * (self.Gray_ss(redshift) / 2.49e-18)**(-2./3)*(T4)**0.17*(G12)**(2./3)*(self.f_bar/0.17)**(-1./3)
def _he_reion_factor(self, density):
"""Compute a density dependent correction factor to the heating rate which can model the effect of helium reionization.
Argument: Gas density in protons/cm^3."""
#Newton's constant (cgs units)
gravity = 6.672e-8
#100 km/s/Mpc in h/sec
hubble = 3.2407789e-18
omegab = 0.0483
atime = 1/(1+self.redshift)
rhoc = 3 * (self.hub* hubble)**2 /(8* math.pi * gravity)
overden = self.protonmass * density /(omegab * rhoc * atime**(-3))
if overden >= self.he_thresh:
overden = self.he_thresh
return self.he_amp * overden**self.he_exp
    def _get_temp(self, nebynh, ienergy, helium=0.24):
        """Compute temperature (in K) from internal energy and electron density.
        Uses: internal energy
              electron abundance per H atom (ne/nH)
              hydrogen mass fraction (0.76)
        Internal energy is in J/kg, internal gadget units, == 10^-10 ergs/g.
        Factor to convert U (J/kg) to T (K) : U = N k T / (γ - 1)
        T = U (γ-1) μ m_P / k_B
        where k_B is the Boltzmann constant
        γ is 5/3, the perfect gas constant
        m_P is the proton mass
        μ = 1 / (mean no. molecules per unit atomic weight)
          = 1 / (X + Y /4 + E)
        where E = Ne * X, and Y = (1-X).
        Can neglect metals as they are heavy.
        Leading contribution is from electrons, which is already included
        [+ Z / (12->16)] from metal species
        [+ Z/16*4 ] for OIV from electrons."""
        #convert U (J/kg) to T (K) : U = N k T / (γ - 1)
        #T = U (γ-1) μ m_P / k_B
        #where k_B is the Boltzmann constant
        #γ is 5/3, the perfect gas constant
        #m_P is the proton mass
        #μ is 1 / (mean no. molecules per unit atomic weight) calculated in loop.
        #Internal energy units are 10^-10 erg/g
        hy_mass = 1 - helium
        #μ * U in erg/g: 4/(hy_mass*(3+4*ne/nH)+1) == 1/(X + (1-X)/4 + ne*X),
        #the inverse mean number of particles per atomic mass unit.
        muienergy = 4 / (hy_mass * (3 + 4*nebynh) + 1)*ienergy*1e10
        #Boltzmann constant (cgs)
        boltzmann=1.38066e-16
        gamma=5./3
        #So for T in K, boltzmann in erg/K, internal energy has units of erg/g
        temp = (gamma-1) * self.protonmass / boltzmann * muienergy
        return temp
class RecombRatesCen92(object):
    """Recombination and collisional ionization rates as functions of temperature (K).
    Fits are from KWH 06, astro-ph/9509107, Table 2, originally due to Cen 1992.
    These are the rates used by Illustris."""
    def alphaHp(self, temp):
        """Recombination rate for H+ (ionized hydrogen) in cm^3/s. Temp in K."""
        denom = np.sqrt(temp) * np.power(temp/1000, 0.2) * (1 + np.power(temp/1e6, 0.7))
        return 8.4e-11 / denom
    def alphaHep(self, temp):
        """Recombination rate for He+ (singly ionized helium) in cm^3/s. Temp in K."""
        return 1.5e-10 / np.power(temp, 0.6353)
    def alphad(self, temp):
        """Dielectronic recombination rate in cm^3/s. Temp in K."""
        boltzfac = np.exp(-4.7e5/temp) * (1 + 0.3*np.exp(-9.4e4/temp))
        return 1.9e-3 / np.power(temp, 1.5) * boltzfac
    def alphaHepp(self, temp):
        """Recombination rate for He++ (doubly ionized helium) in cm^3/s. Temp in K."""
        return 4 * self.alphaHp(temp)
    def _collisional(self, temp, coeff, exc_temp):
        """Shared Cen 92 collisional-ionization fitting form (cm^3/s)."""
        return coeff * np.sqrt(temp) * np.exp(-exc_temp/temp) / (1 + np.sqrt(temp/1e5))
    def GammaeH0(self, temp):
        """Collisional ionization rate for H0 in cm^3/s. Temp in K."""
        return self._collisional(temp, 5.85e-11, 157809.1)
    def GammaeHe0(self, temp):
        """Collisional ionization rate for He0 in cm^3/s. Temp in K."""
        return self._collisional(temp, 2.38e-11, 285335.4)
    def GammaeHep(self, temp):
        """Collisional ionization rate for He+ in cm^3/s. Temp in K."""
        return self._collisional(temp, 5.68e-12, 631515.0)
class RecombRatesVerner96(object):
    """Recombination and collisional ionization rates as functions of temperature (K).
    Recombination rates: fits from Verner & Ferland 1996 (astro-ph/9509083).
    Collisional rates: fits from Voronov 1997
    (http://www.sciencedirect.com/science/article/pii/S0092640X97907324).
    In a very photoionised medium these change the neutral hydrogen abundance
    by roughly 10% relative to Cen 1992.  These are the rates used by Nyx.
    """
    def _Verner96Fit(self, temp, aa, bb, temp0, temp1):
        """Fitting function of Verner & Ferland 1996 (astro-ph/9509083)."""
        st0 = np.sqrt(temp/temp0)
        st1 = np.sqrt(temp/temp1)
        return aa / ( st0 * (1 + st0)**(1-bb)*(1+st1)**(1+bb) )
    def alphaHp(self, temp):
        """Recombination rate for H+ (ionized hydrogen) in cm^3/s.
        The V&F96 fit is accurate to <1% in the worst case.  Temp in K."""
        #Line 1 of V&F96 table 1.
        return self._Verner96Fit(temp, aa=7.982e-11, bb=0.748, temp0=3.148, temp1=7.036e+05)
    def alphaHep(self, temp):
        """Recombination rate for He+ (singly ionized helium) in cm^3/s.
        Accurate to ~2% for T < 1e6 K and ~5% for T < 1e10 K.  Temp in K."""
        #VF96 table 1, lines 2 and 3: one fit that is more accurate below
        #1e6 K and one valid up to 1e10 K.  They disagree by ~10% at 1e6 K
        #and cross near T ~ 3e5 K, so we blend linearly over
        #[swtmp - deltat, swtmp + deltat] to avoid a sharp feature.
        fit_low = self._Verner96Fit(temp, aa=3.294e-11, bb=0.6910, temp0=1.554e+01, temp1=3.676e+07)
        fit_high = self._Verner96Fit(temp, aa=9.356e-10, bb=0.7892, temp0=4.266e-02, temp1=4.677e+06)
        swtmp = 7e5
        deltat = 1e5
        upper = swtmp + deltat
        lower = swtmp - deltat
        blended = (fit_low * (upper - temp) + fit_high * (temp - lower))/(2*deltat)
        return (temp < lower)*fit_low + (temp > upper)*fit_high + (upper > temp)*(temp > lower)*blended
    def alphad(self, temp):
        """Dielectronic recombination rate for He+ in cm^3/s.  Temp in K.
        Aldrovandi & Pequignot 73 value as used in Nyx, Sherwood and Cen 1992,
        corrected by the factor 0.65 of Burgess & Tworkowski 1976 (fig 1),
        with a slightly more accurate exponent."""
        return 1.23e-3 / np.power(temp,1.5) * np.exp(-4.72e5/temp)*(1+0.3*np.exp(-9.4e4/temp))
    def alphaHepp(self, temp):
        """Recombination rate for He++ (doubly ionized helium) in cm^3/s.
        Accurate to 2%.  Temp in K."""
        #Line 4 of V&F96 table 1.
        return self._Verner96Fit(temp, aa=1.891e-10, bb=0.7524, temp0=9.370, temp1=2.774e6)
    def _Voronov96Fit(self, temp, dE, PP, AA, XX, KK):
        """Collisional rate fitting function, eq. 1 of Voronov 1997.
        The fit is accurate to 10%, but the underlying data only to 50%."""
        bolevk = 8.61734e-5  # Boltzmann constant in eV/K
        uu = dE / (bolevk * temp)
        return AA * (1 + PP * np.sqrt(uu))/(XX+uu) * uu**KK * np.exp(-uu)
    def GammaeH0(self, temp):
        """Collisional ionization rate for H0 in cm^3/s. Temp in K. Voronov 97, Table 1."""
        return self._Voronov96Fit(temp, 13.6, 0, 0.291e-07, 0.232, 0.39)
    def GammaeHe0(self, temp):
        """Collisional ionization rate for He0 in cm^3/s. Temp in K. Voronov 97, Table 1."""
        return self._Voronov96Fit(temp, 24.6, 0, 0.175e-07, 0.180, 0.35)
    def GammaeHep(self, temp):
        """Collisional ionization rate for He+ in cm^3/s. Temp in K. Voronov 97, Table 1."""
        return self._Voronov96Fit(temp, 54.4, 1, 0.205e-08, 0.265, 0.25)
class RecombRatesBadnell(RecombRatesVerner96):
    """Recombination rates and collisional ionization rates, as a function of temperature.
    Recombination rates are the fit from Badnell's website: http://amdpp.phys.strath.ac.uk/tamoc/RR/#partial.
    Collisional ionization rates are inherited unchanged from RecombRatesVerner96 (Voronov 1997).
    """
    def _RecombRateFit_lowcharge_ion(self, temp, aa, bb, cc, temp0, temp1, temp2):
        """Formula used as a fitting function in Verner & Ferland 1996 (astro-ph/9509083)/ See http://amdpp.phys.strath.ac.uk/tamoc/RR/#partial."""
        sqrttt0 = np.sqrt(temp/temp0)
        sqrttt1 = np.sqrt(temp/temp1)
        #Badnell's low-charge-ion generalisation: the exponent gets a
        #temperature-dependent correction term cc * exp(-temp2/temp).
        BB = bb + cc*np.exp(-temp2/temp)
        return aa / ( sqrttt0 * (1 + sqrttt0)**(1-BB)*(1+sqrttt1)**(1+BB) )
    def alphaHp(self,temp):
        """Recombination rate for H+, ionized hydrogen, in cm^3/s.
        Temp in K."""
        #Badnell fit coefficients for H+, same functional form as V&F96.
        return self._Verner96Fit(temp, aa=8.318e-11, bb=0.7472, temp0=2.965, temp1=7.001e5)
    def alphaHep(self,temp):
        """Recombination rate for He+, singly ionized helium, in cm^3/s.
        Temp in K."""
        #Badnell fit coefficients for He+, same functional form as V&F96.
        return self._Verner96Fit(temp, aa=1.818E-10, bb=0.7492, temp0=10.17, temp1=2.786e6)
    def alphaHepp(self, temp):
        """Recombination rate for doubly ionized helium, in cm^3/s.
        Temp in K."""
        #Badnell fit coefficients for He++, using the low-charge-ion form.
        return self._RecombRateFit_lowcharge_ion(temp, aa=5.235E-11, bb=0.6988, cc=0.0829, temp0=7.301, temp1=4.475e6, temp2 = 1.682e5)
class PhotoRates(object):
    """Photoionization and photoheating rates for H and He species.
    Eq. 29 of KWH 96; values are interpolated (in log10(1+z)) from a
    TREECOOL table loaded at construction time."""
    def __init__(self, treecool_file="data/TREECOOL_ep_2018p"):
        #TREECOOL table columns:
        # log_10(1+z), Gamma_HI, Gamma_HeI, Gamma_HeII, Qdot_HI, Qdot_HeI, Qdot_HeII
        #Gamma: photoionization rates in s^-1; Qdot: photoheating rates in erg s^-1.
        try:
            table = np.loadtxt(treecool_file)
        except OSError:
            #Fall back to a path relative to this module's directory.
            fallback = os.path.join(os.path.dirname(os.path.realpath(__file__)), treecool_file)
            table = np.loadtxt(fallback)
        log1z_grid = table[:, 0]
        gammas = table[:, 1:4]
        heats = table[:, 4:7]
        assert np.shape(log1z_grid)[0] == np.shape(gammas)[0]
        spline = interp.InterpolatedUnivariateSpline
        self.Gamma_HI = spline(log1z_grid, gammas[:, 0])
        self.Gamma_HeI = spline(log1z_grid, gammas[:, 1])
        self.Gamma_HeII = spline(log1z_grid, gammas[:, 2])
        self.Eps_HI = spline(log1z_grid, heats[:, 0])
        self.Eps_HeI = spline(log1z_grid, heats[:, 1])
        self.Eps_HeII = spline(log1z_grid, heats[:, 2])
    def gH0(self, redshift):
        """Photoionization rate for neutral Hydrogen."""
        return self.Gamma_HI(np.log10(1 + redshift))
    def gHe0(self, redshift):
        """Photoionization rate for neutral Helium."""
        return self.Gamma_HeI(np.log10(1 + redshift))
    def gHep(self, redshift):
        """Photoionization rate for singly ionized Helium."""
        return self.Gamma_HeII(np.log10(1 + redshift))
    def epsH0(self, redshift):
        """Photoheating rate for neutral Hydrogen."""
        return self.Eps_HI(np.log10(1 + redshift))
    def epsHe0(self, redshift):
        """Photoheating rate for neutral Helium."""
        return self.Eps_HeI(np.log10(1 + redshift))
    def epsHep(self, redshift):
        """Photoheating rate for singly ionized Helium."""
        return self.Eps_HeII(np.log10(1 + redshift))
class CoolingRatesKWH92(object):
    """The cooling rates from KWH92, in erg s^-1 cm^-3 (cgs).
    All rates are divided by the abundance of the ions involved in the interaction.
    So we are computing the cooling rate divided by n_e n_X. Temperatures in K.
    None of these rates are original to KWH92, but are taken from Cen 1992,
    and originally from older references. The hydrogen rates in particular are probably inaccurate.
    Cen 1992 modified (arbitrarily) the excitation and ionisation rates for high temperatures.
    There is no collisional excitation rate for He0 - not sure why.
    References:
        Black 1981, from Lotz 1967, Seaton 1959, Burgess & Seaton 1960.
        Recombination rates are from Spitzer 1978.
        Free-free: Spitzer 1978.
    Collisional excitation and ionisation cooling rates are merged.
    """
    def __init__(self, tcmb=2.7255, t5_corr=1e5, recomb=None):
        """tcmb: z=0 CMB temperature in K.
        t5_corr: temperature (K) at which the Cen 92 high-T correction activates.
        recomb: rate-coefficient object; defaults to RecombRatesCen92."""
        self.tcmb = tcmb
        if recomb is None:
            self.recomb = RecombRatesCen92()
        else:
            self.recomb = recomb
        self.t5_corr = t5_corr
        #1 eV in ergs
        self.eVinergs = 1.60218e-12
        #boltzmann constant in erg/K
        self.kB = 1.38064852e-16
    def _t5(self, temp):
        """Commonly used Cen 1992 correction factor for large temperatures.
        This is implemented so that the cooling rates have the right
        asymptotic behaviour. However, Cen erroneously imposes this correction at T=1e5,
        which is too small: the Black 1981 rates these are based on should be good
        until 5e5 at least, where the correction factor has a 10% effect already.
        More modern tables thus impose it at T=5e7, which is still arbitrary but should be harmless.
        """
        #Bug fix: this previously read the undefined bare name "t5_corr",
        #raising a NameError whenever any cooling rate was evaluated.
        return 1+(temp/self.t5_corr)**0.5
    def CollisionalExciteH0(self, temp):
        """Collisional excitation cooling rate for n_H0 and n_e. Gadget calls this BetaH0."""
        return 7.5e-19 * np.exp(-118348.0/temp) /self._t5(temp)
    def CollisionalExciteHeP(self, temp):
        """Collisional excitation cooling rate for n_He+ and n_e. Gadget calls this BetaHep."""
        return 5.54e-17 * temp**(-0.397)*np.exp(-473638./temp)/self._t5(temp)
    def CollisionalExciteHe0(self, temp):
        """Collisional excitation cooling rate for n_He0 and n_e.
        This is listed in Cen 92 but neglected in KWH 97, presumably because it is very small."""
        return 9.1e-27 * temp**(-0.1687) * np.exp(-473638/temp) / self._t5(temp)
    def CollisionalIonizeH0(self, temp):
        """Collisional ionisation cooling rate for n_H0 and n_e. Gadget calls this GammaeH0."""
        #13.5984 eV is the ionisation potential of H0.
        return 13.5984 * self.eVinergs * self.recomb.GammaeH0(temp)
    def CollisionalIonizeHe0(self, temp):
        """Collisional ionisation cooling rate for n_He0 and n_e. Gadget calls this GammaeHe0."""
        #24.5874 eV is the ionisation potential of He0.
        return 24.5874 * self.eVinergs * self.recomb.GammaeHe0(temp)
    def CollisionalIonizeHeP(self, temp):
        """Collisional ionisation cooling rate for n_He+ and n_e. Gadget calls this GammaeHep."""
        #54.41776 eV is the ionisation potential of He+.
        return 54.417760 * self.eVinergs * self.recomb.GammaeHep(temp)
    def CollisionalH0(self, temp):
        """Total collisional cooling for H0."""
        return self.CollisionalExciteH0(temp) + self.CollisionalIonizeH0(temp)
    def CollisionalHe0(self, temp):
        """Total collisional cooling for He0."""
        return self.CollisionalExciteHe0(temp) + self.CollisionalIonizeHe0(temp)
    def CollisionalHeP(self, temp):
        """Total collisional cooling for He+."""
        return self.CollisionalExciteHeP(temp) + self.CollisionalIonizeHeP(temp)
    def RecombHp(self, temp):
        """Recombination cooling rate for H+ and e. Gadget calls this AlphaHp."""
        return 0.75 * self.kB * temp * self.recomb.alphaHp(temp)
    def RecombHeP(self, temp):
        """Recombination cooling rate for He+ and e. Gadget calls this AlphaHep."""
        #I'm not sure why they use 0.75 kT as the free energy of an electron.
        #I would guess this is explained in Spitzer 1978.
        return 0.75 * self.kB * temp * self.recomb.alphaHep(temp)+ self._RecombDielect(temp)
    def RecombHePP(self, temp):
        """Recombination cooling rate for He++ and e. Gadget calls this AlphaHepp."""
        return 0.75 * self.kB * temp * self.recomb.alphaHepp(temp)
    def _RecombDielect(self, temp):
        """Dielectric recombination cooling rate for He+ and e. Gadget calls this Alphad."""
        #6.526e-11 erg: presumably the energy released per dielectronic
        #recombination -- TODO(review) confirm against Cen 92.
        return 6.526e-11*self.recomb.alphad(temp)
    def FreeFree(self, temp, zz):
        """Free-free cooling rate for electrons scattering on ions without being captured.
        Factors here are n_e and total ionized species:
            (FreeFree(zz=1)*(n_H+ + n_He+) + FreeFree(zz=2)*n_He++)"""
        return 1.426e-27*np.sqrt(temp)*zz**2*self._gff(temp,zz)
    def _gff(self, temp, zz):
        """Formula for the Gaunt factor. KWH takes this from Spitzer 1978.
        Note zz is unused here: the KWH fit has no explicit charge dependence."""
        _ = zz
        return 1.1+0.34*np.exp(-(5.5 - np.log10(temp))**2/3.)
    def InverseCompton(self, temp, redshift):
        """Cooling rate for inverse Compton from the microwave background.
        Multiply this only by n_e. Note the CMB temperature is hardcoded in KWH92 to 2.7."""
        tcmb_red = self.tcmb * (1+redshift)
        #Thompson cross-section in cm^2
        sigmat = 6.6524e-25
        #Radiation density constant, 4 sigma_stefan-boltzmann / c in erg cm^-3 K^-4
        rad_dens = 7.5657e-15
        #Electron mass in g
        me = 9.10938e-28
        #Speed of light in cm/s
        cc = 2.99792e10
        return 4 * sigmat * rad_dens / (me*cc) * tcmb_red**4 * self.kB * (temp - tcmb_red)
class CoolingRatesSherwood(CoolingRatesKWH92):
    """The cooling rates used in the Sherwood simulation, Bolton et al 2017, in erg s^-1 cm^-3 (cgs).
    Differences from KWH92 are updated recombination and collisional ionization rates, and the use of a
    larger temperature correction factor than Cen 92.
    """
    def __init__(self, tcmb=2.7255, recomb=None):
        #Bug fix: the parent constructor was previously called without `self`
        #(raising a TypeError) and was handed the RecombRatesVerner96 *class*
        #rather than an instance. Honour an explicitly supplied recomb object,
        #defaulting to the Verner 96 rates Sherwood uses.
        if recomb is None:
            recomb = RecombRatesVerner96()
        CoolingRatesKWH92.__init__(self, tcmb=tcmb, t5_corr=5e7, recomb=recomb)
class CoolingRatesNyx(CoolingRatesKWH92):
    """The cooling rates used in the Nyx paper Lukic 2014, 1406.6361, in erg s^-1 cm^-3 (cgs).
    All rates are divided by the abundance of the ions involved in the interaction.
    So we are computing the cooling rate divided by n_e n_X. Temperatures in K.
    Major differences from KWH are the use of the Scholz & Walter 1991
    hydrogen collisional cooling rates, a less aggressive high temperature correction for helium, and
    Shapiro & Kang 1987 for free free.
    Older Black 1981 recombination cooling rates are used!
    They use the recombination rates from Verner & Ferland 96, but do not change the cooling rates to match.
    Ditto the ionization rates from Voronov 1997: they should also use these rates for collisional ionisation,
    although this is harder because Sholz & Walter don't break their rates into ionization and excitation.
    References:
        Scholz & Walters 1991 (0.45% accuracy)
        Black 1981 (recombination and helium)
        Shapiro & Kang 1987
    """
    def __init__(self, tcmb=2.7255, recomb=None):
        #Bug fix: the parent constructor was previously invoked without
        #passing `self`, raising a TypeError on instantiation.
        CoolingRatesKWH92.__init__(self, tcmb=tcmb, t5_corr=5e7, recomb=recomb)
    def CollisionalH0(self, temp):
        """Collisional cooling rate for n_H0 and n_e. Gadget calls this BetaH0 + GammaeH0.
        Formula from Eq. 23, Table 4 of Scholz & Walters, claimed good to 0.45 %.
        Note though that they have two datasets which differ by a factor of two.
        Differs from Cen 92 by a factor of two."""
        #Technically only good for T > 2000.
        y = np.log(temp)
        #Constant is 0.75/k_B in Rydberg
        Ryd = 2.1798741e-11
        tot = -0.75/self.kB*Ryd/temp
        #Polynomial coefficients (in -log T) from Scholz & Walters table 4,
        #with separate fits below and above 1e5 K.
        coeffslowT = [213.7913, 113.9492, 25.06062, 2.762755, 0.1515352, 3.290382e-3]
        coeffshighT = [271.25446, 98.019455, 14.00728, 0.9780842, 3.356289e-2, 4.553323e-4]
        for j in range(6):
            tot += ((temp < 1e5)*coeffslowT[j]+(temp >=1e5)*coeffshighT[j])*(-y)**j
        return 1e-20 * np.exp(tot)
    def RecombHp(self, temp):
        """Recombination cooling rate for H+ and e. Gadget calls this AlphaHp.
        Differs by O(10%) until 3x10^6."""
        return 2.851e-27 * np.sqrt(temp) * (5.914 - 0.5 * np.log(temp) + 0.01184 * temp**(1./3))
    def RecombHePP(self, temp):
        """Recombination cooling rate for He++ and e. Gadget calls this AlphaHepp.
        Differs from Cen 92 by 10% until ~10^7."""
        return 1.140e-26 * np.sqrt(temp) * (6.607 - 0.5 * np.log(temp) + 7.459e-3 * temp**(1./3))
    def _gff(self, temp, zz):
        """Formula for the Gaunt factor from Shapiro & Kang 1987. ZZ is 1 for H+ and He+ and 2 for He++.
        This is almost identical to the KWH rate but not continuous."""
        #This is not continuous. Check the original reference.
        little = (temp/zz**2 <= 3.2e5)
        lt = np.log10(temp/zz**2)
        return little * (0.79464 + 0.1243*lt) + np.logical_not(little) * ( 2.13164 - 0.1240 * lt)
| 49.94795 | 188 | 0.639688 | 31,438 | 0.992455 | 0 | 0 | 0 | 0 | 0 | 0 | 15,477 | 0.488588 |
03c09547958d70bb46801b4ba91b9730dc032295 | 1,949 | py | Python | adjuftments/utils/splitwise_auth_tool/splitwise_credentials.py | juftin/adjuftments | 0833923053db5090cd5aac6dd035f4058a81800f | [
"MIT"
] | null | null | null | adjuftments/utils/splitwise_auth_tool/splitwise_credentials.py | juftin/adjuftments | 0833923053db5090cd5aac6dd035f4058a81800f | [
"MIT"
] | null | null | null | adjuftments/utils/splitwise_auth_tool/splitwise_credentials.py | juftin/adjuftments | 0833923053db5090cd5aac6dd035f4058a81800f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Author:: Justin Flannery (mailto:juftin@juftin.com)
"""
Simple Flask Server to Expose Credentials
"""
from flask import Flask, jsonify, redirect, render_template, request, session, url_for
from splitwise import Splitwise
from adjuftments.config import SplitwiseConfig
app = Flask(__name__)
# NOTE(review): a hard-coded secret key is tolerable for this one-off local
# OAuth helper, but must be replaced before running any deployed service.
app.secret_key = "RandomSecretString"
@app.route("/")
def home():
    """Landing page: jump straight to the credentials page when the OAuth
    flow has already completed, otherwise show the login prompt."""
    if 'access_token' not in session:
        return render_template("home.html")
    return redirect(url_for("credentials"))
@app.route("/login")
def login():
    """Start the Splitwise OAuth flow: obtain an authorize URL and redirect the user to it."""
    splitwise_object = Splitwise(consumer_key=SplitwiseConfig.SPLITWISE_CONSUMER_KEY,
                                 consumer_secret=SplitwiseConfig.SPLITWISE_CONSUMER_SECRET)
    url, secret = splitwise_object.getAuthorizeURL()
    # The request secret must survive until the /authorize callback.
    session['secret'] = secret
    return redirect(url)
@app.route("/authorize")
def authorize():
    """OAuth callback: exchange the oauth_token + verifier for an access token."""
    # Without the request secret stored by /login the flow cannot complete.
    if 'secret' not in session:
        return redirect(url_for("home"))
    oauth_token = request.args.get('oauth_token')
    oauth_verifier = request.args.get('oauth_verifier')
    splitwise_object = Splitwise(consumer_key=SplitwiseConfig.SPLITWISE_CONSUMER_KEY,
                                 consumer_secret=SplitwiseConfig.SPLITWISE_CONSUMER_SECRET)
    access_token = splitwise_object.getAccessToken(oauth_token, session['secret'], oauth_verifier)
    session['access_token'] = access_token
    return redirect(url_for("credentials"))
@app.route("/credentials")
def credentials():
    """Render the complete Splitwise credential set as a JSON document."""
    access = session["access_token"]
    return jsonify(dict(SPLITWISE_CONSUMER_KEY=SplitwiseConfig.SPLITWISE_CONSUMER_KEY,
                        SPLITWISE_CONSUMER_SECRET=SplitwiseConfig.SPLITWISE_CONSUMER_SECRET,
                        SPLITWISE_OAUTH_TOKEN=access["oauth_token"],
                        SPLITWISE_OAUTH_SECRET=access["oauth_token_secret"]))
if __name__ == "__main__":
    # Development server only: binds all interfaces with debug enabled,
    # which must never be exposed publicly.
    app.run(host="0.0.0.0", debug=True)
| 33.603448 | 98 | 0.710621 | 0 | 0 | 0 | 0 | 1,506 | 0.772704 | 0 | 0 | 389 | 0.19959 |
03c53fb902449f1a9cbebfac139cb7b318479b1e | 250 | py | Python | kobert_transformers/utils.py | LoveMeWithoutAll/KoBERT-Transformers | 5e30015ae1101b57758fbe10a4e2502bc530acc1 | [
"Apache-2.0"
] | null | null | null | kobert_transformers/utils.py | LoveMeWithoutAll/KoBERT-Transformers | 5e30015ae1101b57758fbe10a4e2502bc530acc1 | [
"Apache-2.0"
] | null | null | null | kobert_transformers/utils.py | LoveMeWithoutAll/KoBERT-Transformers | 5e30015ae1101b57758fbe10a4e2502bc530acc1 | [
"Apache-2.0"
] | null | null | null | from .tokenization_kobert import KoBertTokenizer
def get_tokenizer(cache_dir=None):
    """Load the KoBERT tokenizer from *cache_dir* when given, otherwise
    from the 'monologg/kobert' hub checkpoint."""
    source = 'monologg/kobert' if cache_dir is None else cache_dir
    return KoBertTokenizer.from_pretrained(source)
| 27.777778 | 65 | 0.768 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 17 | 0.068 |
03c611fb4f50a42c6f79fa67871d099851e47dda | 107 | py | Python | testtakepicture.py | 1082sqnatc/missionspacelab2019 | 439753c8e309ece98963f58c9bb443217e75364e | [
"Apache-2.0"
] | null | null | null | testtakepicture.py | 1082sqnatc/missionspacelab2019 | 439753c8e309ece98963f58c9bb443217e75364e | [
"Apache-2.0"
] | 12 | 2019-12-01T15:52:08.000Z | 2020-02-02T13:52:36.000Z | testtakepicture.py | 1082sqnatc/missionspacelab2019 | 439753c8e309ece98963f58c9bb443217e75364e | [
"Apache-2.0"
] | null | null | null | from src.takepicture import takePicture
# Capture frames 1 through 9, reporting each successful shot.
for x in range(1, 10):
    takePicture(x)
    print("success")
03c68b9524f9bd7a43776576e52572665d646a5b | 1,864 | py | Python | dcracer/config.py | wallarug/dcracer | e959f7eff30fcec426d97b5dbf4ff0aa4d57bf6d | [
"MIT"
] | null | null | null | dcracer/config.py | wallarug/dcracer | e959f7eff30fcec426d97b5dbf4ff0aa4d57bf6d | [
"MIT"
] | 1 | 2021-07-13T13:09:51.000Z | 2021-07-13T13:09:51.000Z | dcracer/config.py | wallarug/dcracer | e959f7eff30fcec426d97b5dbf4ff0aa4d57bf6d | [
"MIT"
] | null | null | null | '''
# Config
'''
import cv2
import numpy as np
import platform
import time
import sys
##
## Open CV Variables
##
# show the debug output for the open cv
DEMO_MODE = True
# set some variables for testing output
FONT = cv2.FONT_HERSHEY_SIMPLEX
# Min and Max Area Sizes
AREA_SIZE_STOP = 30
AREA_SIZE_TURN = 35
AREA_SIZE_PARK = 75
AREA_SIZE_TRAFFIC = 25
MAX_AREA_SIZE = 2000
# kernels
KERNEL_SIZE = 3
TRAFFIC_KERNEL_SIZE = 3
STOP_KERNEL_SIZE = 9
# traffic signal threshold counters
COUNTER_THRESHOLD_GREEN = 20
COUNTER_THRESHOLD_RED = 25
COUNTER_THRESHOLD_AMBER = 15
# Define what colour space we are working with.
# For some reason Jetson Nano (gstreamer) needs RGB instead of BGR
os = platform.system()
if os == 'Linux': # Jetson
    COLOUR_CONVERT = cv2.COLOR_RGB2HSV
elif os == 'Windows': # Testing
    COLOUR_CONVERT = cv2.COLOR_BGR2HSV
elif os == 'Darwin':
    COLOUR_CONVERT = cv2.COLOR_BGR2HSV
else:
    # Bug fix: previously no branch matched on any other platform, leaving
    # COLOUR_CONVERT undefined and crashing with a NameError at first use.
    # Default to OpenCV's native BGR ordering.
    COLOUR_CONVERT = cv2.COLOR_BGR2HSV
## Error checking (valid_range) function
# show the detection area in the output image
DRAW_RANGE = True
# set the range for detection (horizontal). Fractions of total (5 = 1/5, 2 = 1/2, 1 = whole frame)
VR_TOP = 5 # 1/5 - close to the top but no the roof
VR_BOTTOM = 2 # 1/2 - halfway
##
## Donkey Car Variables
##
# Threshold: How many values in set before running code. (set 0 to always run)
# Size: How many values to keep track of, more values opens potential for higher error rate (min 3, default 10)
DK_COUNTER_THRESHOLD = 4 # will take (+1) of value
DK_COUNTER_SIZE = 10 # 1 = ~0.05 secs, 20 = 1 sec
# Delay: wait this many cycles before executing the command (set to 0 for no delay)
# Runtime: wait this many cycles until AutoPilot can run again
DK_ACTION_DELAY = 10 # 10 = 0.5s, 20 = 1 sec
DK_ACTION_RUNTIME = 60 # 60 = 3.0s, 20 = 1 sec
# show the debug output for the donkey car part.
DK_SHOW_TEXT_DEBUG = True
| 24.853333 | 112 | 0.721567 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,091 | 0.5853 |
03c6a4780081ba46e0720aa30e18a9c4fde3152f | 1,596 | py | Python | escola/tests/selenium_test_case.py | vini84200/medusa2 | 37cf33d05be8b0195b10845061ca893ba5e814dd | [
"MIT"
] | 1 | 2019-03-15T18:04:24.000Z | 2019-03-15T18:04:24.000Z | escola/tests/selenium_test_case.py | vini84200/medusa2 | 37cf33d05be8b0195b10845061ca893ba5e814dd | [
"MIT"
] | 22 | 2019-03-17T21:53:50.000Z | 2021-03-31T19:12:19.000Z | escola/tests/selenium_test_case.py | vini84200/medusa2 | 37cf33d05be8b0195b10845061ca893ba5e814dd | [
"MIT"
] | 1 | 2018-11-25T03:05:23.000Z | 2018-11-25T03:05:23.000Z | # Developed by Vinicius José Fritzen
# Last Modified 13/04/19 16:04.
# Copyright (c) 2019 Vinicius José Fritzen and Albert Angel Lanzarini
import pytest
from decouple import config
from django.contrib.auth.models import User
from django.test import LiveServerTestCase, TestCase
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.support.wait import WebDriverWait
# @pytest.mark.selenium
class SeleniumTestCase(LiveServerTestCase):
    """
    A base test case for Selenium, providing helper methods for generating
    clients and logging in profiles.
    """
    def setUp(self):
        """Start a (possibly headless) Firefox driver for each test."""
        options = Options()
        # Bug fix: decouple's config() returns the raw environment *string*,
        # so the old comparison `config('MOZ_HEADLESS', 0) == 1` was never
        # true when MOZ_HEADLESS=1 was set in the environment. Cast to int.
        if config('MOZ_HEADLESS', default=0, cast=int) == 1:
            options.add_argument('-headless')
        self.browser = CustomWebDriver(firefox_options=options)
    def tearDown(self):
        """Quit the browser so the geckodriver process is released."""
        self.browser.quit()
class CustomWebDriver(webdriver.Firefox):
    """Firefox WebDriver extended with CSS-selector convenience helpers."""
    def find_css(self, css_selector):
        """Find elements by CSS selector.
        Returns the element itself when exactly one matches, otherwise the
        list of matches; raises NoSuchElementException when nothing matches."""
        matches = self.find_elements_by_css_selector(css_selector)
        if not matches:
            raise NoSuchElementException(css_selector)
        if len(matches) == 1:
            return matches[0]
        return matches
    def wait_for_css(self, css_selector, timeout=7):
        """Block until find_css succeeds for *css_selector*, or *timeout* seconds pass."""
        return WebDriverWait(self, timeout).until(
            lambda driver: driver.find_css(css_selector))
03c7a7b3db0ab25294b2db4724ac425e52e65c90 | 3,628 | py | Python | tests/general.py | tkhyn/djangorecipebook | 2cbb3d46631630e2c7a3c511b504de2088aac115 | [
"MIT"
] | null | null | null | tests/general.py | tkhyn/djangorecipebook | 2cbb3d46631630e2c7a3c511b504de2088aac115 | [
"MIT"
] | null | null | null | tests/general.py | tkhyn/djangorecipebook | 2cbb3d46631630e2c7a3c511b504de2088aac115 | [
"MIT"
] | null | null | null | """
General tests that concern all recipes
"""
import os
import sys
from ._base import mock, RecipeTests, test_project
# we use the very simple manage.Recipe to test BaseRecipe functionalities
from djangorecipebook.recipes import manage
class GeneralRecipeTests(RecipeTests):
    """Tests for BaseRecipe behaviour shared by all recipes, exercised through
    the simple ``manage`` recipe.  Every test patches zc.recipe.egg's
    ``working_set`` so no eggs are resolved, installs the recipe via helpers
    from RecipeTests (init_recipe/script_cat -- defined in the base class,
    not shown here), and inspects the generated manage script's text."""
    # Recipe under test and the buildout options used to instantiate it.
    recipe_class = manage.Recipe
    recipe_name = 'manage'
    recipe_options = {'recipe': 'djangorecipebook:manage'}
    @mock.patch('zc.recipe.egg.egg.Scripts.working_set',
                return_value=(None, []))
    def test_create_script_projectdir(self, working_set):
        # When a project dir is specified, it should be added to sys.path
        self.init_recipe({'project-dir': test_project})
        self.recipe.install()
        to_find_in = os.path.join(self.buildout_dir, test_project)
        # On win32 with Python >= 3.4 the generated paths are lower-cased.
        if sys.platform == 'win32' and sys.version_info >= (3, 4):
            to_find_in = to_find_in.lower()
        self.assertIn(to_find_in,
                      self.script_cat('manage'))
    @mock.patch('zc.recipe.egg.egg.Scripts.working_set',
                return_value=(None, []))
    def test_create_script_extra_paths(self, working_set):
        # When extra paths are specified, they should be added to sys.path
        # we use relative paths so that the test is valid on any platform
        extra_paths = ('my/first/extra/path', 'my/second/extra/path')
        # mimick buildout.cfg file formatting
        self.init_recipe({'extra-paths': '\n '.join(extra_paths)})
        self.recipe.install()
        manage_script = self.script_cat('manage')
        for p in extra_paths:
            self.assertIn(os.path.normpath(p), manage_script)
    @mock.patch('zc.recipe.egg.egg.Scripts.working_set',
                return_value=(None, []))
    def test_create_manage_script_with_initialization(self, working_set):
        # When an init code is specified, it should be added to the script
        self.init_recipe({'initialization': 'import os\nassert True'})
        self.recipe.install()
        # The initialization snippet must appear verbatim, before the
        # djangorecipebook import in the generated script.
        self.assertIn('import os\nassert True\n'
                      'added_settings = {}\n\n'
                      'import djangorecipebook',
                      self.script_cat('manage'))
    @mock.patch('zc.recipe.egg.egg.Scripts.working_set',
                return_value=(None, []))
    def test_create_manage_script_with_args(self, working_set):
        # Default install of a test script, check that the call to
        # djangorecipebook.test.main is present and has the apps names in the
        # arguments
        args = ('-v', '--no-input')
        self.init_recipe({
            'command': 'command',
            'args': '\n '.join(args)
        })
        self.recipe.install()
        manage_script = self.script_path('manage')
        script_cat = self.script_cat(manage_script)
        # The generated entry point forwards the command and each arg.
        self.assertIn("djangorecipebook.scripts.manage.main(added_settings, "
                      "'command', %s)"
                      % ', '.join(["'%s'" % arg for arg in args]), script_cat)
        self.assertIn('added_settings = {', script_cat)
    @mock.patch('zc.recipe.egg.egg.Scripts.working_set',
                return_value=(None, []))
    def test_create_manage_script_with_envvars(self, working_set):
        # Install of a test script with custom environment variables
        self.init_recipe({'envvars': 'MYENVVAR = value'})
        self.recipe.install()
        manage_script = self.script_cat('manage')
        # Env vars are materialised as os.environ assignments in the script.
        self.assertIn('import os', manage_script)
        self.assertIn("os.environ['MYENVVAR'] = 'value'", manage_script)
03c7dfec4bf01608ff9510185092140717400a2f | 170 | py | Python | zinc/utils/validation.py | PressLabs/zinc | 9e1dc852f31f9897e7759962cf0f3e6d42fbe637 | [
"Apache-2.0"
] | 29 | 2017-06-29T15:03:49.000Z | 2018-01-30T14:07:26.000Z | zinc/utils/validation.py | presslabs/zinc | 94146e5203fc93ee0e8bb011a4db0ffcd4b0096e | [
"Apache-2.0"
] | 9 | 2019-01-11T09:07:17.000Z | 2022-02-03T12:50:21.000Z | zinc/utils/validation.py | PressLabs/zinc | 9e1dc852f31f9897e7759962cf0f3e6d42fbe637 | [
"Apache-2.0"
] | 1 | 2020-08-09T18:17:25.000Z | 2020-08-09T18:17:25.000Z | import ipaddress
def is_ipv6(ip_addr):
    """Return True when *ip_addr* is a valid IPv6 address, False otherwise."""
    try:
        ipaddress.IPv6Address(ip_addr)
    except ipaddress.AddressValueError:
        return False
    return True
| 17 | 39 | 0.676471 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
03c990c91654998615108574d5737dfafe7b57a4 | 4,167 | py | Python | tests/config/evaluation.py | realtwister/LearnedEvolution | 2ec49b50a49acae9693cfb05ac114dfbcc4aa337 | [
"MIT"
] | null | null | null | tests/config/evaluation.py | realtwister/LearnedEvolution | 2ec49b50a49acae9693cfb05ac114dfbcc4aa337 | [
"MIT"
] | null | null | null | tests/config/evaluation.py | realtwister/LearnedEvolution | 2ec49b50a49acae9693cfb05ac114dfbcc4aa337 | [
"MIT"
] | null | null | null | import numpy as np
from collections import namedtuple
Population = namedtuple("Population", ['mean_fitness','mean', 'covariance'])
config = dict(
dimension = 2,
population_size = 100,
algorithm = dict(
mean_function =dict(
type = "RLMean"
),
covariance_function = dict(
type = "AMaLGaMCovariance"
),
convergence_criterion = dict(
type = "CovarianceConvergence",
threshold = 1e-20
)
),
problem_suite = dict(
clss=[
["RotateProblem", "TranslateProblem", "Rosenbrock"]
]
),
evaluator = dict(
algorithm = dict(
mean_function =dict(
type = "RLMean"
),
covariance_function = dict(
type = "AMaLGaMCovariance"
),
convergence_criterion = dict(
type = "TimeConvergence",
max_iter = 200
)
),
restoredir = "/tmp/thesis/single_benchmarks/differentialReward_TimeConv/10000",
logdir = "/tmp/thesis/single_benchmarks/differentialReward_TimeConv/evaluations/10000",
seed = 1001,
N_episodes = 100,
summarizer = lambda pop: Population(np.mean(pop.fitness), pop.mean, pop.covariance),
)
)
from learnedevolution import Benchmark, Evaluator
#benchmark = Benchmark.from_config(config, 'benchmark')
#benchmark.run()
evaluator = Evaluator.from_config(config, 'evaluator')
histories = evaluator.run()
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import pandas as pd
# 1 history fitness plot
# Draw every episode's per-iteration mean-fitness trace (faint black) and
# collect the traces for the summary plots below.
plt.figure();
data = dict(
    fitness =[],
)
for i in range(len(histories)):
    history = histories[i]
    # Negated (presumably to turn maximized fitness into a positive error
    # measure for the log axis) — TODO confirm sign convention.
    mean_fitness = -np.array([population.mean_fitness for population in history])
    data['fitness'] += [mean_fitness];
    plt.semilogy(mean_fitness, alpha = 0.1, color = 'k')
def plot_time_mean(fitness):
    """Plot (semilog-y) the mean fitness per iteration across all traces.

    Traces may have different lengths; each iteration's mean is taken over
    only the traces that are still running at that iteration.
    """
    horizon = np.max([len(trace) for trace in fitness])
    per_step = [[] for _ in range(horizon)]
    for trace in fitness:
        for step, value in enumerate(trace):
            per_step[step].append(value)
    plt.semilogy([np.mean(values) for values in per_step])
def precision_hits(fitness, precisions, ts=None):
    """Find when a fitness trace first crosses each precision threshold.

    Args:
        fitness: sequence of (typically decreasing) fitness values.
        precisions: thresholds to check; processed from largest to smallest.
        ts: timestamps matching ``fitness``; defaults to 0., 1., 2., ...

    Returns:
        (hit_times, thresholds): for each threshold actually crossed, the
        first timestamp at which the trace dropped below it, paired with
        the crossed thresholds in descending order.
    """
    if ts is None:
        ts = list(np.arange(len(fitness)).astype(float))
    thresholds = sorted(precisions, reverse=True)
    hits = []
    idx = 0
    for t, value in zip(ts, fitness):
        # One sample may cross several thresholds at once.
        while idx < len(thresholds) and value < thresholds[idx]:
            hits.append(t)
            idx += 1
        if idx >= len(thresholds):
            break
    return hits, thresholds[:len(hits)]
def plot_precision_mean(fitness, num_bins=100):
    """Plot (semilog-y) binned mean time versus mean fitness.

    All (time, value) samples from every trace are pooled, sorted by value
    descending, split into ``num_bins`` equal-size chunks, and each chunk's
    mean time is plotted against its mean value.
    """
    times = [step for trace in fitness for step in range(len(trace))]
    values = [value for trace in fitness for value in trace]
    values, times = zip(*sorted(zip(values, times), key=lambda pair: -pair[0]))
    total = len(values)
    bin_size = np.ceil(total / num_bins).astype(int)
    mean_times = []
    mean_values = []
    for b in range(num_bins):
        chunk = slice(b * bin_size, (b + 1) * bin_size)
        mean_times.append(np.mean(times[chunk]))
        mean_values.append(np.mean(values[chunk]))
    plt.semilogy(mean_times, mean_values)
def plot_precision_hits (fitness, num_bins = 100 ):
    """Scatter/line plot of time-to-reach-precision over all traces.

    Builds a log-spaced precision grid spanning the traces, records when
    each trace first crosses each threshold (via precision_hits) and plots
    the pooled results with seaborn.
    """
    # NOTE(review): the names look swapped — max_precision collects the
    # per-trace minima and min_precision the per-trace maxima; the logspace
    # therefore runs from larger to smaller values. Confirm intent.
    max_precision = 0
    min_precision = float('inf')
    for f in fitness:
        max_precision = max(max_precision, np.min(f))
        min_precision = min(min_precision, np.max(f))
    precisions = np.logspace(np.log10(min_precision), np.log10(max_precision), num_bins)
    data = pd.DataFrame(columns=['time','precision'])
    for f in fitness:
        hits,ps = precision_hits(f, precisions)
        plt.semilogy(hits,ps)
        # NOTE(review): DataFrame.append is deprecated and removed in
        # pandas >= 2.0 — migrate to pd.concat when upgrading.
        data = data.append([dict(time=t, precision=p) for t,p in zip(hits,ps)])
    plt.figure()
    ax = sns.scatterplot(x= 'precision', y='time', data=data, alpha= 0.1)
    ax.set( xscale="log")
    # NOTE(review): seaborn's ci= parameter is deprecated in newer releases
    # (use errorbar='sd') — confirm the installed seaborn version.
    ax = sns.lineplot(x= 'precision', y='time', data=data, ax=ax, ci='sd')
    ax.set( xscale="log")
# Overlay the per-iteration mean curve and the precision-hit summary for
# all collected traces, then display everything.
plt.figure();
plt.yscale('log')
plot_time_mean(data['fitness'])
plot_precision_hits(data['fitness'], num_bins=10)
plt.show();
| 27.78 | 95 | 0.593713 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 536 | 0.12863 |
03cc40e680dd0a778266264e42ce9370062476e1 | 1,036 | py | Python | tornado/4_celery_async_sleep.py | dongweiming/speakerdeck | 497352767a6ec57629f28d5c85f70bef38fc1914 | [
"Apache-2.0"
] | 6 | 2015-03-02T06:01:28.000Z | 2016-06-03T09:55:34.000Z | tornado/4_celery_async_sleep.py | dongweiming/speakerdeck | 497352767a6ec57629f28d5c85f70bef38fc1914 | [
"Apache-2.0"
] | null | null | null | tornado/4_celery_async_sleep.py | dongweiming/speakerdeck | 497352767a6ec57629f28d5c85f70bef38fc1914 | [
"Apache-2.0"
] | 5 | 2015-02-01T13:48:58.000Z | 2018-11-27T02:10:59.000Z | #!/bin/env python
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import tornado.gen
import tornado.httpclient
import tcelery
import sleep_task as tasks
from tornado.options import define, options
define("port", default=8000, help="run on the given port", type=int)
tcelery.setup_nonblocking_producer()
class SleepHandler(tornado.web.RequestHandler):
    """Non-blocking handler that offloads a 5 s sleep to Celery."""

    @tornado.web.asynchronous
    @tornado.gen.coroutine
    def get(self):
        # gen.Task resumes this coroutine once the Celery task finishes,
        # keeping the IOLoop free to serve other requests meanwhile.
        yield tornado.gen.Task(tasks.sleep.apply_async, args=[5])
        self.write("when i sleep 5s")
        self.finish()
class JustNowHandler(tornado.web.RequestHandler):
    """Synchronous handler that responds immediately (contrast demo)."""

    def get(self):
        self.write("i hope just now see you")
if __name__ == "__main__":
    # Parse CLI options (e.g. --port) before building the application.
    tornado.options.parse_command_line()
    app = tornado.web.Application(handlers=[
        (r"/sleep", SleepHandler), (r"/justnow", JustNowHandler)])
    http_server = tornado.httpserver.HTTPServer(app)
    http_server.listen(options.port)
    # Blocks here serving requests until interrupted.
    tornado.ioloop.IOLoop.instance().start()
| 28 | 70 | 0.728764 | 363 | 0.350386 | 140 | 0.135135 | 197 | 0.190154 | 0 | 0 | 118 | 0.1139 |
03cdffc4aa94129ab097ff69eb0662a8a2afab32 | 1,026 | py | Python | 013-Flareon 6 Reloadered/resolve.py | schommi/low-tech | 8dc1c823204da9088ff696c5d23d5471eef317e1 | [
"MIT"
] | null | null | null | 013-Flareon 6 Reloadered/resolve.py | schommi/low-tech | 8dc1c823204da9088ff696c5d23d5471eef317e1 | [
"MIT"
] | null | null | null | 013-Flareon 6 Reloadered/resolve.py | schommi/low-tech | 8dc1c823204da9088ff696c5d23d5471eef317e1 | [
"MIT"
] | null | null | null | import itertools
# Ciphertext bytes extracted from the challenge binary; the flag is
# recovered by XOR-ing them with a repeating 13-byte key.
secret = [
0x7A, 0x17, 0x08, 0x34, 0x17, 0x31, 0x3B, 0x25, 0x5B, 0x18, 0x2E, 0x3A, 0x15, 0x56, 0x0E, 0x11,
0x3E, 0x0D, 0x11, 0x3B, 0x24, 0x21, 0x31, 0x06, 0x3C, 0x26, 0x7C, 0x3C, 0x0D, 0x24, 0x16, 0x3A,
0x14, 0x79, 0x01, 0x3A, 0x18, 0x5A, 0x58, 0x73, 0x2E, 0x09, 0x00, 0x16, 0x00, 0x49, 0x22, 0x01,
0x40, 0x08, 0x0A, 0x14 ]
# Placeholder key; the 13 bytes are derived below from known plaintext.
key = [0 for x in range(13)]
def decrypt():
    """XOR-decode ``secret`` with the repeating ``key`` and return the text."""
    return "".join(chr(byte ^ key[i % len(key)])
                   for i, byte in enumerate(secret))
# The key is derived assuming the plaintext ends with "@flare-on.com"
# (13 characters): XOR-ing that known suffix against the last 13
# ciphertext bytes yields the full repeating key.
key[0] = ord ("@") ^ secret[-13]
key[1] = ord ("f") ^ secret[-12]
key[2] = ord ("l") ^ secret[-11]
key[3] = ord ("a") ^ secret[-10]
key[4] = ord ("r") ^ secret[-9]
key[5] = ord ("e") ^ secret[-8]
key[6] = ord ("-") ^ secret[-7]
key[7] = ord ("o") ^ secret[-6]
key[8] = ord ("n") ^ secret[-5]
key[9] = ord (".") ^ secret[-4]
key[10] = ord ("c") ^ secret[-3]
key[11] = ord ("o") ^ secret[-2]
key[12] = ord ("m") ^ secret[-1]
# Decode and print the recovered flag.
dbg = decrypt()
print(dbg)
| 27.72973 | 97 | 0.567251 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 41 | 0.039961 |
03d0163037b3ffb3243f9f7b36c80a6e4a2647ef | 1,098 | py | Python | py/cidoc_crm_types/entities/e66_formation.py | minorg/cidoc-crm-types | 9018bdbf0658e4d28a87bc94543e467be45d8aa5 | [
"Apache-2.0"
] | null | null | null | py/cidoc_crm_types/entities/e66_formation.py | minorg/cidoc-crm-types | 9018bdbf0658e4d28a87bc94543e467be45d8aa5 | [
"Apache-2.0"
] | null | null | null | py/cidoc_crm_types/entities/e66_formation.py | minorg/cidoc-crm-types | 9018bdbf0658e4d28a87bc94543e467be45d8aa5 | [
"Apache-2.0"
] | null | null | null | from .e63_beginning_of_existence import E63BeginningofExistence
from .e7_activity import E7Activity
from dataclasses import dataclass
@dataclass
class E66Formation(E63BeginningofExistence, E7Activity):
    """
    Scope note:
    This class comprises events that result in the formation of a formal or informal E74 Group of people, such as a club, society, association, corporation or nation.
    E66 Formation does not include the arbitrary aggregation of people who do not act as a collective. The formation of an instance of E74 Group does not require that the group is populated with members at the time of formation. In order to express the joining of members at the time of formation, the respective activity should be simultaneously an instance of both E66 Formation and E85 Joining.
    Examples:
    - the formation of the CIDOC CRM Special Interest Group
    - the formation of the Soviet Union (Pipes, 1964)
    - the conspiring of the murderers of Caesar (Irwin, 1935)
    In First Order Logic:
    E66(x) ⊃ E7(x)
    E66(x) ⊃ E63(x)
    """

    # Canonical CIDOC-CRM (Erlangen) URI identifying this class.
    TYPE_URI = "http://erlangen-crm.org/current/E66_Formation"
| 40.666667 | 393 | 0.784153 | 950 | 0.865209 | 0 | 0 | 961 | 0.875228 | 0 | 0 | 872 | 0.794171 |
03d052fd2d3ee2a1a9e3667dd101b990d188cf77 | 7,269 | py | Python | platform/radio/efr32_multiphy_configurator/pro2_chip_configurator/src/si4440_modem_calc/pro2plusapilist.py | lmnotran/gecko_sdk | 2e82050dc8823c9fe0e8908c1b2666fb83056230 | [
"Zlib"
] | 82 | 2016-06-29T17:24:43.000Z | 2021-04-16T06:49:17.000Z | platform/radio/efr32_multiphy_configurator/pro2_chip_configurator/src/si4440_modem_calc/pro2plusapilist.py | lmnotran/gecko_sdk | 2e82050dc8823c9fe0e8908c1b2666fb83056230 | [
"Zlib"
] | 6 | 2022-01-12T18:22:08.000Z | 2022-03-25T10:19:27.000Z | platform/radio/efr32_multiphy_configurator/pro2_chip_configurator/src/si4440_modem_calc/pro2plusapilist.py | lmnotran/gecko_sdk | 2e82050dc8823c9fe0e8908c1b2666fb83056230 | [
"Zlib"
] | 56 | 2016-08-02T10:50:50.000Z | 2021-07-19T08:57:34.000Z | '''
Created on Apr 4, 2013
@author: sesuskic
'''
from .pro2apilist import Pro2ApiList
from .trueround import trueround
__all__ = ["Pro2PlusApiList"]
class Pro2PlusApiList(Pro2ApiList):
    """API-property list builder for Pro2+ parts.

    Specializes Pro2ApiList: most overrides pack demodulator calculator
    bit-fields into single-byte register properties, so the power-of-two
    factors below mirror the hardware register bit layouts.
    """

    def _add_seq_cfg(self, modem_calc, api_list):
        """Placeholder — SEQ_CFG0 has no firmware API yet, so nothing is set."""
        # api_list['SEQ_CFG0'] = (modem_calc.modulator.fields.close_hw_dly_comp) # need API in FW
        pass

    def _add_modem_raw_search_api(self, modem_calc, api_list):
        """Pack the raw-search fields into MODEM_RAW_SEARCH2."""
        fields = modem_calc.demodulator.fields
        api_list['MODEM_RAW_SEARCH2'] = (fields.sch_frzen * 2**7 +
                                         fields.rawflt_sel * 2**6 +
                                         fields.schprd_h * 2**3 +
                                         fields.schprd_low)

    def _add_chflt_rx_apis(self, modem_calc, api_list):
        """Add the channel-filter / DSA receive property group.

        Extends the base-class set with the Pro2+ spike detector, DSA
        control/qualifier registers, one-shot AFC, RSSI mute and — for
        very low data rates — the RX hop table.
        """
        super(Pro2PlusApiList, self)._add_chflt_rx_apis(modem_calc, api_list)
        fields = modem_calc.demodulator.fields
        api_list['MODEM_SPIKE_DET'] = int(
            trueround(fields.spike_rm_en * 2**7 + fields.spike_det_thd)) & 0xff
        api_list['MODEM_DSA_CTRL1'] = (fields.arriving_src * 2**6 +
                                       fields.signal_dsa_mode * 2**5 +
                                       fields.arr_toler) & 0xff
        # jira-1652: put arr_q_sync_en back into rev-c2.  Rev C0/C1 carries
        # the extra arr_q_pm_en / skip_pm_det bits; rev C2 does not.
        if modem_calc.revc0_c1:
            api_list['MODEM_DSA_CTRL2'] = (fields.arr_q_pm_en * 2**7 +
                                           fields.arr_q_sync_en * 2**6 +
                                           fields.bcr_sw_sycw * 2**5 +
                                           fields.skip_pm_det * 2**4 +
                                           fields.arrival_thd) & 0xff
        else:
            api_list['MODEM_DSA_CTRL2'] = (fields.arr_q_sync_en * 2**6 +
                                           fields.bcr_sw_sycw * 2**5 +
                                           fields.arrival_thd) & 0xff
        api_list['MODEM_ONE_SHOT_AFC'] = (fields.oneshot_afc * 2**7 +
                                          fields.bcr_align_en * 2**6 +
                                          fields.est_osr_en * 2**5 +
                                          fields.afcma_en * 2**4 +
                                          fields.oneshot_waitcnt) & 0xff
        api_list['MODEM_DSA_QUAL'] = int(fields.eye_qua_sel * 2**7 +
                                         fields.arr_eye_qual) & 0xff
        api_list['MODEM_DSA_RSSI'] = (fields.arr_squelch * 2**7 +
                                      fields.rssi_arr_thd)
        api_list['MODEM_DECIMATION_CFG2'] = (fields.ndec3 * 32 +
                                             fields.ndec2gain * 8 +
                                             fields.ndec2agc * 4) & 0xff
        # jira-1651: set IFPKD-TH for ETSI modes (rev C2 only).
        if modem_calc.revc0_c1 == False:
            api_list['MODEM_IFPKD_THRESHOLDS'] = fields.ifpkd_th
        api_list['MODEM_RSSI_MUTE'] = (fields.mute_rssi_sel * 2**3 +
                                       fields.mute_rssi_cnt)
        api_list['MODEM_DSA_MISC'] = (fields.eyexest_en * 2**6 +
                                      fields.eyexest_fast * 2**5 +
                                      fields.low_duty)
        if modem_calc.revc0_c1 == False:
            api_list['PREAMBLE_CONFIG'] = fields.rx_pream_src * 2**7 + 0x21
        # DSA RX hopping for super low data rate: fixed control values plus
        # one identity table entry per hop channel.
        if fields.rx_hopping_en == 1:
            api_list['MODEM_DSM_CTRL'] = 0x13
            api_list['RX_HOP_CONTROL'] = 0x10
            api_list['RX_HOP_TABLE_SIZE'] = fields.fh_ch_number + 1
            for entry in range(fields.fh_ch_number + 1):
                api_list['RX_HOP_TABLE_ENTRY_' + str(entry)] = entry

    def _add_ook_blopk(self, modem_calc, api_list):
        """OOK peak-detector bandwidth property."""
        api_list['MODEM_OOK_BLOPK'] = modem_calc.demodulator.fields.bw_peak

    def _add_rssi_group(self, modem_calc, api_list):
        """Add the RSSI jump-detection property group.

        MODEM_RSSI_THRESH is intentionally left at its silicon default.
        """
        fields = modem_calc.demodulator.fields
        api_list['MODEM_RSSI_JUMP_THRESH'] = fields.rssijmpthd
        # Bit 0 kept at 1 (latch at preamble detect, the previous default).
        api_list['MODEM_RSSI_CONTROL'] = (fields.rssi_sel * 8 + 1)
        api_list['MODEM_RSSI_CONTROL2'] = (fields.rssijmp_dwn * 32 +
                                           fields.rssijmp_up * 16 +
                                           fields.enrssijmp * 8 +
                                           fields.jmpdlylen * 4 +
                                           fields.enjmprx * 2)

    def _add_modem_if_control(self, api_list, modem_calc):
        """Extend base IF control with the ETSI mode bits (mod 3: 3 writes 0)."""
        super(Pro2PlusApiList, self)._add_modem_if_control(api_list, modem_calc)
        api_list['MODEM_IF_CONTROL'] += int(modem_calc.inputs.API_ETSI % 3)

    def _add_modem_ook_misc(self, api_list, modem_calc):
        """Pack OOK_MISC (jira-1658 added three fields to this register)."""
        fields = modem_calc.demodulator.fields
        api_list['MODEM_OOK_MISC'] = int(fields.fast_ma * 128 +
                                         fields.ook_limit_discharge * 32 +
                                         fields.ook_squelch_en * 16 +
                                         fields.ook_discharge_div * 4 +
                                         fields.detector)

    def _add_modem_bcr_misc0(self, api_list, modem_calc):
        """Write MODEM_BCR_MISC0 — rev C2 parts only (DSA_BCR_RST == diff0rst_en)."""
        if modem_calc.revc0_c1 == False:
            fields = modem_calc.demodulator.fields
            api_list['MODEM_BCR_MISC0'] = int(fields.res_lockup_byp * 8 +
                                              fields.diff0rst_en)

    def __init__(self):
        super(Pro2PlusApiList, self).__init__()
| 57.23622 | 138 | 0.618104 | 7,105 | 0.977438 | 0 | 0 | 0 | 0 | 0 | 0 | 1,113 | 0.153116 |
03d0bc9d1b09e698e817e41c4a64a567b8b2fd46 | 4,052 | py | Python | tests/mockers.py | bastienboutonnet/sheetwork | 7aa757ed12375ddd2c56502b721d91146d22b7ea | [
"MIT"
] | 9 | 2020-12-10T12:12:42.000Z | 2021-11-24T20:56:36.000Z | tests/mockers.py | bastienboutonnet/sheetwork | 7aa757ed12375ddd2c56502b721d91146d22b7ea | [
"MIT"
] | 266 | 2020-04-19T10:50:19.000Z | 2022-03-14T22:12:43.000Z | tests/mockers.py | bastienboutonnet/sheetwork | 7aa757ed12375ddd2c56502b721d91146d22b7ea | [
"MIT"
] | 3 | 2020-04-25T18:11:20.000Z | 2020-12-21T09:36:34.000Z | import pandas
from pandas import Timestamp
EXPECTED_CONFIG = {
"sheet_name": "df_dropper",
"sheet_key": "sample",
"target_schema": "sand",
"target_table": "bb_test_sheetwork",
"columns": [
{"name": "col_a", "datatype": "int"},
{"name": "col_b", "datatype": "varchar"},
{"name": "col_one", "datatype": "varchar"},
{"name": "renamed_col", "identifier": "long ass name", "datatype": "varchar"},
],
"excluded_columns": ["to_exclude"],
}
EXPECTED_DEV_TEST_PROFILE = {
"db_type": "snowflake",
"account": "a",
"user": "b",
"password": "c",
"role": "d",
"database": "e",
"warehouse": "f",
"schema": "g",
"guser": "sheetwork_test@blahh.iam.gserviceaccount.com",
}
NO_COLS_EXPECTED_CONFIG = {
"sheet_name": "no_cols",
"sheet_key": "sample",
"target_schema": "sand",
"target_table": "bb_test_sheetwork",
}
EXPECTED_SHEETWORK_PROJECT = {
"name": "sheetwork_test",
"target_schema": "sand",
"always_create_table": True,
"always_create_schema": True,
"destructive_create_table": True,
}
EXPECTED_SHEETWORK_PROJECT_ALL_CREATE = {
"name": "sheetwork_test",
"target_schema": "sand",
"always_create_objects": True,
"destructive_create_table": True,
}
EXPECTED_SHEETWORK_PROJECT_DEPRECATED = {
"name": "sheetwork_test",
"target_schema": "sand",
"always_create": True,
}
DIRTY_DF = {
"col_a": [1, 2, 32],
"col b": ["as . ", "b", " c"],
"1. ??col_one": ["aa", "bb", "cc"],
"": ["q", "q", "q"],
"col_1": [1, 2, 33],
"long ass name": ["foo", "bar", "fizz"],
"col_with_empty_string": ["1", "", "2"],
}
TO_CAST_DF = {
"col_int": ["1", "2", "32"],
"col_varchar": ["foo", "bar", "fizz"],
"created_date": ["2019/01/01", "2019/01/02", "2019/01/03"],
"col_bool": ["false", "False", "true"],
"col_numeric": ["1.2", "1.3", "1"],
}
CAST_DF = {
# this non conversion to int is intentional until we have a better fix see #205, #204
"col_int": {0: "1", 1: "2", 2: "32"},
"col_varchar": {0: "foo", 1: "bar", 2: "fizz"},
"created_date": {
0: Timestamp("2019-01-01 00:00:00"),
1: Timestamp("2019-01-02 00:00:00"),
2: Timestamp("2019-01-03 00:00:00"),
},
"col_bool": {0: False, 1: False, 2: True},
"col_numeric": {0: 1.2, 1: 1.3, 2: 1},
}
CASING_DF = {
"CamelCasedCol": [1, 2, 3],
"snake_cased_col": [1, 2, 3],
}
SNAKE_CASED_COLS = ["camel_cased_col", "snake_cased_col"]
CAMEL_CASED_COLS = ["CamelCasedCol", "SnakeCasedCol"]
CLEAN_DF = {
"col_a": {0: 1, 1: 2, 2: 32},
"col_b": {0: "as .", 1: "b", 2: "c"},
"1_col_one": {0: "aa", 1: "bb", 2: "cc"},
"col_1": {0: 1, 1: 2, 2: 33},
"long_ass_name": {0: "foo", 1: "bar", 2: "fizz"},
"col_with_empty_string": {0: "1", 1: "", 2: "2"},
}
RENAMED_DF = {
"col_a": {0: 1, 1: 2, 2: 32},
"col_b": {0: "as .", 1: "b", 2: "c"},
"1_col_one": {0: "aa", 1: "bb", 2: "cc"},
"col_1": {0: 1, 1: 2, 2: 33},
"renamed_col": {0: "foo", 1: "bar", 2: "fizz"},
}
DROP_COL_DF = {
"col_a": [1, 2, 32],
"col b": ["as . ", "b", " c"],
"1. col_one": ["aa", "bb", "cc"],
"": ["q", "q", "q"],
"long ass name": ["foo", "bar", "fizz"],
"to_exclude": ["garbage1", "garbage2", "garbage3"],
}
RENAMED_COLS = [
"col_a",
"col b",
"1. ??col_one",
"",
"col_1",
"renamed_col",
"col_with_empty_string",
]
EXCLUDED_DF_COLS = ["col_a", "col b", "1. col_one", "", "long ass name"]
EMPTY_HEADER_COLUMNS_DF = {
"col_ a ": [1, 2, 32],
" ": ["as . ", "b", " c"],
"1. col_one": ["aa", "bb", "cc"],
"": ["q", "q", "q"],
" col_1": [1, 2, 33],
}
NON_EMPTY_HEADER = {
"col_a": [1, 2, 32],
"col b": ["as . ", "b", " c"],
"1. col_one": ["aa", "bb", "cc"],
"col_1": [1, 2, 33],
"long ass name": ["foo", "bar", "fizz"],
"col_with_empty_string": ["1", "", "2"],
}
def generate_test_df(df):
    """Return a pandas DataFrame built from the column-oriented dict *df*."""
    return pandas.DataFrame.from_dict(df)
| 25.64557 | 89 | 0.517769 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,089 | 0.515548 |
03d3ac7a4d3e7b410abc26d86dafcac236ceca0f | 337 | py | Python | app/bot/types.py | DramatikMan/mlhl-01-python-bot | ab65432781db8bb5b0ff3b698514a14393809360 | [
"MIT"
] | null | null | null | app/bot/types.py | DramatikMan/mlhl-01-python-bot | ab65432781db8bb5b0ff3b698514a14393809360 | [
"MIT"
] | null | null | null | app/bot/types.py | DramatikMan/mlhl-01-python-bot | ab65432781db8bb5b0ff3b698514a14393809360 | [
"MIT"
] | null | null | null | from typing import Any
from telegram.ext import CallbackContext, Dispatcher
# CallbackContext specialized with plain dicts for user_data, chat_data
# and bot_data.
CCT = CallbackContext[
    dict[Any, Any],
    dict[Any, Any],
    dict[Any, Any]
]

# Dispatcher parameterized with the same context/data types.
DP = Dispatcher[
    CCT,
    dict[Any, Any],
    dict[Any, Any],
    dict[Any, Any]
]

# One dataset row: nine ints, two strings and a float.
# NOTE(review): exact column meanings are defined by the caller — confirm.
DataRecord = tuple[
    int, int, int, int, int, int, int, int, int, str, str, float
]
| 16.85 | 64 | 0.626113 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
03d62215eb44f2521a4a5180463ae9e3411c086d | 1,401 | py | Python | custom_components/gpodder/config_flow.py | hsolberg/gpodder | 6b3af212f8067c7084f638bf40c9a25fe6fc252d | [
"MIT"
] | 13 | 2019-03-21T10:44:58.000Z | 2021-04-17T09:19:53.000Z | custom_components/gpodder/config_flow.py | hsolberg/gpodder | 6b3af212f8067c7084f638bf40c9a25fe6fc252d | [
"MIT"
] | 18 | 2019-03-24T20:41:21.000Z | 2021-12-10T01:42:57.000Z | custom_components/gpodder/config_flow.py | hsolberg/gpodder | 6b3af212f8067c7084f638bf40c9a25fe6fc252d | [
"MIT"
] | 8 | 2019-03-24T06:19:24.000Z | 2021-06-03T11:08:23.000Z | """Adds config flow for gPodder."""
from homeassistant import config_entries
import voluptuous as vol
from custom_components.gpodder.const import (
CONF_NAME,
CONF_PASSWORD,
CONF_USERNAME,
CONF_DEVICE,
DEFAULT_NAME,
DOMAIN,
)
class GpodderFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
    """Config flow for gPodder."""

    # Bump VERSION when the config-entry schema changes.
    VERSION = 1
    CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL

    def __init__(self):
        """Initialize."""
        # Collected form errors, keyed by field name.
        self._errors = {}

    async def async_step_user(self, user_input=None):
        """Handle a flow initialized by the user."""
        self._errors = {}
        if user_input is not None:
            # The device name doubles as the config-entry title.
            return self.async_create_entry(
                title=user_input[CONF_DEVICE], data=user_input
            )
        return await self._show_config_form(user_input)

    async def _show_config_form(self, user_input):
        """Show the configuration form to edit location data."""
        return self.async_show_form(
            step_id="user",
            data_schema=vol.Schema(
                {
                    vol.Required(CONF_USERNAME): str,
                    vol.Required(CONF_PASSWORD): str,
                    vol.Required(CONF_DEVICE): str,
                    vol.Required(CONF_NAME, default=DEFAULT_NAME): str,
                }
            ),
            errors=self._errors,
        )
| 28.591837 | 71 | 0.605282 | 1,147 | 0.818701 | 0 | 0 | 0 | 0 | 879 | 0.627409 | 188 | 0.13419 |
03d6498ba61a917cb8382f4c387383c0b1401401 | 2,765 | py | Python | openfda/nsde/pipeline.py | FDA/openfda | 93c3abed4042a4a2729975468c4e377a67e8a5ca | [
"CC0-1.0"
] | 388 | 2015-01-09T18:50:35.000Z | 2022-03-24T10:15:23.000Z | openfda/nsde/pipeline.py | FDA/openfda | 93c3abed4042a4a2729975468c4e377a67e8a5ca | [
"CC0-1.0"
] | 150 | 2015-01-21T20:30:54.000Z | 2022-03-28T20:46:29.000Z | openfda/nsde/pipeline.py | FDA/openfda | 93c3abed4042a4a2729975468c4e377a67e8a5ca | [
"CC0-1.0"
] | 113 | 2015-01-31T21:24:16.000Z | 2022-01-30T15:17:28.000Z | #!/usr/local/bin/python
'''
Pipeline for converting CSV nsde data to JSON and importing into Elasticsearch.
'''
import glob
import os
from os.path import join, dirname
import luigi
from openfda import common, config, parallel, index_util
from openfda.common import newest_file_timestamp
NSDE_DOWNLOAD = \
'https://download.open.fda.gov/Comprehensive_NDC_SPL_Data_Elements_File.zip'
NSDE_EXTRACT_DB = 'nsde/nsde.db'
NSDE_RAW_DIR = config.data_dir('nsde/raw')
class DownloadNSDE(luigi.Task):
    """Downloads and unzips the NSDE source CSV from download.open.fda.gov."""

    def output(self):
        return luigi.LocalTarget(join(NSDE_RAW_DIR, 'nsde.csv'))

    def run(self):
        output_dir = dirname(self.output().path)
        zip_filename = join(output_dir, 'nsde.zip')
        common.download(NSDE_DOWNLOAD, zip_filename)
        # Unzip next to the archive, then normalize the CSV's file name
        # (the archive's inner CSV name varies between releases).
        os.system('unzip -o %(zip_filename)s -d %(output_dir)s' % locals())
        os.rename(glob.glob(join(output_dir, '*.csv'))[0], self.output().path)
class NSDE2JSONMapper(parallel.Mapper):
    """Renames NSDE CSV columns to JSON field names and drops the rest."""

    # CSV header -> output JSON key; columns not listed here are discarded.
    rename_map = {
        "Item Code": "package_ndc",
        "NDC11": "package_ndc11",
        "Marketing Category": "marketing_category",
        "Marketing Start Date": "marketing_start_date",
        "Marketing End Date": "marketing_end_date",
        "Billing Unit": "billing_unit",
        "Proprietary Name": "proprietary_name",
        "Dosage Form": "dosage_form",
        "Application Number or Citation": "application_number_or_citation",
        "Product Type": "product_type",
        "Inactivation Date": "inactivation_date",
        "Reactivation Date": "reactivation_date"
    }

    def map(self, key, value, output):
        def _cleaner(k, v):
            ''' Helper function to rename keys and purge any keys that are not in
            the map.
            '''
            # Empty/missing values fall through and return None, which
            # transform_dict presumably drops — TODO confirm.
            if k in self.rename_map and v is not None and v != '':
                if "Date" in k:
                    # Dates arrive as floats (e.g. 20190101.0); keep the
                    # integer text form.
                    return (self.rename_map[k], str(int(v)))
                if "Proprietary Name" in k:
                    # Normalize product names to Title Case.
                    return (self.rename_map[k], str(v).title())
                else:
                    return (self.rename_map[k], v)

        new_value = common.transform_dict(value, _cleaner)
        output.add(key, new_value)
class NSDE2JSON(luigi.Task):
    """Runs the CSV -> JSON mapreduce over the downloaded NSDE file."""

    def requires(self):
        return DownloadNSDE()

    def output(self):
        return luigi.LocalTarget(config.data_dir(NSDE_EXTRACT_DB))

    def run(self):
        # Map each CSV row through NSDE2JSONMapper; identity reduce.
        parallel.mapreduce(
            parallel.Collection.from_glob(
                self.input().path, parallel.CSVDictLineInput()),
            mapper=NSDE2JSONMapper(),
            reducer=parallel.IdentityReducer(),
            output_prefix=self.output().path)
class LoadJSON(index_util.LoadJSONBase):
    """Loads the extracted JSON into the 'othernsde' Elasticsearch index."""

    index_name = 'othernsde'
    type_name = 'othernsde'
    mapping_file = './schemas/othernsde_mapping.json'
    data_source = NSDE2JSON()
    use_checksum = False
    optimize_index = True
    # Report the newest raw-file mtime as the dataset's last-update date.
    last_update_date = lambda _: newest_file_timestamp(NSDE_RAW_DIR)
if __name__ == '__main__':
luigi.run()
| 28.802083 | 79 | 0.691863 | 2,246 | 0.812297 | 0 | 0 | 0 | 0 | 0 | 0 | 900 | 0.325497 |
03d659e23330734a212d3f3f3cc9b22edbb8b9c6 | 1,397 | py | Python | mirari/INV/migrations/0003_auto_20190609_1903.py | gcastellan0s/mirariapp | 24a9db06d10f96c894d817ef7ccfeec2a25788b7 | [
"MIT"
] | null | null | null | mirari/INV/migrations/0003_auto_20190609_1903.py | gcastellan0s/mirariapp | 24a9db06d10f96c894d817ef7ccfeec2a25788b7 | [
"MIT"
] | 18 | 2019-12-27T19:58:20.000Z | 2022-02-27T08:17:49.000Z | mirari/INV/migrations/0003_auto_20190609_1903.py | gcastellan0s/mirariapp | 24a9db06d10f96c894d817ef7ccfeec2a25788b7 | [
"MIT"
] | null | null | null | # Generated by Django 2.0.5 on 2019-06-10 00:03
from django.db import migrations, models
import localflavor.mx.models
class Migration(migrations.Migration):
    """Auto-generated by Django 2.0.5; alters FiscalMX field definitions.

    NOTE: generated migrations should not be edited by hand once applied.
    """

    dependencies = [
        ('INV', '0002_auto_20190608_2204'),
    ]

    operations = [
        # Require a contact e-mail for billing notifications.
        migrations.AlterField(
            model_name='fiscalmx',
            name='contactEmail',
            field=models.EmailField(default='email@email.com', help_text='Correo donde llegarán las notificaciones sobre facturación', max_length=100, verbose_name='Email contacto'),
            preserve_default=False,
        ),
        # Restrict the legal-entity type to FISICA/MORAL.
        migrations.AlterField(
            model_name='fiscalmx',
            name='persona',
            field=models.CharField(choices=[('FISICA', 'FISICA'), ('MORAL', 'MORAL')], default='Física', max_length=100, verbose_name='Tipo de persona'),
        ),
        migrations.AlterField(
            model_name='fiscalmx',
            name='razon_social',
            field=models.CharField(default='Razon Social', help_text='Razón social de persona Física o Moral', max_length=255, verbose_name='Razón social'),
            preserve_default=False,
        ),
        # RFC is the Mexican tax id; validated by localflavor's MXRFCField.
        migrations.AlterField(
            model_name='fiscalmx',
            name='rfc',
            field=localflavor.mx.models.MXRFCField(default='SUL010720JN8', max_length=13, verbose_name='RFC'),
            preserve_default=False,
        ),
    ]
| 36.763158 | 182 | 0.625626 | 1,281 | 0.913043 | 0 | 0 | 0 | 0 | 0 | 0 | 400 | 0.285103 |
03d7145827f639d44aefbdbab3925d11bd2e21e5 | 1,994 | py | Python | tests/test_fluids_ecl.py | trhallam/digirock | 05b1199d741a384345a4930605be97369c9ec270 | [
"MIT"
] | null | null | null | tests/test_fluids_ecl.py | trhallam/digirock | 05b1199d741a384345a4930605be97369c9ec270 | [
"MIT"
] | 2 | 2022-02-28T08:51:53.000Z | 2022-02-28T13:24:33.000Z | tests/test_fluids_ecl.py | trhallam/digirock | 05b1199d741a384345a4930605be97369c9ec270 | [
"MIT"
] | null | null | null | """Test functions for pem.fluid.ecl module
"""
import pytest
from pytest import approx
import numpy as np
import digirock.fluids.ecl as fluid_ecl
from inspect import getmembers, isfunction
@pytest.fixture
def tol():
    """Shared numeric tolerances for the fluid-table assertions."""
    return {
        "rel": 0.05,  # relative testing tolerance in percent
        "abs": 0.00001,  # absolute testing tolerance
    }
@pytest.mark.parametrize(
    "pres, extrap, ans",
    [
        (325, "const", 1.4615),
        (325, "pchip", 1.4615),
        (np.r_[325, 375], "const", np.r_[1.4615, 1.4505]),
        (np.r_[325, 375], "pchip", np.r_[1.4615, 1.4505]),
    ],
)
def test_oil_fvf_table(test_data, pres, ans, extrap, tol):
    """Oil FVF lookups from PVT_BO.inc match reference values (both extrap modes)."""
    # NOTE(review): an identical test_oil_fvf_table is redefined later in
    # this module and shadows this copy, so pytest never collects it —
    # delete or rename one of the two.
    tab = np.loadtxt(test_data / "PVT_BO.inc")
    assert np.allclose(
        fluid_ecl.oil_fvf_table(tab[:, 0], tab[:, 1], pres, extrap=extrap),
        ans,
        rtol=tol["rel"],
    )
def test_oil_fvf_table_bad_pchi(test_data):
    """An unknown extrapolation mode raises ValueError."""
    # NOTE(review): "bad_pchi" looks like a typo for "bad_pchip".
    tab = np.loadtxt(test_data / "PVT_BO.inc")
    # test bad extrap; the leading `assert` is redundant inside
    # pytest.raises (the call is expected to raise before returning).
    with pytest.raises(ValueError):
        assert fluid_ecl.oil_fvf_table(
            tab[:, 0], tab[:, 1], 235, extrap="Unknown Extrap"
        )
@pytest.mark.parametrize(
    "pres, extrap, ans",
    [
        (325, "const", 1.4615),
        (325, "pchip", 1.4615),
        (np.r_[325, 375], "const", np.r_[1.4615, 1.4505]),
        (np.r_[325, 375], "pchip", np.r_[1.4615, 1.4505]),
    ],
)
def test_oil_fvf_table(test_data, pres, ans, extrap, tol):
    """Duplicate of the earlier test_oil_fvf_table (this copy is collected)."""
    # NOTE(review): this redefinition shadows the identical test above;
    # consider deleting one copy or renaming to cover distinct cases.
    tab = np.loadtxt(test_data / "PVT_BO.inc")
    assert np.allclose(
        fluid_ecl.oil_fvf_table(tab[:, 0], tab[:, 1], pres, extrap=extrap),
        ans,
        rtol=tol["rel"],
    )
@pytest.mark.parametrize("api,ans", ((20, 0.933993399339934), (45, 0.8016997167138812)))
def test_e100_oil_density(api, ans, tol):
    """e100_oil_density converts API gravity to density (scalar and array input)."""
    assert fluid_ecl.e100_oil_density(api) == approx(ans)
    # Array input must broadcast and agree with the scalar result.
    assert np.allclose(
        fluid_ecl.e100_oil_density(np.r_[api, api]), np.r_[ans, ans], atol=tol["abs"]
    )
| 28.485714 | 89 | 0.584253 | 0 | 0 | 0 | 0 | 1,499 | 0.751755 | 0 | 0 | 314 | 0.157472 |
03d7d92d4d5bfbe186989e957053e7a566a34b64 | 123 | py | Python | anyrun/__init__.py | mwalkowski/anyrun | 48545bcbbb4872ecc4f3736c9395d69b56ff6134 | [
"Apache-2.0"
] | 18 | 2019-06-10T09:37:14.000Z | 2021-09-28T18:39:50.000Z | anyrun/__init__.py | plinkert/anyrun | f0d6bd915460c9bd3d37acdcc27ddf20c92d0410 | [
"Apache-2.0"
] | 7 | 2019-07-17T04:50:59.000Z | 2020-05-09T13:33:08.000Z | anyrun/__init__.py | mwalkowski/anyrun | 48545bcbbb4872ecc4f3736c9395d69b56ff6134 | [
"Apache-2.0"
] | 5 | 2019-06-11T05:22:37.000Z | 2021-02-18T01:47:14.000Z | from anyrun.client import AnyRunClient, AnyRunException
__version__ = '0.1'
__all__ = ['AnyRunClient', 'AnyRunException']
| 24.6 | 55 | 0.780488 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 36 | 0.292683 |
03d7f39caab28a9c35a6797a159ddc4799ba288c | 618 | py | Python | python/sort/BubbleSort.py | smdsbz/homework | 6cac5cc006543bc0787ef4219e72f314ee04083e | [
"MIT"
] | 5 | 2017-05-21T15:36:27.000Z | 2018-01-01T09:47:26.000Z | python/sort/BubbleSort.py | smdsbz/homework | 6cac5cc006543bc0787ef4219e72f314ee04083e | [
"MIT"
] | null | null | null | python/sort/BubbleSort.py | smdsbz/homework | 6cac5cc006543bc0787ef4219e72f314ee04083e | [
"MIT"
] | null | null | null | #!/usr/bin/python3
'''
BubbleSort.py
by Xiaoguang Zhu
'''
def bubble_sort(array, verbose=False):
    """Sort *array* in place using bubble sort and return it.

    Args:
        array: mutable sequence of mutually comparable items.
        verbose: when True, print the array after every outer pass,
            reproducing the original script's progress output.

    Returns:
        The same (now sorted) sequence, for convenience.
    """
    for upper in range(len(array) - 1, 0, -1):
        # After each pass the largest remaining element has bubbled to
        # index `upper`, so the inner loop shrinks by one each time.
        for i in range(upper):
            if array[i] > array[i + 1]:
                array[i], array[i + 1] = array[i + 1], array[i]
        if verbose:
            print(array)
    return array


def _read_numbers():
    """Prompt for numbers until the user enters something non-numeric."""
    numbers = []
    while True:
        try:
            numbers.append(float(input(">> ")))
        except ValueError:  # exit inputing
            break
    return numbers


def main():
    """Interactive driver: read numbers, show each pass, print the result."""
    print("Enter at least two numbers to start bubble-sorting.")
    print("(You can end inputing anytime by entering nonnumeric)")
    array = _read_numbers()
    print("\nThe array you've entered was:")
    print(array)
    print("\nNow sorting...")
    bubble_sort(array, verbose=True)
    print("\nAll done! Now the moment of truth!")
    print(array)


# Guard so importing this module (e.g. for testing) has no side effects.
if __name__ == "__main__":
    main()
| 19.3125 | 62 | 0.665049 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 303 | 0.490291 |
03dd52993883fc7a35378bdcc353de5b907ed0cd | 386 | py | Python | Ex-08.py | gilmartins83/Guanabara-Python | 43128c35fcd601db1f72c80a9c76f4b4f4085c7f | [
"MIT"
] | null | null | null | Ex-08.py | gilmartins83/Guanabara-Python | 43128c35fcd601db1f72c80a9c76f4b4f4085c7f | [
"MIT"
] | null | null | null | Ex-08.py | gilmartins83/Guanabara-Python | 43128c35fcd601db1f72c80a9c76f4b4f4085c7f | [
"MIT"
] | null | null | null | medida = float(input("uma distancai em metros: "))
# Derive the other length units from `medida` (metres, read above).
# Fixes the original script: dm was metres/10 (decametres, mislabeled),
# dam was metres*1e6, the unused `m` was metres*1e5, and the format
# string received cm/mm in swapped order.
mm = medida * 1000
cm = medida * 100
dm = medida * 10
dam = medida / 10
hm = medida / 100
km = medida / 1000
ml = medida * 0.000621371  # statute miles ("ml" kept from the original output)
print("A medida de {:.0f}m corresponde a {:.0f} mm \n{:.0f} cm \n{:.1f} dm "
      "\n{:.2f} dam \n{:.2f} hm \n{:.3f} km \n{:.4f} ml"
      .format(medida, mm, cm, dm, dam, hm, km, ml))
| 29.692308 | 171 | 0.595855 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 145 | 0.375648 |
03ddc3f06dc7248a7bc64aaebf34b1b6df562b47 | 1,889 | py | Python | src/spaceone/monitoring/info/metric_info.py | jihyungSong/plugin-google-cloud-stackdriver | 3875b158ad047b9502c79475ac89e4a9d45fdb0b | [
"Apache-2.0"
] | 1 | 2020-06-22T09:49:24.000Z | 2020-06-22T09:49:24.000Z | src/spaceone/monitoring/info/metric_info.py | jihyungSong/plugin-aws-cloudwatch | 59e5ae7d6e93c8e46c221268ad93ab4a0b262fe8 | [
"Apache-2.0"
] | null | null | null | src/spaceone/monitoring/info/metric_info.py | jihyungSong/plugin-aws-cloudwatch | 59e5ae7d6e93c8e46c221268ad93ab4a0b262fe8 | [
"Apache-2.0"
] | null | null | null | import functools
from spaceone.api.monitoring.plugin import metric_pb2
from spaceone.api.core.v1 import plugin_pb2
from spaceone.core.pygrpc.message_type import *
__all__ = ['PluginMetricsResponse', 'PluginMetricDataResponse']
def PluginAction(action):
    """Convert an *action* dict into a plugin_pb2.PluginAction message.

    ``action['method']`` is mandatory; ``action['options']`` is optional and,
    when present, converted to a protobuf Struct.
    """
    kwargs = {'method': action['method']}
    if 'options' in action:
        kwargs['options'] = change_struct_type(action['options'])
    return plugin_pb2.PluginAction(**kwargs)
def MetricInfo(metric):
    """Convert a *metric* dict into a metric_pb2.MetricInfo message.

    'key', 'name', 'unit' and 'chart_type' are mandatory; 'unit' (and the
    optional 'chart_options') are converted to protobuf Structs.
    """
    kwargs = {
        'key': metric['key'],
        'name': metric['name'],
        'unit': change_struct_type(metric['unit']),
        'chart_type': metric['chart_type'],
    }
    # 'chart_options' is optional; include it only when supplied.
    if 'chart_options' in metric:
        kwargs['chart_options'] = change_struct_type(metric['chart_options'])
    return metric_pb2.MetricInfo(**kwargs)
def MetricsInfo(result):
    """Wrap the metric dicts in ``result['metrics']`` into a metric_pb2.MetricsInfo."""
    converted = [MetricInfo(metric) for metric in result['metrics']]
    return metric_pb2.MetricsInfo(metrics=converted)
def PluginMetricsResponse(response):
    """Convert a *response* dict into a metric_pb2.PluginMetricsResponse.

    :param response: dict with 'resource_type', 'result' and optional 'actions'.
    :return: metric_pb2.PluginMetricsResponse message.
    """
    info = {
        'resource_type': response['resource_type'],
        'result': MetricsInfo(response['result'])
    }
    if response.get('actions'):
        # BUG FIX: the original line was `info['actions']: [...]` — Python
        # parses that as an annotated expression (PEP 526), a no-op, so the
        # actions were silently dropped from the message. It must be `=`.
        info['actions'] = [PluginAction(action) for action in response.get('actions', [])]
    return metric_pb2.PluginMetricsResponse(**info)
def MetricDataInfo(result):
    """Convert a metric-data *result* dict into a metric_pb2.MetricDataInfo.

    Both 'labels' and 'values' are converted to protobuf list values.
    """
    labels = change_list_value_type(result['labels'])
    values = change_list_value_type(result['values'])
    return metric_pb2.MetricDataInfo(labels=labels, values=values)
def PluginMetricDataResponse(response):
    """Convert a *response* dict into a metric_pb2.PluginMetricDataResponse.

    :param response: dict with 'resource_type', 'result' and optional 'actions'.
    :return: metric_pb2.PluginMetricDataResponse message.
    """
    info = {
        'resource_type': response['resource_type'],
        'result': MetricDataInfo(response['result'])
    }
    if response.get('actions'):
        # BUG FIX: the original line was `info['actions']: [...]` — Python
        # parses that as an annotated expression (PEP 526), a no-op, so the
        # actions were silently dropped from the message. It must be `=`.
        info['actions'] = [PluginAction(action) for action in response.get('actions', [])]
    return metric_pb2.PluginMetricDataResponse(**info)
| 25.186667 | 89 | 0.654844 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 391 | 0.206988 |
03de85b4de7acb4fb0b954a110ce43afb026ce19 | 7,466 | py | Python | POP CHECK R0 py36.py | Rigonz/PopDensity_SatelliteNightLight | 88b0fae1e09984e08506063908d9c7fce6dc2229 | [
"MIT"
] | null | null | null | POP CHECK R0 py36.py | Rigonz/PopDensity_SatelliteNightLight | 88b0fae1e09984e08506063908d9c7fce6dc2229 | [
"MIT"
] | null | null | null | POP CHECK R0 py36.py | Rigonz/PopDensity_SatelliteNightLight | 88b0fae1e09984e08506063908d9c7fce6dc2229 | [
"MIT"
] | null | null | null | '''
Created on: see version log.
@author: rigonz
coding: utf-8
IMPORTANT: requires py3.6 (rasterio)
Script that:
1) reads a series of raster files,
2) runs some checks,
3) makes charts showing the results.
The input data corresponds to a region of the world (ESP) and represents
the population density (pop/km2).
Each file has from a data provider, or different calculation conditions.
The checks consist in verifying that the input files refer to the same region
and to some intercomparison indicators.
The charts show the correlation among the different input data, as tuples
associated to the same geographical location.
Version log.
R0 (20210512):
First trials, seems to work well.
'''
# %% Imports.
import rasterio # IMPORTANT: requires py3.6
import numpy as np
from matplotlib import pyplot as plt
# %% Directories.
RootDirIn = 'D:/0 DOWN/zz EXTSave/GIS/POP/EUR/SHP/'
# Filenames:
FileNameI1 = RootDirIn + 'WP/ESP_clip_pd_2020_1km_UNadj.tif'
FileNameI2 = RootDirIn + 'WP/ESP_clip_ppp_2020_1km_Aggregated_UNadj_d.tif'
FileNameI3 = RootDirIn + 'GPW/ESP_clip gpw_v4_population_density_rev11_2020_30_sec.tif'
FileNameI4 = RootDirIn + 'GPW/ESP_clip gpw_v4_population_density_adjusted_to_2015_unwpp_country_totals_rev11_2020_30_sec.tif'
# %% Read data.
# Open files:
print('Opening and reading the files...')
ds1 = rasterio.open(FileNameI1)
ds2 = rasterio.open(FileNameI2)
ds3 = rasterio.open(FileNameI3)
ds4 = rasterio.open(FileNameI4)
# Read data:
band1 = ds1.read(1)
band2 = ds2.read(1)
band3 = ds3.read(1)
band4 = ds4.read(1)
# %% Check the datasets.
# Verify that the four rasters agree on bounds, size, band index, shape
# and CRS; mismatches are only reported, the script keeps running.
print('Checking the data...')
# Bounds:
if not(ds1.bounds == ds2.bounds and ds2.bounds == ds3.bounds and
       ds3.bounds == ds4.bounds):
    print('WARNING: bounds are not the same:')
    print(ds1.bounds)
    print(ds2.bounds)
    print(ds3.bounds)
    print(ds4.bounds)
# Width and height:
if not(ds1.width == ds2.width and ds2.width == ds3.width and
       ds3.width == ds4.width):
    print('WARNING: widths are not the same:')
    print(ds1.width)
    print(ds2.width)
    print(ds3.width)
    print(ds4.width)
if not(ds1.height == ds2.height and ds2.height == ds3.height and
       ds3.height == ds4.height):
    print('WARNING: heights are not the same:')
    print(ds1.height)
    print(ds2.height)
    print(ds3.height)
    print(ds4.height)
# Bands:
# Compares the first band index of each dataset.
if not(ds1.indexes[0] == ds2.indexes[0] and ds2.indexes[0] == ds3.indexes[0]
       and ds3.indexes[0] == ds4.indexes[0]):
    print('WARNING: bands are not the same:')
    print(ds1.indexes[0])
    print(ds2.indexes[0])
    print(ds3.indexes[0])
    print(ds4.indexes[0])
# Dimensions:
if not(ds1.shape == ds2.shape and ds2.shape == ds3.shape and
       ds3.shape == ds4.shape):
    print('WARNING: shapes are not the same:')
    print(ds1.shape)
    print(ds2.shape)
    print(ds3.shape)
    print(ds4.shape)
# CRS:
# NOTE(review): the bare `except` below hides ANY error (e.g. a typo or an
# unexpected CRS data layout), not only a missing CRS; narrowing it to
# `except Exception` would be safer.
try:
    if (ds1.crs.data['init'] != 'epsg:4326' or
            ds2.crs.data['init'] != 'epsg:4326' or
            ds3.crs.data['init'] != 'epsg:4326' or
            ds4.crs.data['init'] != 'epsg:4326'):
        print('WARNING: CRS is not EPSG:4326.')
except:
    print('WARNING: CRS is not available or is not EPSG:4326:')
# %% Create new bands.
print('Checking the new bands...')
# Remain within the boundaries of data:
# Common extent = intersection of the four extents (sign conventions below
# assume an east/north hemisphere region, as noted by the original author).
left = max(ds1.bounds.left, ds2.bounds.left, ds3.bounds.left, ds4.bounds.left)
top = min(ds1.bounds.top, ds2.bounds.top, ds3.bounds.top, ds4.bounds.top)
right = min(ds1.bounds.right, ds2.bounds.right, ds3.bounds.right, ds4.bounds.right)
bottom = max(ds1.bounds.bottom, ds2.bounds.bottom, ds3.bounds.bottom, ds4.bounds.bottom)
res = 1 / 120.  # 30 arc-sec (approx. 1 km at the equator); should be min() etc.
height = int(np.ceil((top - bottom) / res + 1))
width = int(np.ceil((right - left) / res + 1))
# Actual grid step after rounding height/width up to whole cells.
res_x = (right - left) / (width - 1)
res_y = (top - bottom) / (height - 1)
# Check (valid for east + north hemispheres only!):
if right > min(ds1.bounds.right, ds2.bounds.right, ds3.bounds.right, ds4.bounds.right):
    print('WARNING: right boundary exceeded.')
if bottom > max(ds1.bounds.bottom, ds2.bounds.bottom, ds3.bounds.bottom, ds4.bounds.bottom):
    print('WARNING: bottom boundary exceeded.')
# Create new bands:
# All four datasets resampled onto the same (height, width) grid, 0-filled.
print('Creating the new bands...')
b1 = np.full((height, width), 0.)
b2 = np.full((height, width), 0.)
b3 = np.full((height, width), 0.)
b4 = np.full((height, width), 0.)
# Populate the new bands:
# For each cell of the common grid, sample all four rasters at the same
# geographic coordinate (nearest source cell via DatasetReader.index).
count = 0
for i in range(0, height-1, 1):
    # NOTE(review): the ranges stop at height-2 / width-2, so the last row
    # and column keep their 0. initial value — confirm this is intended.
    for j in range(0, width-1, 1):
        x, y = (left + j * res_x, top - i * res_y)
        row, col = ds1.index(x, y)
        b1[i, j] = band1[row, col]
        row, col = ds2.index(x, y)
        b2[i, j] = band2[row, col]
        row, col = ds3.index(x, y)
        b3[i, j] = band3[row, col]
        row, col = ds4.index(x, y)
        b4[i, j] = band4[row, col]
    # Show the progress:
    # count < height here, so `count % height % 50` is just `count % 50`.
    if count % height % 50 == 0:
        print('Progress... {:4.1f}%'.format(count/height*100))
    count += 1
# %% Flatten and clear nodata.
print('Preparing the new bands...')
# 1-D copies of the resampled bands, all in the same cell order.
b1f = b1.flatten()
b2f = b2.flatten()
b3f = b3.flatten()
b4f = b4.flatten()
# Remove only nodata, retain 0s:
# A cell is dropped when ANY of the four datasets is negative there
# (negative values being the nodata markers).
b_mask = np.array(np.array([b1f, b2f, b3f, b4f]).min(axis=0) < 0)
# NOTE(review): np.delete() with a boolean array treats it as a mask only on
# numpy >= 1.19; on older versions booleans are interpreted as integer
# indices — confirm the numpy version in use.
b1fm = np.delete(b1f, b_mask)
b2fm = np.delete(b2f, b_mask)
b3fm = np.delete(b3f, b_mask)
b4fm = np.delete(b4f, b_mask)
# %% Compute correlations.
# Pairwise Pearson correlation between the datasets over the retained cells.
print('Pearson coeff. after removing the no-data:')
print('DS1-2 = {:4.3f}.'.format(np.corrcoef(b1fm, b2fm)[0, 1]))
print('DS1-3 = {:4.3f}.'.format(np.corrcoef(b1fm, b3fm)[0, 1]))
print('DS1-4 = {:4.3f}.'.format(np.corrcoef(b1fm, b4fm)[0, 1]))
print('DS2-3 = {:4.3f}.'.format(np.corrcoef(b2fm, b3fm)[0, 1]))
print('DS2-4 = {:4.3f}.'.format(np.corrcoef(b2fm, b4fm)[0, 1]))
print('DS3-4 = {:4.3f}.'.format(np.corrcoef(b3fm, b4fm)[0, 1]))
# %% Draw histograms.
# Histogram of all four datasets side by side (nodata already removed).
# Auxiliaries:
color = ['k', 'r', 'b', 'g']
label = ['DS1', 'DS2', 'DS3', 'DS4']
# Plot:
plt.hist([b1fm, b2fm, b3fm, b4fm], bins=20, color=color[0:4], label=label)
# Etc:
plt.title('DS=>0', loc='right')
plt.xlabel('pop. density, hab/km2')
plt.ylabel('count')
plt.grid(True)
plt.legend()
plt.show()
# Zoom at the right tail:
# Same histogram, y-axis clipped so the low-count tail is visible.
# Plot:
plt.hist([b1fm, b2fm, b3fm, b4fm], bins=20, color=color[0:4], label=label)
# Etc:
plt.title('DS>=0', loc='right')
plt.xlabel('pop. density, hab/km2')
plt.ylabel('count')
plt.grid(True)
plt.legend()
# plt.xlim(1500, 40000)
plt.ylim(0, 7500)
plt.show()
# %% Draw chart.
# Scatter plot of DS2 vs DS3 (the DS1 pairings are kept commented out).
# Auxiliaries:
color = ['k', 'r', 'b', 'g']
# Plot:
plt.figure(1, figsize=(4, 4), dpi=300)
# plt.scatter(b1fm, b3fm, color=color[0], s=1.0, label='1-3', alpha=0.1)
# plt.scatter(b1fm, b4fm, color=color[1], s=1.0, label='1-4', alpha=0.1)
plt.scatter(b2fm, b3fm, color=color[2], s=1.0, label='2-3', alpha=0.1)
# Titles:
plt.title('PD>=0', loc='right')
plt.xlabel('pop. density, hab/km2')
plt.ylabel('pop. density, hab/km2')
# Etc:
plt.grid(True)
plt.legend()
plt.tight_layout()
# Take a look:
plt.show()
# %% Draw heatmap.
# Remove 0s:
# Rebuild the mask dropping zeros too, so log10 below stays finite.
b_mask = np.array(np.array([b1f, b2f, b3f, b4f]).min(axis=0) <= 0)
b1fm = np.delete(b1f, b_mask)
b2fm = np.delete(b2f, b_mask)
b3fm = np.delete(b3f, b_mask)
b4fm = np.delete(b4f, b_mask)
# Plot:
# 2-D density of DS2 vs DS3 in log10 space.
plt.hist2d(np.log10(b2fm), np.log10(b3fm), bins=100, cmap='binary')
# Colorbar:
cb = plt.colorbar()
cb.set_label('Number of entries')
# Etc:
plt.title('PD>0', loc='right')
plt.xlabel('log10_DS2 pop. density, hab/km2')
plt.ylabel('log10_DS3 pop. density, hab/km2')
plt.tight_layout()
plt.show()
# %% Script done.
print('\nScript completed. Thanks!')
| 28.496183 | 125 | 0.659925 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,915 | 0.390332 |
03e1aa08a772e435ec4bfa6b688a45a8f113fb37 | 25,849 | py | Python | lookerapi/models/query.py | jcarah/python_sdk | 3bff34d04a828c940c3f93055e10b6a0095c2327 | [
"MIT"
] | null | null | null | lookerapi/models/query.py | jcarah/python_sdk | 3bff34d04a828c940c3f93055e10b6a0095c2327 | [
"MIT"
] | null | null | null | lookerapi/models/query.py | jcarah/python_sdk | 3bff34d04a828c940c3f93055e10b6a0095c2327 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Looker API 3.1 Reference
### Authorization The Looker API uses Looker **API3** credentials for authorization and access control. Looker admins can create API3 credentials on Looker's **Admin/Users** page. Pass API3 credentials to the **/login** endpoint to obtain a temporary access_token. Include that access_token in the Authorization header of Looker API requests. For details, see [Looker API Authorization](https://looker.com/docs/r/api/authorization) ### Client SDKs The Looker API is a RESTful system that should be usable by any programming language capable of making HTTPS requests. Client SDKs for a variety of programming languages can be generated from the Looker API's Swagger JSON metadata to streamline use of the Looker API in your applications. A client SDK for Ruby is available as an example. For more information, see [Looker API Client SDKs](https://looker.com/docs/r/api/client_sdks) ### Try It Out! The 'api-docs' page served by the Looker instance includes 'Try It Out!' buttons for each API method. After logging in with API3 credentials, you can use the \"Try It Out!\" buttons to call the API directly from the documentation page to interactively explore API features and responses. Note! With great power comes great responsibility: The \"Try It Out!\" button makes API calls to your live Looker instance. Be especially careful with destructive API operations such as `delete_user` or similar. There is no \"undo\" for API operations. ### Versioning Future releases of Looker will expand this API release-by-release to securely expose more and more of the core power of Looker to API client applications. API endpoints marked as \"beta\" may receive breaking changes without warning (but we will try to avoid doing that). Stable (non-beta) API endpoints should not receive breaking changes in future releases. For more information, see [Looker API Versioning](https://looker.com/docs/r/api/versioning) This **API 3.1** is in active development. 
This is where support for new Looker features will appear as non-breaking additions - new functions, new optional parameters on existing functions, or new optional properties in existing types. Additive changes should not impact your existing application code that calls the Looker API. Your existing application code will not be aware of any new Looker API functionality until you choose to upgrade your app to use a newer Looker API client SDK release. The following are a few examples of noteworthy items that have changed between API 3.0 and API 3.1. For more comprehensive coverage of API changes, please see the release notes for your Looker release. ### Examples of new things added in API 3.1: * Dashboard construction APIs * Themes and custom color collections APIs * Create and run SQL_runner queries * Create and run merged results queries * Create and modify dashboard filters * Create and modify password requirements ### Deprecated in API 3.0 The following functions and properties have been deprecated in API 3.0. They continue to exist and work in API 3.0 for the next several Looker releases but they have not been carried forward to API 3.1: * Dashboard Prefetch functions * User access_filter functions * User API 1.0 credentials functions * Space.is_root and Space.is_user_root properties. Use Space.is_shared_root and Space.is_users_root instead. ### Semantic changes in API 3.1: * `all_looks` no longer includes soft-deleted looks, matching `all_dashboards` behavior. You can find soft-deleted looks using `search_looks` with the `deleted` param set to True. * `all_spaces` no longer includes duplicate items * `search_users` no longer accepts Y,y,1,0,N,n for Boolean params, only \"true\" and \"false\". * For greater client and network compatibility, `render_task_results` now returns HTTP status ***202 Accepted*** instead of HTTP status ***102 Processing*** * `all_running_queries` and `kill_query` functions have moved into the `Query` function group. 
If you have application code which relies on the old behavior of the APIs above, you may continue using the API 3.0 functions in this Looker release. We strongly suggest you update your code to use API 3.1 analogs as soon as possible.
OpenAPI spec version: 3.1.0
Contact: support@looker.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Query(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.

    Hand-refactored to remove the 27 copy-pasted property/getter/setter
    pairs: every attribute is declared once through the `_prop` helper
    below. The public interface is unchanged — each attribute is still a
    read/write property backed by `_<name>`, the required `model` and
    `view` setters still raise ValueError with the original message when
    given None, and `to_dict`/`to_str`/`__repr__`/`__eq__`/`__ne__` keep
    their original behavior. `six.iteritems` was replaced by the
    equivalent `dict.items()` (works on both py2 and py3).
    """

    def _prop(name, required=False, doc=None):
        # Class-definition-time helper (deleted from the namespace below):
        # builds a property backed by the `_<name>` instance attribute.
        attr = '_' + name

        def fget(self):
            return getattr(self, attr)

        def fset(self, value):
            # Required fields reject None with the same message the
            # generated setters used.
            if required and value is None:
                raise ValueError(
                    "Invalid value for `%s`, must not be `None`" % name)
            setattr(self, attr, value)

        return property(fget, fset, doc=doc)

    # One declaration per swagger attribute (previously ~20 lines each).
    id = _prop('id', doc='Unique Id')
    model = _prop('model', required=True, doc='Model')
    view = _prop('view', required=True, doc='Explore Name')
    fields = _prop('fields', doc='Fields')
    pivots = _prop('pivots', doc='Pivots')
    fill_fields = _prop('fill_fields', doc='Fill Fields')
    filters = _prop('filters', doc='Filters')
    filter_expression = _prop('filter_expression', doc='Filter Expression')
    sorts = _prop('sorts', doc='Sorting for the query results.')
    limit = _prop('limit', doc='Limit')
    column_limit = _prop('column_limit', doc='Column Limit')
    total = _prop('total', doc='Total')
    row_total = _prop('row_total', doc='Raw Total')
    subtotals = _prop('subtotals', doc='Fields on which to run subtotals')
    runtime = _prop('runtime', doc='Runtime')
    vis_config = _prop('vis_config',
                       doc='Visualization configuration properties.')
    filter_config = _prop('filter_config',
                          doc='State of the filter UI on the explore page.')
    visible_ui_sections = _prop('visible_ui_sections',
                                doc='Visible UI Sections')
    slug = _prop('slug', doc='Slug')
    dynamic_fields = _prop('dynamic_fields', doc='Dynamic Fields')
    client_id = _prop('client_id',
                      doc='Client Id: used to generate shortened explore URLs.')
    share_url = _prop('share_url', doc='Share Url')
    expanded_share_url = _prop('expanded_share_url', doc='Expanded Share Url')
    url = _prop('url', doc='Expanded Url')
    query_timezone = _prop('query_timezone', doc='Query Timezone')
    has_table_calculations = _prop('has_table_calculations',
                                   doc='Has Table Calculations')
    can = _prop('can', doc='Operations the current user is able to perform '
                           'on this object')

    del _prop  # definition-time helper only; keep it off the class namespace

    def __init__(self, id=None, model=None, view=None, fields=None,
                 pivots=None, fill_fields=None, filters=None,
                 filter_expression=None, sorts=None, limit=None,
                 column_limit=None, total=None, row_total=None,
                 subtotals=None, runtime=None, vis_config=None,
                 filter_config=None, visible_ui_sections=None, slug=None,
                 dynamic_fields=None, client_id=None, share_url=None,
                 expanded_share_url=None, url=None, query_timezone=None,
                 has_table_calculations=None, can=None):
        """
        Query - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        # Kept as instance attributes (not class-level) to match the
        # behavior of the generated code.
        self.swagger_types = {
            'id': 'int',
            'model': 'str',
            'view': 'str',
            'fields': 'list[str]',
            'pivots': 'list[str]',
            'fill_fields': 'list[str]',
            'filters': 'dict(str, str)',
            'filter_expression': 'str',
            'sorts': 'list[str]',
            'limit': 'str',
            'column_limit': 'str',
            'total': 'bool',
            'row_total': 'str',
            'subtotals': 'list[str]',
            'runtime': 'float',
            'vis_config': 'dict(str, str)',
            'filter_config': 'dict(str, str)',
            'visible_ui_sections': 'str',
            'slug': 'str',
            'dynamic_fields': 'str',
            'client_id': 'str',
            'share_url': 'str',
            'expanded_share_url': 'str',
            'url': 'str',
            'query_timezone': 'str',
            'has_table_calculations': 'bool',
            'can': 'dict(str, bool)'
        }
        # In this model every JSON key equals the attribute name, so the
        # generated identity mapping is built programmatically.
        self.attribute_map = {name: name for name in self.swagger_types}

        # Like the generated code, __init__ writes the private slots
        # directly, so the `model`/`view` None-validation is NOT applied
        # at construction time.
        self._id = id
        self._model = model
        self._view = view
        self._fields = fields
        self._pivots = pivots
        self._fill_fields = fill_fields
        self._filters = filters
        self._filter_expression = filter_expression
        self._sorts = sorts
        self._limit = limit
        self._column_limit = column_limit
        self._total = total
        self._row_total = row_total
        self._subtotals = subtotals
        self._runtime = runtime
        self._vis_config = vis_config
        self._filter_config = filter_config
        self._visible_ui_sections = visible_ui_sections
        self._slug = slug
        self._dynamic_fields = dynamic_fields
        self._client_id = client_id
        self._share_url = share_url
        self._expanded_share_url = expanded_share_url
        self._url = url
        self._query_timezone = query_timezone
        self._has_table_calculations = has_table_calculations
        self._can = can

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recurse into model objects held in lists.
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recurse into model objects held as dict values.
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, Query):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| 32.392231 | 4,190 | 0.607954 | 21,401 | 0.827924 | 0 | 0 | 15,643 | 0.605168 | 0 | 0 | 17,102 | 0.661612 |
03e4f0c10cb588161abc592a99f12c950ad74fb3 | 1,036 | py | Python | custom_components/hello_world.py | swissglider/homeassistant_custome_components | 8d3fef980831789b6ecd7f51e9bc197b18fa8fb9 | [
"MIT"
] | null | null | null | custom_components/hello_world.py | swissglider/homeassistant_custome_components | 8d3fef980831789b6ecd7f51e9bc197b18fa8fb9 | [
"MIT"
] | 1 | 2019-02-01T15:09:37.000Z | 2019-02-01T15:09:37.000Z | custom_components/hello_world.py | swissglider/homeassistant_custome_components | 8d3fef980831789b6ecd7f51e9bc197b18fa8fb9 | [
"MIT"
] | 1 | 2022-01-19T10:09:32.000Z | 2022-01-19T10:09:32.000Z | """
Hello World Component.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/developers/development_101/
"""
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
# Initialize the logger
_LOGGER = logging.getLogger(__name__)
# The domain of your component. Equal to the filename of your component.
DOMAIN = "hello_world"
# define the dependencies (none for this component)
DEPENDENCIES = []
# Configuration key for the text to display, and its fallback value.
CONF_TEXT = 'text'
DEFAULT_TEXT = 'No text!'
# Validate the component's configuration: a required string under
# `hello_world.text`; unrelated top-level keys are tolerated (ALLOW_EXTRA).
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Required(CONF_TEXT): cv.string,
    })
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
    """Set up the hello_world component.

    Reads the optional ``text`` option (falling back to DEFAULT_TEXT) and
    publishes it as the state of ``hello_world.Hello_State``.
    """
    greeting = config[DOMAIN].get(CONF_TEXT, DEFAULT_TEXT)
    # States are in the format DOMAIN.OBJECT_ID.
    hass.states.set('hello_world.Hello_State', greeting)
    # Signal successful initialization to Home Assistant.
    return True
03e57254dfaf90b0cc055c4ccd9443ee667b2d23 | 1,108 | py | Python | svsim/compare/sc21_compare/cirq/cirq_multiply_n13.py | yukwangmin/SV-Sim | 1b6b71cb490e7a1eac3d6ebc24777590d48378de | [
"MIT"
] | null | null | null | svsim/compare/sc21_compare/cirq/cirq_multiply_n13.py | yukwangmin/SV-Sim | 1b6b71cb490e7a1eac3d6ebc24777590d48378de | [
"MIT"
] | null | null | null | svsim/compare/sc21_compare/cirq/cirq_multiply_n13.py | yukwangmin/SV-Sim | 1b6b71cb490e7a1eac3d6ebc24777590d48378de | [
"MIT"
] | null | null | null | import time
import cirq
import numpy as np
from functools import reduce
# Build a 13-qubit circuit (named q0..q12) implementing a small
# multiplication circuit; qubits 0-4 hold the inputs, 5-12 the partial
# products and sums.
q = [cirq.NamedQubit('q' + str(i)) for i in range(13)]
circuit = cirq.Circuit(
# Initialize the input registers with X gates.
cirq.X(q[0]),
cirq.X(q[1]),
cirq.X(q[2]),
cirq.X(q[4]),
# Toffoli gates compute the pairwise partial products.
cirq.CCX(q[2], q[0], q[5]),
cirq.CCX(q[2], q[1], q[6]),
cirq.CCX(q[3], q[0], q[7]),
cirq.CCX(q[3], q[1], q[8]),
cirq.CCX(q[4], q[0], q[9]),
cirq.CCX(q[4], q[1], q[10]),
# CNOTs accumulate (XOR) partial products into the result qubits.
cirq.CNOT(q[6], q[11]),
cirq.CNOT(q[7], q[11]),
cirq.CNOT(q[8], q[12]),
cirq.CNOT(q[9], q[12]),
# Measure the four result qubits into classical keys c0..c3.
cirq.measure(q[5], key='c0'),
cirq.measure(q[11], key='c1'),
cirq.measure(q[12], key='c2'),
cirq.measure(q[10], key='c3')
)
# Time a single-shot simulation of the circuit.
start = time.time()
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=1)
result_dict = dict(result.multi_measurement_histogram(keys=['c0', 'c1', 'c2', 'c3', ]))
# Fold each per-key measurement tuple into a bitstring (reversed so the
# first measured key becomes the least significant position).
keys = list(map(lambda arr: reduce(lambda x, y: str(x) + str(y), arr[::-1]), result_dict.keys()))
counts = dict(zip(keys,[value for value in result_dict.values()]))
#print(counts)
end = time.time()
# Report wall-clock simulation time for this benchmark.
print("multiply_n13 simulate on Cirq:" + str(end-start))
03eb9e9a2678ce2856ad1cc39eac15c2a16bbcc9 | 431 | py | Python | Section_7/word_count_repo/src/word_count.py | PacktPublishing/Software-Engineering-with-Python-3.x | 056e4c89e4f8d7fc4a4095ee0671d6944a86630e | [
"MIT"
] | 1 | 2020-02-02T13:55:29.000Z | 2020-02-02T13:55:29.000Z | Section_7/word_count_repo/src/word_count.py | PacktPublishing/Software-Engineering-with-Python-3.x | 056e4c89e4f8d7fc4a4095ee0671d6944a86630e | [
"MIT"
] | null | null | null | Section_7/word_count_repo/src/word_count.py | PacktPublishing/Software-Engineering-with-Python-3.x | 056e4c89e4f8d7fc4a4095ee0671d6944a86630e | [
"MIT"
] | 2 | 2020-02-09T12:41:40.000Z | 2020-09-21T02:16:06.000Z | from project_utils import dict_to_file, get_word_count
if __name__ == "__main__":
    # Count word frequencies in sample.txt and write them out as CSV.
    inp_filename = 'sample.txt'
    out_filename = 'count.csv'
    print("Reading file ", inp_filename)
    # get_word_count returns a {word: occurrences} mapping for the file.
    word_dict = get_word_count(inp_filename)
    print("Output from get_word_count is")
    print(word_dict)
    print("Writing to file named", out_filename)
    # Persist the mapping; dict_to_file handles the CSV serialization.
    dict_to_file(word_dict, out_filename)
    print("Done processing!")
| 19.590909 | 54 | 0.703016 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 120 | 0.278422 |
03ecb8a44ff3c3e66ce134246974ad0988fe8e8e | 1,242 | py | Python | examples/tempy_examples.py | NinoDoko/TemPy | c6bdd4c12ae1a4a5db6a852295f7f758b7dc595a | [
"Apache-2.0"
] | null | null | null | examples/tempy_examples.py | NinoDoko/TemPy | c6bdd4c12ae1a4a5db6a852295f7f758b7dc595a | [
"Apache-2.0"
] | null | null | null | examples/tempy_examples.py | NinoDoko/TemPy | c6bdd4c12ae1a4a5db6a852295f7f758b7dc595a | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import json
import os
from flask import Flask
app = Flask(__name__)


# Each handler imports its TemPy page template lazily (inside the function)
# so a broken template only fails the route that uses it.
@app.route('/')
def none_handler():
    # Root route renders the homepage template.
    from templates.homepage import page
    return page.render()


@app.route('/hello_world')
def hello_world_handler():
    from templates.hello_world import page
    return page.render()


@app.route('/star_wars')
def star_wars_handler():
    from templates.star_wars import page
    # Load character data shipped with the app's static files and feed it
    # to the template as render context.
    json_filename = os.path.join(app.static_folder, 'sw-people.json')
    with open(json_filename, 'r') as f:
        people = json.load(f)['characters']
    return page.render(characters=people)


@app.route('/list')
def render_list_handler():
    from templates.render_list import page
    return page.render()


@app.route('/static')
def static_files_handler():
    from templates.static_files import page
    return page.render()


@app.route('/table')
def table_handler():
    from templates.table_example import page
    return page.render()


@app.route('/css')
def css_handler():
    from templates.css_example import page
    return page.render()


@app.route('/homepage')
def homepage_handler():
    from templates.homepage import page
    return page.render()


if __name__ == '__main__':
    # Development server only; debug mode must not be used in production.
    app.run(port=8888, debug=True)
| 22.581818 | 69 | 0.706924 | 0 | 0 | 0 | 0 | 1,067 | 0.859098 | 0 | 0 | 134 | 0.10789 |
03ef1f344c45295f3dabd049b11b142929115048 | 1,671 | py | Python | Chapter05/airflow/dags/classification_pipeline_dag.py | arifmudi/Machine-Learning-Engineering-with-Python | 05c3fb9ae9fb9124a13812f59f8e681d66832d3b | [
"MIT"
] | 67 | 2021-01-31T19:43:15.000Z | 2022-03-27T08:03:56.000Z | Chapter05/airflow/dags/classification_pipeline_dag.py | arifmudi/Machine-Learning-Engineering-with-Python | 05c3fb9ae9fb9124a13812f59f8e681d66832d3b | [
"MIT"
] | null | null | null | Chapter05/airflow/dags/classification_pipeline_dag.py | arifmudi/Machine-Learning-Engineering-with-Python | 05c3fb9ae9fb9124a13812f59f8e681d66832d3b | [
"MIT"
] | 35 | 2021-02-08T14:34:46.000Z | 2022-03-18T16:06:09.000Z | from datetime import timedelta
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.utils.dates import days_ago
# Default task arguments shared by every task in this DAG.
default_args = {
    'owner': 'Andrew McMahon',
    'depends_on_past': False,
    'start_date': days_ago(2),
    'email': ['example@example.com'],
    'email_on_failure': False,
    'email_on_retry': False,
    'retries': 1,
    'retry_delay': timedelta(minutes=2),
}

# Instantiate the DAG: a basic daily pipeline for classifying the Wine dataset.
# BUG FIX: the original used a directional (smart) opening quote in the
# description string, which is a Python syntax error.
dag = DAG(
    'classification_pipeline',
    default_args=default_args,
    description='Basic pipeline for classifying the Wine Dataset',
    schedule_interval=timedelta(days=1),  # run once per day
)

# Step 1: fetch the dataset.
get_data = BashOperator(
    task_id='get_data',
    bash_command='python3 /usr/local/airflow/scripts/get_data.py',
    dag=dag,
)

# Step 2: train the classifier on the fetched data.
train_model = BashOperator(
    task_id='train_model',
    depends_on_past=False,
    bash_command='python3 /usr/local/airflow/scripts/train_model.py',
    retries=3,
    dag=dag,
)

# Step 3: persist the trained model to MLFlow.
# BUG FIX: the original bash_command was an unterminated string containing
# smart quotes and ellipsis characters ('python ……./persist_model.py,');
# use the same scripts directory as the sibling tasks.
persist_model = BashOperator(
    task_id='persist_model',
    depends_on_past=False,
    bash_command='python3 /usr/local/airflow/scripts/persist_model.py',
    retries=3,
    dag=dag,
)

# Pipeline ordering: fetch -> train -> persist.
get_data >> train_model >> persist_model
| 26.109375 | 69 | 0.691801 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 773 | 0.460393 |
03ef80065ba71a9283c8b010bfb8f94407342153 | 3,424 | py | Python | Yank/tests/test_pipeline.py | hannahbrucemacdonald/yank | 8f79b6a06f0a197bf761fea9451bf00021c3e690 | [
"MIT"
] | null | null | null | Yank/tests/test_pipeline.py | hannahbrucemacdonald/yank | 8f79b6a06f0a197bf761fea9451bf00021c3e690 | [
"MIT"
] | null | null | null | Yank/tests/test_pipeline.py | hannahbrucemacdonald/yank | 8f79b6a06f0a197bf761fea9451bf00021c3e690 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# =============================================================================
# MODULE DOCSTRING
# =============================================================================
"""
Test pipeline functions in pipeline.py.
"""
# =============================================================================
# GLOBAL IMPORTS
# =============================================================================
from yank.pipeline import *
# =============================================================================
# TESTS
# =============================================================================
def test_compute_min_dist():
    """Test computation of minimum distance between two molecules."""
    # BUG FIX: `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `float` is the documented replacement.
    mol1_pos = np.array([[-1, -1, -1], [1, 1, 1]], float)
    mol2_pos = np.array([[3, 3, 3], [3, 4, 5]], float)
    mol3_pos = np.array([[2, 2, 2], [2, 4, 5]], float)
    # Closest pair is (1,1,1) <-> (2,2,2), at distance sqrt(3).
    assert compute_min_dist(mol1_pos, mol2_pos, mol3_pos) == np.sqrt(3)
def test_compute_min_max_dist():
    """Test compute_min_max_dist() function."""
    ref_pos = np.array([[-1, -1, -1], [1, 1, 1]])
    near_pos = np.array([[2, 2, 2], [2, 4, 5]])  # determines the min distance
    far_pos = np.array([[3, 3, 3], [3, 4, 5]])   # determines the max distance
    min_dist, max_dist = compute_min_max_dist(ref_pos, near_pos, far_pos)
    assert min_dist == np.linalg.norm(ref_pos[1] - near_pos[0])
    assert max_dist == np.linalg.norm(ref_pos[1] - far_pos[1])
# ==============================================================================
# SETUP PIPELINE UTILITY FUNCTIONS
# ==============================================================================
def test_remove_overlap():
    """Test function remove_overlap()."""
    # BUG FIX: `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `float` is the documented replacement.
    mol1_pos = np.array([[-1, -1, -1], [1, 1, 1]], float)
    mol2_pos = np.array([[1, 1, 1], [3, 4, 5]], float)
    mol3_pos = np.array([[2, 2, 2], [2, 4, 5]], float)
    # The molecules initially overlap (shared point at (1,1,1)).
    assert compute_min_dist(mol1_pos, mol2_pos, mol3_pos) < 0.1
    mol1_pos = remove_overlap(mol1_pos, mol2_pos, mol3_pos, min_distance=0.1, sigma=2.0)
    # After the perturbation, the minimum separation must be restored.
    assert compute_min_dist(mol1_pos, mol2_pos, mol3_pos) >= 0.1
def test_pull_close():
    """Test function pull_close()."""
    # BUG FIX: `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `float` is the documented replacement.
    mol1_pos = np.array([[-1, -1, -1], [1, 1, 1]], float)
    mol2_pos = np.array([[-1, -1, -1], [1, 1, 1]], float)
    mol3_pos = np.array([[10, 10, 10], [13, 14, 15]], float)
    translation2 = pull_close(mol1_pos, mol2_pos, 1.5, 5)
    translation3 = pull_close(mol1_pos, mol3_pos, 1.5, 5)
    assert isinstance(translation2, np.ndarray)
    # The translated molecules must end up within the [1.5, 5] distance band.
    assert 1.5 <= compute_min_dist(mol1_pos, mol2_pos + translation2) <= 5
    assert 1.5 <= compute_min_dist(mol1_pos, mol3_pos + translation3) <= 5
def test_pack_transformation():
    """Test function pack_transformation()."""
    BOX_SIZE = 5
    CLASH_DIST = 1

    # BUG FIX: `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `float` is the documented replacement.
    mol1 = np.array([[-1, -1, -1], [1, 1, 1]], float)
    mols = [np.copy(mol1),          # distance = 0 (fully overlapping)
            mol1 + 2 * BOX_SIZE]    # distance > box
    # Affine (homogeneous) coordinates: append a column of ones.
    mols_affine = [np.append(mol, np.ones((2, 1)), axis=1) for mol in mols]

    transformations = [pack_transformation(mol1, mol2, CLASH_DIST, BOX_SIZE) for mol2 in mols]
    for mol, transf in zip(mols_affine, transformations):
        assert isinstance(transf, np.ndarray)
        mol2 = mol.dot(transf.T)[:, :3]  # transform and "de-affine"
        min_dist, max_dist = compute_min_max_dist(mol1, mol2)
        # Packed molecules must neither clash nor leave the box.
        assert CLASH_DIST <= min_dist and max_dist <= BOX_SIZE
| 40.761905 | 94 | 0.515479 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,094 | 0.319509 |
03f06d7ef5a24baadf174fb8ea7bc0c85df13ac9 | 4,626 | py | Python | a1d05eba1/scripts/entry.py | dorey/a1d05eba1 | eb6f66a946f3c417ab6bf9047ba9715be071967c | [
"0BSD"
] | null | null | null | a1d05eba1/scripts/entry.py | dorey/a1d05eba1 | eb6f66a946f3c417ab6bf9047ba9715be071967c | [
"0BSD"
] | 28 | 2020-06-23T19:00:58.000Z | 2021-03-26T22:13:07.000Z | a1d05eba1/scripts/entry.py | dorey/a1d05eba1 | eb6f66a946f3c417ab6bf9047ba9715be071967c | [
"0BSD"
] | null | null | null | import os
import json
import yaml
import argparse
from pyxform import xls2json
from pyxform.builder import create_survey_element_from_dict
from pprint import pprint
from ..content_variations import build_content
from ..utils.form_to_yaml_string import form_to_yaml_string
YAML_FORMAT = 'yml'
JSON_FORMAT = 'json'
XLS_FORMAT = 'xls'
XML_FORMAT = 'xml'
EXT_FORMATS = {
'.yml': YAML_FORMAT,
'.yaml': YAML_FORMAT,
'.json': JSON_FORMAT,
'.xlsx': XLS_FORMAT,
'.xls': XLS_FORMAT,
'.xml': XML_FORMAT,
}
def _lookup_format(path):
try:
return EXT_FORMATS[os.path.splitext(path)[1]]
except KeyError:
valid_extensions = ', '.join(list(EXT_FORMATS.keys()))
raise ValueError(f'No valid format found for file [ {path} ]\n'
f'Valid extensions: [{valid_extensions}]')
def sans_headers_and_no_directional_quotes(pyxform_dict):
    """Drop '*_header' keys and normalize curly quotes / boolean strings.

    Mutates *pyxform_dict* in place (header keys are deleted), then
    round-trips it through JSON text, rewriting directional quotes to
    plain ASCII quotes and bare "TRUE"/"FALSE" strings to JSON booleans.
    """
    for key in [k for k in pyxform_dict if k.endswith('_header')]:
        del pyxform_dict[key]
    text = json.dumps(pyxform_dict)
    # Replacements operate on the \uXXXX escapes produced by json.dumps,
    # so each pattern carries a literal backslash.
    for old, new in (
        ('\\u201c', '\\"'),   # left double curly quote -> escaped "
        ('\\u201d', '\\"'),   # right double curly quote -> escaped "
        ('\\u2018', "'"),     # left single curly quote -> '
        ('\\u2019', "'"),     # right single curly quote -> '
        ('"TRUE"', 'true'),
        ('"FALSE"', 'false'),
    ):
        text = text.replace(old, new)
    return json.loads(text)
def open_xls(path_in):
    """Load an XLSForm workbook and tag the parsed result with its schema."""
    absolute_path = os.path.abspath(path_in)
    parsed = sans_headers_and_no_directional_quotes(
        xls2json.xls_to_dict(absolute_path))
    return dict(parsed, schema='xlsform')
def open_yaml(path_in):
    """Parse and return the YAML document stored at *path_in*."""
    with open(path_in) as stream:
        contents = stream.read()
    return yaml.safe_load(contents)
def form_to_xform(form_content, default_settings=None):
    """Export *form_content* to a pretty-printed XForm XML string via pyxform.

    Parameters:
        form_content: loaded form object supporting export_to('xlsform').
        default_settings: optional dict merged into the xlsform export.

    Returns:
        The XML string produced by pyxform's survey element.
    """
    export_kwargs = {}
    if default_settings:
        export_kwargs['default_settings'] = default_settings
    flat_json = form_content.export_to('xlsform', **export_kwargs)
    wbjson = xls2json.workbook_to_json(flat_json)
    survey = create_survey_element_from_dict(wbjson)
    if hasattr(survey, '_translations'):
        # tx_names is passed to the pyxform object to ensure the itext
        # translations show up in the correct order.
        # requires XLSForm/pyxform commit #68f0db99
        tx_names = []
        # BUG FIX: the original iterated `cc.txs...`, but `cc` is undefined
        # in this module (NameError at runtime). The translations are read
        # from the form object passed in — assumes form_content exposes
        # `.txs.to_v1_strings()`; TODO confirm against the content classes.
        for tx in form_content.txs.to_v1_strings():
            if tx is not None:
                tx_names.append(tx)
        for tx_name in tx_names:
            survey._translations[tx_name] = {}
    return survey._to_pretty_xml()
def print_form(form, validate=False, format=None, to_file=None):
    """Render *form* in the requested format to stdout or *to_file*.

    Raises ValueError for an unrecognized *format*.
    """
    loaded_form = build_content(form, validate=validate)

    def printer(string_value):
        # Echo to stdout unless an output file was requested.
        if to_file is None:
            print(string_value)
        else:
            with open(to_file, 'w') as ff:
                ff.write(string_value)

    # Dispatch table: each entry lazily produces the serialized form.
    producers = {
        'json': lambda: json.dumps(loaded_form.export(), indent=2),
        'yml': lambda: form_to_yaml_string(loaded_form.export()),
        'xml': lambda: form_to_xform(
            loaded_form,
            default_settings={'title': 'Form Title', 'identifier': 'generated'},
        ),
    }
    if format not in producers:
        raise ValueError(f'Unknown format: {format}')
    printer(producers[format]())
def run(path_in, path_out=None, validate=False, format=None):
    """Load the form at *path_in* and emit it through print_form.

    The output format defaults to the extension of *path_out*, or 'yml'
    when neither an explicit format nor an output path is given.
    """
    if format is None and path_out is None:
        # If no format or path is specified, default output format is yml.
        format = 'yml'
    elif format is None:
        format = _lookup_format(path_out)

    if not os.path.exists(path_in):
        raise ValueError('Path does not exist: ' + path_in)

    ext = _lookup_format(path_in)
    if ext == XLS_FORMAT:
        frm = open_xls(path_in)
    elif ext == YAML_FORMAT:
        frm = open_yaml(path_in)
    elif ext == JSON_FORMAT:
        # BUG FIX: the original called `open_json`, which is not defined
        # anywhere in this module; load the JSON document directly.
        with open(path_in) as ff:
            frm = json.load(ff)
    else:
        # BUG FIX: an unsupported input (e.g. .xml) previously fell through
        # every branch and crashed with a NameError on `frm`; fail loudly.
        raise ValueError(f'Unsupported input format: {ext}')
    print_form(frm, validate=validate, format=format, to_file=path_out)
def main():
    """Command-line entry point: parse arguments and hand off to run()."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'path_in',
        help="Path to the YML file with the form definition",
    )
    parser.add_argument(
        '-o', '--output', dest='path_out',
        help='run the form through the schema validator',
    )
    parser.add_argument(
        '-f', '--format', dest='format',
        choices=['yml', 'json', 'xml'],
        help='output format',
    )
    parser.add_argument(
        '-v', '--validate', dest='validate',
        action='store_true',
        help='run the form through the schema validator',
    )
    parsed = parser.parse_args()
    run(**vars(parsed))
| 29.845161 | 79 | 0.620406 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 903 | 0.195201 |
03f31a2a63dceed013a3bf2dd7cfcd908654b692 | 48 | py | Python | rmp2/rmpgraph/__init__.py | UWRobotLearning/rmp2 | c612a014f517204b38c552619a441be4b3d7b67f | [
"MIT"
] | 17 | 2021-07-05T19:53:27.000Z | 2022-03-28T18:10:20.000Z | rmp2/rmpgraph/__init__.py | UWRobotLearning/rmp2 | c612a014f517204b38c552619a441be4b3d7b67f | [
"MIT"
] | null | null | null | rmp2/rmpgraph/__init__.py | UWRobotLearning/rmp2 | c612a014f517204b38c552619a441be4b3d7b67f | [
"MIT"
] | 2 | 2022-03-15T01:13:27.000Z | 2022-03-21T08:30:54.000Z | from rmp2.rmpgraph.robotics import RobotRMPGraph | 48 | 48 | 0.895833 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
03f4b115ea16b479cbe75ec350b750f18e4067a9 | 3,468 | py | Python | pushmi-pullyu.py | klynch/pushmi-pullyu | b17e29b7e7bdb1d1da59b61236572781980173f4 | [
"MIT"
] | null | null | null | pushmi-pullyu.py | klynch/pushmi-pullyu | b17e29b7e7bdb1d1da59b61236572781980173f4 | [
"MIT"
] | 1 | 2021-11-05T06:10:02.000Z | 2021-12-18T22:31:37.000Z | pushmi-pullyu.py | klynch/pushmi-pullyu | b17e29b7e7bdb1d1da59b61236572781980173f4 | [
"MIT"
] | 1 | 2020-06-02T12:00:22.000Z | 2020-06-02T12:00:22.000Z | #!/usr/bin/env python3
import argparse
import requests
import json
import os
import base64
from collections import namedtuple
import docker
Registry = namedtuple('Registry', ['name', 'tag_url', 'tag_func'])
REGISTRY_REGISTRY = {
'hub.docker.com': Registry(
name='hub.docker.com',
tag_url='https://registry.hub.docker.com/v1/repositories/library/mongo/tags',
tag_func=lambda x: [i['name'] for i in x],
),
'quay.io': Registry(
name='quay.io',
tag_url='https://quay.io/v1/repositories/{organization}/{repository}/tags',
tag_func=lambda x: list(x.keys()),
),
'gcr.io': Registry(
name='gcr.io',
tag_url='https://gcr.io/v2/{organization}/{repository}/tags/list',
tag_func=lambda x: x['tags'],
),
}
def get_config_auth(registry, config):
with open(config) as config:
config = json.load(config)
if registry in config['auths']:
auth = config['auths'][registry]['auth']
username,password = base64.b64decode(auth).decode('utf-8').split(':')
return requests.auth.HTTPBasicAuth(username, password)
return None
def get_tags(image, config):
parts = image.split('/')
if len(parts) == 3:
registry, organization, repository = parts
elif len(parts) == 2:
registry = 'hub.docker.com'
organization, repository = parts
elif len(parts) == 1:
registry = 'hub.docker.com'
organization = 'library'
repository = parts
else:
raise Exception('image issues')
registry = REGISTRY_REGISTRY[registry]
url = registry.tag_url.format(organization=organization, repository=repository)
response = requests.get(url, auth=get_config_auth(registry.name, config))
if response.status_code == 200:
return registry.tag_func(response.json())
else:
raise Exception('registry issues')
def list_tags(args, tags):
for tag in tags:
print(tag)
def pull_tags(args, tags):
client = docker.from_env()
for tag in tags:
print("Pulling image {}:{}".format(args.source, tag))
client.images.pull(args.source, tag=tag)
def sync_tags(args, tags):
client = docker.from_env()
for tag in tags:
print("Pulling image {}:{}".format(args.source, tag))
image = client.images.pull(args.source, tag=tag)
image.tag(args.destination, tag=tag)
print("Pushing image {}:{}".format(args.destination, tag))
client.images.push(args.destination, tag=tag)
parser = argparse.ArgumentParser(description='Pull all tags of a docker image and push to another repository')
parser.add_argument('--config', default='~/.docker/config.json', help='the docker configuration file used for login')
subparsers = parser.add_subparsers()
list_parser = subparsers.add_parser('list', help='list tags in source repository')
list_parser.set_defaults(func=list_tags)
pull_parser = subparsers.add_parser('pull', help='pull all tags from source registry')
pull_parser.set_defaults(func=pull_tags)
sync_parser = subparsers.add_parser('sync', aliases=['push'], help='syncrhonize tags from source registry to destionation')
sync_parser.add_argument('destination', help='the destination repository')
sync_parser.set_defaults(func=sync_tags)
parser.add_argument('source', help='the source repository')
args = parser.parse_args()
tags = get_tags(args.source, os.path.expanduser(args.config))
args.func(args, tags)
| 35.387755 | 123 | 0.678489 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 856 | 0.246828 |
03f575511edc87fbaa0168ce74fe3d45c2492f5f | 4,158 | py | Python | src/contactapp/migrations/0001_initial.py | robertsmoto/sodavault | 200e843be7abe6cc447647bba55c7c1309092e5e | [
"BSD-3-Clause"
] | null | null | null | src/contactapp/migrations/0001_initial.py | robertsmoto/sodavault | 200e843be7abe6cc447647bba55c7c1309092e5e | [
"BSD-3-Clause"
] | null | null | null | src/contactapp/migrations/0001_initial.py | robertsmoto/sodavault | 200e843be7abe6cc447647bba55c7c1309092e5e | [
"BSD-3-Clause"
] | null | null | null | # Generated by Django 3.2.3 on 2021-08-23 17:30
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial auto-generated migration for the contactapp models.

    Creates the Company and Person tables (both carrying billing and
    shipping address columns) plus two proxy models (Location, Supplier)
    over Company. NOTE(review): 'Suppplier' and the Person field
    'shop_zipcode' look like typos preserved from the models — do not
    edit them here; generated migrations must match their models.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        # Company: typed contact record with billing + shipping addresses.
        migrations.CreateModel(
            name='Company',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('company_type', models.CharField(blank=True, choices=[('LOCA', 'Location'), ('SUPP', 'Suppplier'), ('CUST', 'Customer')], max_length=4)),
                ('name', models.CharField(blank=True, max_length=200)),
                ('phone', models.CharField(blank=True, max_length=200)),
                ('website', models.CharField(blank=True, max_length=200)),
                ('address_01', models.CharField(blank=True, max_length=200)),
                ('address_02', models.CharField(blank=True, max_length=200)),
                ('city', models.CharField(blank=True, max_length=200)),
                ('state', models.CharField(blank=True, max_length=200)),
                ('zipcode', models.CharField(blank=True, max_length=200)),
                ('ship_address_01', models.CharField(blank=True, max_length=200)),
                ('ship_address_02', models.CharField(blank=True, max_length=200)),
                ('ship_city', models.CharField(blank=True, max_length=200)),
                ('ship_state', models.CharField(blank=True, max_length=200)),
                ('ship_zipcode', models.CharField(blank=True, max_length=200)),
            ],
            options={
                'verbose_name_plural': 'companies',
            },
        ),
        # Person: individual contact, optionally linked to a Company.
        migrations.CreateModel(
            name='Person',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('person_type', models.CharField(blank=True, choices=[('CUST', 'Customer'), ('SUPP', 'Suppplier')], max_length=4)),
                ('firstname', models.CharField(blank=True, max_length=200)),
                ('lastname', models.CharField(blank=True, max_length=200)),
                ('nickname', models.CharField(blank=True, max_length=200)),
                ('phone', models.CharField(blank=True, max_length=200)),
                ('mobile', models.CharField(blank=True, max_length=200)),
                ('email', models.CharField(blank=True, max_length=200)),
                ('website', models.CharField(blank=True, max_length=200)),
                ('address_01', models.CharField(blank=True, max_length=200)),
                ('address_02', models.CharField(blank=True, max_length=200)),
                ('city', models.CharField(blank=True, max_length=200)),
                ('state', models.CharField(blank=True, max_length=200)),
                ('zipcode', models.CharField(blank=True, max_length=200)),
                ('ship_address_01', models.CharField(blank=True, max_length=200)),
                ('ship_address_02', models.CharField(blank=True, max_length=200)),
                ('ship_city', models.CharField(blank=True, max_length=200)),
                ('ship_state', models.CharField(blank=True, max_length=200)),
                ('shop_zipcode', models.CharField(blank=True, max_length=200)),
                ('company', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='contactapp.company')),
            ],
            options={
                'verbose_name_plural': 'people',
            },
        ),
        # Proxy models: same table as Company, different Python behavior.
        migrations.CreateModel(
            name='Location',
            fields=[
            ],
            options={
                'proxy': True,
                'indexes': [],
                'constraints': [],
            },
            bases=('contactapp.company',),
        ),
        migrations.CreateModel(
            name='Supplier',
            fields=[
            ],
            options={
                'proxy': True,
                'indexes': [],
                'constraints': [],
            },
            bases=('contactapp.company',),
        ),
    ]
03f5f0261dae2f37b5e7db37db0f4a97a9efea20 | 3,860 | py | Python | examples/kcs.py | WeiZhixiong/ksc-sdk-python | a93237ce376e107eaae644678ef6b99819a9f8eb | [
"Apache-2.0"
] | 53 | 2016-09-21T15:52:14.000Z | 2021-12-23T09:23:00.000Z | examples/kcs.py | WeiZhixiong/ksc-sdk-python | a93237ce376e107eaae644678ef6b99819a9f8eb | [
"Apache-2.0"
] | 27 | 2016-09-21T15:24:43.000Z | 2021-11-18T08:38:38.000Z | examples/kcs.py | WeiZhixiong/ksc-sdk-python | a93237ce376e107eaae644678ef6b99819a9f8eb | [
"Apache-2.0"
] | 68 | 2016-09-06T10:33:09.000Z | 2021-11-16T07:13:03.000Z | # -*- encoding:utf-8 -*-
from kscore.session import get_session
if __name__ == "__main__":
    s = get_session()
    # Create a client for the "kcs" service in the cn-shanghai-3 region.
    kcsClient = s.create_client("kcs", "cn-shanghai-3", use_ssl=False)
    # The DescribeCacheReadonlyNode API must be called through a "kcsv2" client.
    #kcsv2Client = s.create_client("kcsv2", "cn-shanghai-3", use_ssl=False)
    # Create a cache cluster
    #print(kcsClient.create_cache_cluster(**{'Name': 'pjl_sdk_test0921', 'Capacity': 1, 'NetType': 2, 'VpcId': '3c12ccdf-9b8f-4d9b-8aa6-a523897e97a1', 'VnetId': '293c16a5-c757-405c-a693-3b2a3adead50'}))
    # List cache clusters
    #print(kcsClient.describe_cache_clusters(**{'Offset': 0, 'Limit': 5, 'OrderBy': 'created,desc'}))
    # Describe a single cache cluster
    #print(kcsClient.describe_cache_cluster(**{'CacheId': '01988fc0-6041-49d2-b6b5-e2385e5d5edb'}))
    # Rename a cache cluster
    #print(kcsClient.rename_cache_cluster(**{'Name': 'pjl_test_sdk', 'CacheId': '01988fc0-6041-49d2-b6b5-e2385e5d5edb'}))
    # Flush (empty) a cache cluster
    #print(kcsClient.flush_cache_cluster(**{'CacheId': '01988fc0-6041-49d2-b6b5-e2385e5d5edb'}))
    # Resize a cache cluster
    #print(kcsClient.resize_cache_cluster(**{'CacheId': '01988fc0-6041-49d2-b6b5-e2385e5d5edb', 'Capacity': 2}))
    # Delete a cache cluster
    #print(kcsClient.delete_cache_cluster(CacheId='b80ef266-dd52-47b2-9377-6a4a73626c19'))
    # Describe cache cluster parameters
    #print(kcsClient.describe_cache_parameters(**{'CacheId': '01988fc0-6041-49d2-b6b5-e2385e5d5edb'}))
    # Set cache cluster parameters
    #print(kcsClient.set_cache_parameters(**{'CacheId': '01988fc0-6041-49d2-b6b5-e2385e5d5edb', 'Parameters.ParameterName.1': 'maxmemory-policy', 'Parameters.ParameterValue.1': 'allkeys-lru', 'ResetAllParameters': 'true'}))
    # Describe cache cluster security rules
    #print(kcsClient.describe_cache_security_rules(**{'CacheId': '01988fc0-6041-49d2-b6b5-e2385e5d5edb'}))
    # Set cache cluster security rules
    #print(kcsClient.set_cache_security_rules(**{'CacheId': '01988fc0-6041-49d2-b6b5-e2385e5d5edb', 'SecurityRules.Cidr.1': '192.168.18.17/21'}))
    # Delete a cache cluster security rule
    #print(kcsClient.delete_cache_security_rule(**{'CacheId': '01988fc0-6041-49d2-b6b5-e2385e5d5edb', 'SecurityRuleId': 105}))
    # Describe the read-only nodes of an instance
    #print(kcsv2Client.describe_cache_readonly_node(**{'CacheId': '01988fc0-6041-49d2-b6b5-e2385e5d5edb'}))
    # Describe availability zones
    #print(kcsClient.describe_availability_zones(**{'Engine': 'redis', 'Mode': 1}))
    # Describe regions
    #print(kcsClient.describe_regions(**{'Engine': 'redis', 'Mode': 1}))
    # Create a security group
    # print(kcsClient.create_security_group(**{'AvailableZone': 'az', 'Name': 'testPythonSdk', 'Description': 'testPythonSdk'}))
    # Clone a security group
    # print(kcsClient.clone_security_group(**{'AvailableZone': 'az', 'Name': 'testPythonSdkClone', 'Description': 'testPythonSdkClone', 'SrcSecurityGroupId': 'srcSecurityGroupId'}))
    # Delete security groups
    # print(kcsClient.delete_security_group(**{'AvailableZone': 'az', 'SecurityGroupId.1': 'securityGroupId'}))
    # Modify a security group
    # print(kcsClient.modify_security_group(**{'AvailableZone': 'az', 'Name': 'testPythonSdk777', 'Description': 'testPythonSdk777', 'SecurityGroupId': 'securityGroupId'}))
    # List security groups
    # print(kcsClient.describe_security_groups(**{'AvailableZone': 'az'}))
    # Describe a single security group
    # print(kcsClient.describe_security_group(**{'AvailableZone': 'az', 'SecurityGroupId': 'securityGroupId'}))
    # Bind instances to a security group
    # print(kcsClient.allocate_security_group(**{'AvailableZone': 'az', 'CacheId.1': 'cacheId', 'SecurityGroupId.1': 'securityGroupId'}))
    # Unbind instances from a security group
    # print(kcsClient.deallocate_security_group(**{'AvailableZone': 'az', 'CacheId.1': 'cacheId', 'SecurityGroupId': 'securityGroupId'}))
    # Create security group rules
    # print(kcsClient.create_security_group_rule(**{'AvailableZone': 'az', 'SecurityGroupId': 'securityGroupId', 'Cidrs.1': "172.10.12.0/16"}))
    # Delete security group rules
    # print(kcsClient.delete_security_group_rule(**{'AvailableZone': 'az', 'SecurityGroupId': 'securityGroupId', 'SecurityGroupRuleId.1': 'securityGroupRuleId'}))
03f71731cbda2b09821d59b339f1b486cf07bad6 | 4,221 | py | Python | dummy_agent.py | aivaslab/marlgrid | 10b53d27ce224fadeeb5830d6034350a69feb4b4 | [
"Apache-2.0"
] | null | null | null | dummy_agent.py | aivaslab/marlgrid | 10b53d27ce224fadeeb5830d6034350a69feb4b4 | [
"Apache-2.0"
] | null | null | null | dummy_agent.py | aivaslab/marlgrid | 10b53d27ce224fadeeb5830d6034350a69feb4b4 | [
"Apache-2.0"
] | null | null | null |
def create_supervised_data(env, agents, num_runs=50):
    """Roll out `agents` in `env` and collect supervised-learning tensors.

    Returns (character_history, mental_history, predict_history,
    action_history), one entry per recorded step of each run's final
    episode. NOTE(review): this function reads several free globals
    (`setting`, `num_goals`, `eps_per_run`, `unarbitrary_prefs`,
    `rendering`, `visualize`, `vispath`, `one_hot`, `render_path`) that
    are not defined in this chunk — presumably module-level config;
    confirm before reuse.
    """
    val = []  # NOTE(review): unused accumulator
    # the data threeple
    action_history = []
    predict_history = []
    mental_history = []
    character_history = []
    episode_history = []
    traj_history = []
    grids = []  # NOTE(review): unused accumulator
    ep_length = env.maxtime
    filler = env.get_filler()

    obs = env.reset(setting=setting, num_visible=num_goals)

    for ep in tqdm.tqdm(range(num_runs*eps_per_run)):
        # Zero-padded frame buffer so the model always sees maxtime frames.
        buffer_s = [np.zeros(obs[0].shape) for _ in range(env.maxtime)]
        # The last episode of each run re-randomizes the environment setup.
        if (ep % eps_per_run) == eps_per_run-1:
            obs = env.reset(setting=setting, num_visible=num_goals)
        else:
            obs = env.reset()
        if ep % eps_per_run == 0:
            episode_number = 0
            #clear ep_history here?
        for agent in agents:
            if not unarbitrary_prefs:
                agent.reset_prefs()
            else:
                agent.hardcode_prefs()
        prevact = None
        prevpos = None
        agentpos = agents[0].pos
        episode_time = 0

        while not env.done:
            if rendering and ((ep % eps_per_run) == eps_per_run-1):
                env.render()
            buffer_s.append(obs[0])
            # Feed the most recent maxtime frames to the (first) agent.
            actions = [agent.action(torch.FloatTensor([buffer_s[-env.maxtime:]]).cuda()),]
            agentpos = agents[0].pos
            thistraj = env.get_trajectory(agentpos, prevact, prevpos)
            prevpos = agentpos
            #without agent position, thisact of none is pretty meaningless
            prevact = actions[0]
            traj_history += [thistraj, ]
            #moved this to before following if
            episode_time += 1

            if ((ep % eps_per_run) == eps_per_run-1):
                # each step in last episode
                #episode number is 3
                if visualize:
                    render_path(env, ep, episode_time, vispath)
                #print(actions)
                # "Character" tensor: past episodes of this run plus the
                # current (partial) episode in the final slot.
                run = np.zeros((eps_per_run, ep_length, *filler.shape))
                if eps_per_run > 1:
                    run[-episode_number-1:-1] = episode_history[-episode_number:]
                episode = np.zeros((ep_length, *filler.shape))
                episode[ep_length-episode_time:] = traj_history[-episode_time]
                run[-1] = episode
                shortterm = np.asarray(traj_history[-1])
                action_history += [one_hot(5, actions[0]),]
                character_history += [run,]
                mental_history += [episode,]
                predict_history += [shortterm,]
            if not env.full_test:
                break
            obs, _, _, = env.step(actions)
        # end of episode
        episode = np.zeros((ep_length, *filler.shape))
        episode[ep_length-episode_time:] = traj_history[-episode_time:]
        episode_history += [episode, ]
        episode_number += 1
    return character_history, mental_history, predict_history, action_history
def format_data_torch(data, **train_kwargs):
    """Wrap (character, mental, query, action) arrays in a CUDA DataLoader.

    Each image-like array arrives channels-last and is swapped to
    channels-first before being converted to torch tensors on the GPU.
    Extra keyword arguments are forwarded to DataLoader.
    """
    # (N, Ep, F, W, H, C) -> (N, Ep, F, C, H, W)
    char = np.swapaxes(np.asarray(data[0]).astype('float32'), 3, 5)
    # (N, F, W, H, C) -> (N, F, C, H, W)
    mental = np.swapaxes(np.asarray(data[1]).astype('float32'), 2, 4)
    # (N, W, H, C) -> (N, C, H, W)
    query = np.swapaxes(np.asarray(data[2][:]).astype('float32'), 1, 3)
    act = np.asarray(data[3][:]).astype('int32')

    gpu_tensors = (
        torch.Tensor(char).cuda(),
        torch.Tensor(mental).cuda(),
        torch.Tensor(query).cuda(),
        torch.Tensor(act).cuda(),
    )
    dataset = torch.utils.data.TensorDataset(*gpu_tensors)
    return torch.utils.data.DataLoader(dataset, **train_kwargs)
def supervised_training(env, agents, data):
    # NOTE(review): incomplete stub — `Dummy`, `steps` and `model` are not
    # defined anywhere in this chunk; this looks like it was meant to wrap
    # each agent in a railroaded DummyAgent before training. TODO: confirm
    # intent and finish or remove.
    dummies = [Dummy(steps, model) for agent in agents]
class DummyAgent():
    '''
    Railroads the agent for some steps, then switches to an alternate model.

    Railroaded steps should be included in the environment's test condition,
    returned as the final value of reset(). Predefined strategies after the
    railroaded steps are compared with the alt model's output.
    '''

    def __init__(self, railroad, strategies, model):
        # Index of the last action taken; -1 means no action yet.
        self.n = -1
        self.length = len(railroad)
        self.model = model
        self.rails = railroad
        self.strats = strategies

    def choose_action(self, obs):
        """Return (action, strategy_flags) for the next step.

        While scripted steps remain, the railroad action is returned with
        all flags 0; afterwards the wrapped model chooses and each flag
        records whether its action matches the corresponding predefined
        strategy at this step index.
        """
        # BUG FIX: the original read the undefined local `n` and the
        # nonexistent attribute `self.railroad`, and its `n <= self.length`
        # bound would index past the end of the script. Railroad only while
        # scripted entries remain.
        if self.n + 1 < self.length:
            self.n += 1
            return self.rails[self.n], [0 for _ in self.strats]
        self.n += 1
        act = self.model.choose_action(obs)
        return act, [act == x[self.n] for x in self.strats]

    def reset(self, railroad, strategies):
        """Install a new railroad/strategy script and restart the counter."""
        # BUG FIX: the original signature omitted `self` (so the method was
        # uncallable as intended) and never reset the step counter.
        self.n = -1
        self.length = len(railroad)
        self.rails = railroad
        self.strats = strategies
| 25.581818 | 81 | 0.668325 | 847 | 0.200663 | 0 | 0 | 0 | 0 | 0 | 0 | 767 | 0.18171 |
03f77421a8248af15d6335d234c04c7267e108b3 | 1,695 | py | Python | src/server/utils.py | Krzem5/Python-School_Website | 5947b25a538c52fb475ccfbb87142dbe5ef5e0d0 | [
"BSD-3-Clause"
] | null | null | null | src/server/utils.py | Krzem5/Python-School_Website | 5947b25a538c52fb475ccfbb87142dbe5ef5e0d0 | [
"BSD-3-Clause"
] | null | null | null | src/server/utils.py | Krzem5/Python-School_Website | 5947b25a538c52fb475ccfbb87142dbe5ef5e0d0 | [
"BSD-3-Clause"
] | null | null | null | import builtins
import datetime
import inspect
import threading
import time
import ws
# Module-level shared state used by the helpers below.
global _c,_pq,_l_ws,_sc  # NOTE(review): `global` is a no-op at module level
_c={}       # file-content cache used by cache(): path -> bytes
_pq=None    # pending print queue; None until the first print() call
_l_ws={}    # live log websocket clients: address -> (handler thread, backlog_sent)
_sc=None    # lazily imported server module (set by ws_logs_start)
_tl=threading.Lock()  # guards mutation of _pq
def _print_q():
    """Background worker: drain the print queue, mirroring each line to
    stdout, to all connected log websockets and to a rolling on-disk log.

    NOTE(review): the loop busy-waits when the queue is empty (no sleep) —
    confirm this is intended.
    """
    global _pq,_l_ws
    lt=time.time()  # next allowed disk-flush time
    fs=__import__("storage")  # project-local module, imported lazily
    fs.set_silent("log.log")
    dt=fs.read("log.log")     # in-memory copy of the log file (bytes)
    lc=dt.count(b"\n")        # current number of log lines
    while (True):
        if (len(_pq)>0):
            _tl.acquire()
            # pop the oldest entry: join its args into one string, keep its
            # captured stack frame, rebind _pq to the remainder
            a,sf,_pq=" ".join([str(e) for e in _pq[0][0]]),_pq[0][1],_pq[1:]
            _tl.release()
            # prefix with "[module.function, HH:MM:SS]" from the caller frame
            s=datetime.datetime.now().strftime(f"[{sf.filename[:-3]}{('.'+sf.function if sf.function!='<module>' else '')}, %H:%M:%S] {a}")
            builtins.print(s)
            s=bytes(s,"utf-8")
            for k,v in list(_l_ws.items()):
                if (v[1]==False):
                    # first message to this socket: send the full backlog
                    # (tag b"1") before streaming live lines (tag b"0")
                    _l_ws[k]=(v[0],True)
                    ws.send(b"1"+dt[:-1],thr=v[0])
                ws.send(b"0"+s,thr=v[0])
            dt+=s+b"\n"
            lc+=1
            if (lc>1024):
                # cap the in-memory log: drop the oldest line
                dt=dt[dt.index(b"\n")+1:]
            if (time.time()>lt):
                # flush to disk at most once every 30 seconds
                lt=time.time()+30
                fs.write("log.log",dt)
def cache(fp):
    """Return the bytes of file *fp*, memoised in the module-level cache."""
    global _c
    if fp not in _c:
        with open(fp, "rb") as fh:
            _c[fp] = fh.read()
    return _c[fp]
def print(*a):
    """Queue *a* for the logging worker (intentionally shadows builtins.print).

    The first call lazily creates the queue and spawns the _print_q drain
    thread; later calls append under the lock.
    """
    global _pq
    if (_pq is None):
        # NOTE(review): this None-check is not performed under _tl, so two
        # threads making their first print() concurrently could spawn two
        # worker threads — confirm single-threaded first use.
        _pq=[(a,inspect.getouterframes(inspect.currentframe(),2)[1])]
        threading.Thread(target=_print_q).start()
    else:
        _tl.acquire()
        # store the caller's frame so the worker can prefix module/function
        _pq+=[(a,inspect.getouterframes(inspect.currentframe(),2)[1])]
        _tl.release()
def ws_logs_start():
    """Register the current websocket client for live log streaming and start
    a daemon keep-alive pinger for it."""
    global _sc,_l_ws
    def _ws_keep_alive(a,t):
        # ping the client every 20 s until it is deregistered (ws_logs_end)
        while (a in _l_ws):
            ws.send(b"null",thr=t)
            time.sleep(20)
    if (_sc is None):
        # lazy import — presumably to avoid a circular import with server;
        # TODO confirm
        _sc=__import__("server")
    a=_sc.address()
    # value is (handler thread, backlog_sent flag) consumed by _print_q
    _l_ws[a]=(threading.current_thread(),False)
    thr=threading.Thread(target=_ws_keep_alive,args=(a,_l_ws[a][0]))
    thr.daemon=True
    thr.start()
def ws_logs_end():
    """Deregister the current websocket client from live log streaming."""
    global _l_ws
    _l_ws.pop(_sc.address())
| 18.833333 | 130 | 0.629499 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 179 | 0.105605 |
03f8dea5d4d479b6d242969494dabbcbf9fdaa1f | 4,593 | py | Python | tests/test_operation.py | InTack2/boip | 99a2c1cf7116dc4a28453d44ac9768446241174d | [
"MIT"
] | null | null | null | tests/test_operation.py | InTack2/boip | 99a2c1cf7116dc4a28453d44ac9768446241174d | [
"MIT"
] | 1 | 2020-09-28T15:26:02.000Z | 2020-09-28T15:26:02.000Z | tests/test_operation.py | InTack2/boip | 99a2c1cf7116dc4a28453d44ac9768446241174d | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import generators
from __future__ import division
import os
import pytest
from boip import operation
SCRIPT_PATH = os.path.dirname(__file__)
@pytest.fixture
def sample_data_path():
    """Absolute path of the test data directory next to this file."""
    return os.path.join(SCRIPT_PATH, "data")
@pytest.fixture
def sample_yaml(sample_data_path):
    """Path of the boip settings file inside the sample data directory."""
    return os.path.join(sample_data_path, operation.SETTING_FILE_NAME)
@pytest.fixture
def sample_template_folder(sample_data_path):
    """Path of the template folder inside the sample data directory."""
    return os.path.join(sample_data_path, operation.TEMPLATE_FOLDER_NAME)
@pytest.fixture
def sample_create_boip_set(sample_data_path):
    """BoipSetList built from the sample data directory."""
    return operation.BoipSetList(sample_data_path)
class TestFileFormatter(object):
    """Tests for operation.FileFormatter."""

    def test_replace_file(self, tmp_path):
        work_dir = tmp_path / "test_replace_file"
        work_dir.mkdir()
        target = work_dir / "sample.txt"
        target.write_text(r"{sample}")

        formatter = operation.FileFormatter({"sample": "replace word"})
        formatter.replace_file(str(target), "txt")

        assert target.read_text() == "replace word"
class TestFolderFormatter(object):
    """Tests for operation.FolderFormatter."""

    def test_replace_file(self, tmp_path):
        work_dir = tmp_path / "test_template_folder_replace_file"
        work_dir.mkdir()
        first = work_dir / "sample.txt"
        second = work_dir / "sample_2.txt"
        first.write_text(r"{sample}")
        second.write_text(r"{sample} {sample}")

        formatter = operation.FolderFormatter(
            str(work_dir), {"txt": "txt"}, {"sample": "replace word"})
        formatter.replace_files()

        assert first.read_text() == "replace word"
        assert second.read_text() == "replace word replace word"
class TestBoipSetList(object):
    """Tests for operation.BoipSetList."""

    def test_select_template_path(self, sample_create_boip_set, sample_template_folder):
        assert sample_create_boip_set.select_template_path("sample") == sample_template_folder

    def test_select_questions(self, sample_create_boip_set):
        expected = [{"message": "what question?", "name": "sample"}]
        assert sample_create_boip_set.select_questions("sample") == expected

    def test_select_convert_extensions(self, sample_create_boip_set):
        expected = {"txt": "py", "ui": "ui"}
        assert sample_create_boip_set.select_convert_extensions("sample") == expected

    def test_duplicate_template_folder(self, tmp_path, sample_create_boip_set):
        destination = tmp_path / "test_duplicate"
        source = sample_create_boip_set.select_template_path("sample")
        sample_create_boip_set.duplicate_template_folder(source, str(destination))
        assert len(list(destination.iterdir())) == 1

    def test_get_title_list(self, sample_create_boip_set):
        assert len(sample_create_boip_set.get_title_list()) == 2
class TestYamlFileReader(object):
    """Tests for operation.YamlFileReader."""

    @pytest.fixture
    def template_yaml(self, tmpdir):
        """Temporary yaml file containing a single key/value mapping."""
        yaml_file = tmpdir.mkdir("TestLoadTemplateTextFile").join("sample.yaml")
        yaml_file.write("temp: sample")
        return yaml_file

    def test_reading_temp_in_text_file_as_string(self, template_yaml):
        """The yaml mapping is read back as a python dict."""
        loaded = operation.YamlFileReader(str(template_yaml)).get_read_data()
        assert loaded == {"temp": "sample"}
class TestSettingData(object):
    """Tests for operation.SettingData."""

    # (attribute name, expected value parsed from the sample yaml)
    parameters = [
        ("title", "sample"),
        ("questions", [{"name": "sample", "message": "what question?"}]),
        ("convert_extensions", {"txt": "py", "ui": "ui"}),
        ("template_path", "sample/path"),
    ]

    @pytest.mark.parametrize("search_attr, answer", parameters)
    def test_value(self, sample_yaml, search_attr, answer):
        """Each attribute parsed from the sample yaml matches its expectation."""
        setting = operation.SettingData(sample_yaml, "sample/path")
        assert getattr(setting, search_attr) == answer
| 32.574468 | 117 | 0.698236 | 3,761 | 0.800724 | 0 | 0 | 1,188 | 0.252927 | 0 | 0 | 1,038 | 0.220992 |
03f90b9c017571d5e21e3b7da29f1645b4a33491 | 85 | py | Python | service/models/__init__.py | CottageLabs/lodestone | 2e60f2138a49633398655bb7f728fd3d6ac92c43 | [
"Apache-2.0"
] | null | null | null | service/models/__init__.py | CottageLabs/lodestone | 2e60f2138a49633398655bb7f728fd3d6ac92c43 | [
"Apache-2.0"
] | null | null | null | service/models/__init__.py | CottageLabs/lodestone | 2e60f2138a49633398655bb7f728fd3d6ac92c43 | [
"Apache-2.0"
] | null | null | null | from service.models.ethesis import Ethesis
from service.models.dataset import Dataset | 42.5 | 42 | 0.870588 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
03fcdf32087c44ef545515c06175fa5dfd2d8041 | 122 | py | Python | rmepy/robot_modules/__init__.py | 233a344a455/RobomasterEPlib | d0497d06d107c482e7b4c80c54c7c05c0bf62e21 | [
"MIT"
] | 3 | 2020-04-23T14:19:59.000Z | 2020-10-06T17:02:12.000Z | rmepy/robot_modules/__init__.py | 233a344a455/RobomasterEPlib | d0497d06d107c482e7b4c80c54c7c05c0bf62e21 | [
"MIT"
] | null | null | null | rmepy/robot_modules/__init__.py | 233a344a455/RobomasterEPlib | d0497d06d107c482e7b4c80c54c7c05c0bf62e21 | [
"MIT"
] | 2 | 2020-05-13T08:15:16.000Z | 2020-05-13T08:55:51.000Z | from .basic_ctrl import BasicCtrl
from .chassis import Chassis
from .gimbal import Gimbal
from .blaster import Blaster | 30.5 | 34 | 0.811475 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
03fe24516cc44d26a56b806f17cdc2963b402fd8 | 12,360 | py | Python | build.py | MrCoft/EngiMod | 65c90bd9231ac388d8af7849a1835914f1eefc78 | [
"MIT"
] | null | null | null | build.py | MrCoft/EngiMod | 65c90bd9231ac388d8af7849a1835914f1eefc78 | [
"MIT"
] | null | null | null | build.py | MrCoft/EngiMod | 65c90bd9231ac388d8af7849a1835914f1eefc78 | [
"MIT"
] | null | null | null | import utils
from utils import format
import os
import tempfile
import urllib.request
import shutil
import zipfile
# Local (modded) Slay the Spire install; jars are read from / written here.
spire_dir = r"D:\Games\Slay the Spire Modded"
# Working copy of the FruityMod source tree that gets patched and rebuilt.
mod_dir = os.path.join("cache", "mod")
def build():
    """Full mod build pipeline.

    Fetches the FruityMod source tree and its libraries, generates EngiMod
    java sources/resources from the `engi_mod` data tables, patches the
    FruityMod tree, compiles the jar with maven and launches the game for a
    test run.

    NOTE: `format` here is utils.format (template rendering), not the builtin.
    """
    # STEP: clone FruityMod
    if not os.path.exists(mod_dir):
        print("Downloading {}".format("FruityMod"))
        fruity_url = r"https://github.com/gskleres/FruityMod-StS/archive/v0.6.2b.zip"
        utils.mkdir("cache")
        download_file = tempfile.NamedTemporaryFile(suffix=".zip", dir="cache", delete=False).name
        with urllib.request.urlopen(fruity_url) as response, open(download_file, "wb") as out_file:
            shutil.copyfileobj(response, out_file)
        utils.unzip(download_file, mod_dir, shift=1, remove=True)

    # STEP: fetch libs
    mod_jar = os.path.join(spire_dir, "ModTheSpire.jar")
    if not os.path.exists(mod_jar):
        print("Downloading ModTheSpire")
        download_file = tempfile.NamedTemporaryFile(suffix=".zip", dir="..", delete=False).name
        urllib.request.urlretrieve("https://github.com/kiooeht/ModTheSpire/releases/download/v2.6.0/ModTheSpire.zip", download_file)
        # extract just the jar from the release zip
        with zipfile.ZipFile(download_file, "r") as archive, open(mod_jar, "wb") as file:
            jar_data = archive.read("ModTheSpire.jar")
            file.write(jar_data)
        os.remove(download_file)

    base_jar = os.path.join(spire_dir, "mods", "BaseMod.jar")
    if not os.path.exists(base_jar):
        print("Downloading BaseMod")
        urllib.request.urlretrieve("https://github.com/daviscook477/BaseMod/releases/download/v2.9.1/BaseMod.jar", base_jar)

    from spire import name_id
    import textwrap
    import io
    import json
    print("Generating data")
    image_dir = os.path.join("assets", "images")
    if os.path.exists(os.path.join("cache", "DEBUG")):
        image_dir = os.path.join("todo", "images")

    # STEP: generate cards
    from engi_mod import cards
    with open(os.path.join("templates", "card.java"), encoding="utf-8") as file:
        card_template = file.read()
    for card in cards:
        with open(os.path.join(mod_dir, *r"src\main\java\fruitymod\cards".split("\\"), name_id(card["name"]) + ".java"), "w", encoding="utf-8") as file:
            file.write(format(card_template, card))

    # STEP: patch code
    templates_cache = os.path.join("cache", "templates")
    if not os.path.exists(templates_cache):
        # keep pristine copies of the files about to be patched so rebuilds
        # always patch originals, never already-patched output
        utils.mkdir(templates_cache)
        shutil.copy(os.path.join(mod_dir, *r"src\main\java\fruitymod\FruityMod.java".split("\\")), os.path.join(templates_cache, "FruiyMod.java"))
        shutil.copy(os.path.join(mod_dir, *r"src\main\java\fruitymod\characters\TheSeeker.java".split("\\")), os.path.join(templates_cache, "TheSeeker.java"))
        shutil.copy(os.path.join(mod_dir, *r"src\main\resources\localization\FruityMod-CardStrings.json".split("\\")), os.path.join(templates_cache, "FruityMod-CardStrings.json"))
    image_code = io.StringIO()
    add_code = io.StringIO()
    unlock_code = io.StringIO()
    for card in cards:
        id = name_id(card["name"], upper=True).lower()
        image_file = os.path.join(image_dir, id + ".png")
        # fall back to the runic_binding art when no image exists for the card
        image_file = "cards/{}.png".format(id if os.path.exists(image_file) else "runic_binding")
        image_code.write(format(
            'public static final String {{ name_id(card["name"], upper=True) }} = "{{ image_file }}";'
        ) + "\n")
        if card["rarity"] != "special":
            add_code.write(format(
                'BaseMod.addCard(new {{ name_id(card["name"]) }}());'
            ) + "\n")
            unlock_code.write(format(
                'UnlockTracker.unlockCard("{{ card["name"] }}");'
            ) + "\n")
    with open(os.path.join(templates_cache, "FruiyMod.java"), encoding="utf-8") as file:
        fruity_lines = [line for line in file]
    # splice the generated java snippets after known anchor lines
    for i, line in enumerate(fruity_lines):
        if "public static final String PHASE_COIL" in line:
            fruity_lines.insert(i + 1, "\n" + textwrap.indent(image_code.getvalue(), " " * 4))
            break
    for i, line in enumerate(fruity_lines):
        if "BaseMod.addCard(new Nexus())" in line:
            fruity_lines.insert(i + 1, "\n" + textwrap.indent(add_code.getvalue(), " " * 4 * 2))
            fruity_lines.insert(i + 2, "\n" + textwrap.indent(unlock_code.getvalue(), " " * 4 * 2))
            break
    with open(os.path.join(mod_dir, *r"src\main\java\fruitymod\FruityMod.java".split("\\")), "w", encoding="utf-8") as file:
        file.write("".join(fruity_lines))
    with open(os.path.join(templates_cache, "TheSeeker.java"), encoding="utf-8") as file:
        seeker_lines = [line for line in file]
    # STEP: starting relic
    from engi_mod import relic
    for i, line in enumerate(seeker_lines):
        if "Arcanosphere" in line:
            del seeker_lines[i:i+2]
            seeker_lines.insert(i, "\n{}\n\n".format(textwrap.indent(textwrap.dedent(format("""
            retVal.add("{{ relic }}");
            UnlockTracker.markRelicAsSeen("{{ relic }}");
            """)).strip(), " " * 4 * 2)))
            break
    # STEP: starting deck
    from engi_mod import deck
    if not deck:
        # empty deck config means: start with every non-special card
        deck = [card["name"] for card in cards if card["rarity"] != "special"]
    for i, line in enumerate(seeker_lines):
        if "Strike_P" in line:
            # replace the whole original deck span (Strike_P .. AstralHaze)
            for j, line in enumerate(seeker_lines):
                if "AstralHaze" in line:
                    break
            del seeker_lines[i:j+1]
            seeker_lines.insert(i, "\n{}\n\n".format(textwrap.indent(
                "\n".join('retVal.add("{}");'.format(card) for card in deck)
                , " " * 4 * 2)))
            break
    with open(os.path.join(mod_dir, *r"src\main\java\fruitymod\characters\TheSeeker.java".split("\\")), "w", encoding="utf-8") as file:
        file.write("".join(seeker_lines))
    card_strings = json.load(open(os.path.join(templates_cache, "FruityMod-CardStrings.json"), encoding="utf-8"))
    for card in cards:
        data = {
            "NAME": card["name"],
            "DESCRIPTION": card["desc"],
        }
        desc = card.get("upgrade_desc")
        if desc:
            data["UPGRADE_DESCRIPTION"] = desc
        card_strings[card["name"]] = data
    json.dump(card_strings,
              open(os.path.join(mod_dir, *r"src\main\resources\localization\FruityMod-CardStrings.json".split("\\")),
                   "w", encoding="utf-8"), sort_keys=True, indent=4)
    # STEP: generate powers
    from engi_mod import powers
    with open(os.path.join("templates", "power.java"), encoding="utf-8") as file:
        power_template = file.read()
    for power in powers:
        with open(os.path.join(mod_dir, *r"src\main\java\fruitymod\powers".split("\\"), power["id"] + ".java"), "w", encoding="utf-8") as file:
            file.write(format(power_template, power))
    # STEP: generate actions
    from engi_mod import actions
    with open(os.path.join("templates", "action.java"), encoding="utf-8") as file:
        action_template = file.read()
    for action in actions:
        with open(os.path.join(mod_dir, *r"src\main\java\fruitymod\actions\unique".split("\\"), action["id"] + ".java"), "w", encoding="utf-8") as file:
            file.write(format(action_template, action))
    # STEP: generate java files
    from engi_mod import javas
    with open(os.path.join("templates", "java.java"), encoding="utf-8") as file:
        java_template = file.read()
    for java in javas:
        with open(os.path.join(mod_dir, *r"src\main\java".split("\\"), *java["package"], java["name"] + ".java"), "w", encoding="utf-8") as file:
            file.write(format(java_template, java))
    # STEP: card images
    print("Generating images")
    import numpy as np
    portrait_masks = {}
    for type in "attack skill power".split():
        image = utils.open_data(os.path.join("templates", "1024Portraits_{}_mask.png".format(type)))
        image = image / 255
        image = np.repeat(image[:,:,:1], 4, axis=-1)
        portrait_masks[type] = image
    for card in cards:
        id = name_id(card["name"], upper=True).lower()
        image_file = os.path.join(image_dir, id + ".png")
        target_p_file = os.path.join(mod_dir, *r"src\main\resources\img\cards".split("\\"), id + "_p" + ".png")
        target_file = os.path.join(mod_dir, *r"src\main\resources\img\cards".split("\\"), id + ".png")
        if os.path.exists(target_p_file):
            continue
        if os.path.exists(image_file):
            image = utils.open_data(image_file)
            from skimage.transform import resize
            # crop-to-fill: scale so the image covers 500x380, then center-crop
            target = 500, 380
            r = image.shape[0] / image.shape[1]
            if r >= target[0] / target[1]:
                size = np.ceil(target[1] * r).astype("int"), target[1]
                x = np.round((size[0] - target[0]) / 2).astype("int")
                image = resize(image, size, mode="edge")[x:x+target[0]]
            else:
                size = target[0], np.ceil(target[0] / r).astype("int")
                image = resize(image, size, mode="edge")[:,:target[1]]
            image *= portrait_masks[card["type"]]
            from PIL import Image
            img = Image.fromarray(np.round(image * 255).astype("uint8").transpose((1, 0, 2)))
            img.save(target_p_file)
            # small (250x190) version for the in-game card
            target = 250, 190
            image = resize(image, target, mode="edge")
            img = Image.fromarray(np.round(image * 255).astype("uint8").transpose((1, 0, 2)))
            img.save(target_file)
    # STEP: card borders
    utils.sync(os.path.join("assets", "512"), os.path.join(mod_dir, *r"src\main\resources\img\512".split("\\")))
    utils.sync(os.path.join("assets", "1024"), os.path.join(mod_dir, *r"src\main\resources\img\1024".split("\\")))
    # STEP: keywords
    from engi_mod import keywords
    keyword_code = io.StringIO()
    for name, keyword in keywords.items():
        words = ", ".join('"{}"'.format(word) for word in [name.lower()] + keyword["words"])
        keyword_code.write(format(
            'BaseMod.addKeyword(new String[] {"{{ name }}", {{ words }}}, "{{ keyword["desc"] }}");'
        ) + "\n")
    with open(os.path.join(mod_dir, *r"src\main\java\fruitymod\FruityMod.java".split("\\")), encoding="utf-8") as file:
        fruity_lines = [line for line in file]
    for i, line in enumerate(fruity_lines):
        if '{"intangible", "Intangible"}, "All damage and HP loss you suffer is reduced to 1."' in line:
            fruity_lines.insert(i + 1, "\n" + textwrap.indent(keyword_code.getvalue(), " " * 4 * 2))
            break
    with open(os.path.join(mod_dir, *r"src\main\java\fruitymod\FruityMod.java".split("\\")), "w", encoding="utf-8") as file:
        file.write("".join(fruity_lines))
    # STEP: mod info
    old_info = os.path.join(mod_dir, *r"src\main\resources\ModTheSpire.config".split("\\"))
    if os.path.exists(old_info):
        os.remove(old_info)
    from engi_mod import info
    json.dump(info, open(os.path.join(mod_dir, *r"src\main\resources\ModTheSpire.json".split("\\")), "w", encoding="utf-8"), indent=4)
    # STEP: maven project
    pom_template = os.path.join(templates_cache, "pom.xml")
    if not os.path.exists(pom_template):
        shutil.copy(os.path.join(mod_dir, "pom.xml"), pom_template)
    with open(pom_template, encoding="utf-8") as file:
        pom = file.read()
    # point the pom at the local game/lib jars and the output jar location
    pom = pom.replace("${basedir}/../lib/ModTheSpire.jar", "/".join(spire_dir.split(os.path.sep) + ["ModTheSpire.jar"]))
    pom = pom.replace("${basedir}/../lib/BaseMod.jar", "/".join(spire_dir.split(os.path.sep) + ["mods", "BaseMod.jar"]))
    pom = pom.replace("${basedir}/../lib/desktop-1.0.jar", "/".join(spire_dir.split(os.path.sep) + ["desktop-1.0.jar"]))
    jar_file = os.path.join(spire_dir, "mods", "EngiMod.jar")
    pom = pom.replace("../_ModTheSpire/mods/FruityMod.jar", "/".join(jar_file.split(os.path.sep)))
    with open(os.path.join(mod_dir, "pom.xml"), "w", encoding="utf-8") as file:
        file.write(pom)
    # STEP: compile
    if os.path.exists(jar_file):
        os.remove(jar_file)
    with utils.cd(mod_dir):
        os.system("mvn package")
    if not os.path.exists(jar_file):
        print("Compilation failed")
        return
    # STEP: test
    with utils.cd(spire_dir):
        os.system("ModTheSpire.jar")
if __name__ == "__main__":
build()
| 47.722008 | 179 | 0.611408 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,398 | 0.274919 |
ff0002f28ad3a199bd96b680511c5012fe2c72ff | 107 | py | Python | src/filtermaker/__init__.py | yukihira1992/filtermaker | 0fdd76771ea551ecdfe3328eadec32f59d0f5f8c | [
"MIT"
] | null | null | null | src/filtermaker/__init__.py | yukihira1992/filtermaker | 0fdd76771ea551ecdfe3328eadec32f59d0f5f8c | [
"MIT"
] | null | null | null | src/filtermaker/__init__.py | yukihira1992/filtermaker | 0fdd76771ea551ecdfe3328eadec32f59d0f5f8c | [
"MIT"
] | null | null | null | from .filters import TextFilter
__version__ = '0.0.1'
__all__ = (
'__version__',
'TextFilter',
)
| 11.888889 | 31 | 0.64486 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 32 | 0.299065 |
ff011eb3b30a5dcec0975d8afd7f72454da4922d | 4,378 | py | Python | projects/migrations/0001_initial.py | louisenje/project-rate | b11e209bebdf59983d967864a049538b2807acd2 | [
"MIT"
] | null | null | null | projects/migrations/0001_initial.py | louisenje/project-rate | b11e209bebdf59983d967864a049538b2807acd2 | [
"MIT"
] | null | null | null | projects/migrations/0001_initial.py | louisenje/project-rate | b11e209bebdf59983d967864a049538b2807acd2 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.3 on 2021-06-02 07:26
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='NewsLetterRecipients',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
('email', models.EmailField(max_length=254)),
],
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('profile_pic', models.ImageField(blank=True, default='media/profile/male.png', upload_to='profile/')),
('bio', models.TextField(blank=True, default='*No Bio*')),
('phone_no', models.IntegerField(blank=True, null=True)),
('gender', models.CharField(blank=True, max_length=10)),
('pub_date', models.DateTimeField(auto_now_add=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='webapps',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('main_picture', models.ImageField(default='webapps/internet.png', upload_to='webapps/')),
('screenshot1', models.ImageField(blank=True, default='webapps/internet.png', upload_to='webapps/')),
('screenshot2', models.ImageField(blank=True, default='webapps/internet.png', upload_to='webapps/')),
('screenshot3', models.ImageField(blank=True, default='webapps/internet.png', upload_to='webapps/')),
('link', models.CharField(max_length=200)),
('description', models.TextField()),
('pub_date', models.DateTimeField(auto_now_add=True)),
('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='projects.profile')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-pub_date'],
},
),
migrations.CreateModel(
name='ratings',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('rate_by_design', models.IntegerField(choices=[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10)], default=0)),
('rate_by_usability', models.IntegerField(choices=[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10)], default=0)),
('rate_by_content', models.IntegerField(choices=[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10)], default=0)),
('rate_by_creativity', models.IntegerField(choices=[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10)], default=0)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('webapp', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='projects.webapps')),
],
),
migrations.CreateModel(
name='comment',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comment', models.CharField(blank=True, max_length=80)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('webapp', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='projects.webapps')),
],
),
]
| 56.128205 | 171 | 0.574006 | 4,219 | 0.963682 | 0 | 0 | 0 | 0 | 0 | 0 | 670 | 0.153038 |
ff03b5152146f8e41903bfd092d0c4ff488dcd2f | 1,068 | py | Python | osbenchmark/builder/downloaders/repositories/repository_url_provider.py | engechas/opensearch-benchmark | d11db3721aebf5419a7fc0b8a7d300a1d63ddbfe | [
"Apache-2.0"
] | 26 | 2021-12-09T06:58:53.000Z | 2022-03-29T15:01:37.000Z | osbenchmark/builder/downloaders/repositories/repository_url_provider.py | engechas/opensearch-benchmark | d11db3721aebf5419a7fc0b8a7d300a1d63ddbfe | [
"Apache-2.0"
] | 63 | 2021-12-08T20:47:17.000Z | 2022-03-31T18:21:31.000Z | osbenchmark/builder/downloaders/repositories/repository_url_provider.py | engechas/opensearch-benchmark | d11db3721aebf5419a7fc0b8a7d300a1d63ddbfe | [
"Apache-2.0"
] | 5 | 2021-12-09T10:17:30.000Z | 2022-03-03T05:31:12.000Z | from functools import reduce
from osbenchmark.exceptions import SystemSetupError
class RepositoryUrlProvider:
def __init__(self, template_renderer, artifact_variables_provider):
self.template_renderer = template_renderer
self.artifact_variables_provider = artifact_variables_provider
def render_url_for_key(self, host, config_variables, key, mandatory=True):
try:
url_template = self._get_value_from_dot_notation_key(config_variables, key)
except TypeError:
if mandatory:
raise SystemSetupError(f"Config key [{key}] is not defined.")
else:
return None
artifact_version = config_variables["distribution"]["version"]
artifact_variables = self.artifact_variables_provider.get_artifact_variables(host, artifact_version)
return self.template_renderer.render_template_string(url_template, artifact_variables)
def _get_value_from_dot_notation_key(self, dict_object, key):
return reduce(dict.get, key.split("."), dict_object)
| 41.076923 | 108 | 0.73221 | 983 | 0.920412 | 0 | 0 | 0 | 0 | 0 | 0 | 63 | 0.058989 |
ff03f3f76a27457124aff233817d512634898ae3 | 4,953 | py | Python | setup_cares.py | thedrow/pycares | ecb5062c31aae66c655c1526ccf21ee0c944d414 | [
"MIT"
] | null | null | null | setup_cares.py | thedrow/pycares | ecb5062c31aae66c655c1526ccf21ee0c944d414 | [
"MIT"
] | null | null | null | setup_cares.py | thedrow/pycares | ecb5062c31aae66c655c1526ccf21ee0c944d414 | [
"MIT"
] | null | null | null |
import errno
import os
import subprocess
import sys
from distutils import log
from distutils.command.build_ext import build_ext
from distutils.errors import DistutilsError
def exec_process(cmdline, silent=True, catch_enoent=True, input=None, **kwargs):
    """Execute a subprocess and return its stdout as text.

    Raises DistutilsError when the executable is missing (if *catch_enoent*)
    or when it exits non-zero; echoes captured stdout/stderr when *silent*
    is false. Extra **kwargs are forwarded to subprocess.Popen.
    """
    try:
        sub = subprocess.Popen(args=cmdline, stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                               **kwargs)
        stdout, stderr = sub.communicate(input=input)
        # fix idiom: use isinstance instead of comparing type objects.
        # On Python 3 the pipes yield bytes and must be decoded; on Python 2
        # they are already str, so nothing happens (original intent preserved).
        if not isinstance(stdout, str):
            stdout = stdout.decode(sys.getdefaultencoding(), "replace")
            stderr = stderr.decode(sys.getdefaultencoding(), "replace")
        returncode = sub.returncode
        if not silent:
            sys.stdout.write(stdout)
            sys.stderr.write(stderr)
    except OSError as e:
        if e.errno == errno.ENOENT and catch_enoent:
            raise DistutilsError('"%s" is not present on this system' % cmdline[0])
        else:
            raise
    if returncode != 0:
        raise DistutilsError('Got return value %d while executing "%s", stderr output was:\n%s' % (returncode, " ".join(cmdline), stderr.rstrip("\n")))
    return stdout
def exec_make(cmdline, *args, **kwargs):
    """Run a make target, preferring GNU make (gmake) on BSD platforms."""
    assert isinstance(cmdline, list)
    on_bsd = "bsd" in sys.platform
    candidates = ["gmake", "make"] if on_bsd else ["make"]
    for tool in candidates:
        if on_bsd and tool == "make":
            log.warn("Running plain make on BSD-derived system. It will likely fail. Consider installing GNU make from the ports collection.")
        try:
            return exec_process([tool] + cmdline, *args, catch_enoent=False, **kwargs)
        except OSError as e:
            # a missing binary falls through to the next candidate
            if e.errno != errno.ENOENT:
                raise
    raise DistutilsError('"make" is not present on this system')
class cares_build_ext(build_ext):
    """build_ext subclass that first compiles the bundled c-ares static
    library, then links the extension against it per platform."""
    cares_dir = os.path.join('deps', 'c-ares')

    # expose an extra --cares-clean-compile command-line flag
    user_options = build_ext.user_options
    user_options.extend([
        ("cares-clean-compile", None, "Clean c-ares tree before compilation"),
    ])
    boolean_options = build_ext.boolean_options
    boolean_options.extend(["cares-clean-compile"])

    def initialize_options(self):
        build_ext.initialize_options(self)
        self.cares_clean_compile = 0

    def build_extensions(self):
        """Build libcares, set platform compiler/linker options, then defer
        to the stock build_ext."""
        if self.compiler.compiler_type == 'mingw32':
            # Dirty hack to avoid linking with more than one C runtime when using MinGW
            self.compiler.dll_libraries = [lib for lib in self.compiler.dll_libraries if not lib.startswith('msvcr')]
        self.force = self.cares_clean_compile
        if self.compiler.compiler_type == 'msvc':
            self.cares_lib = os.path.join(self.cares_dir, 'cares.lib')
        else:
            self.cares_lib = os.path.join(self.cares_dir, 'libcares.a')
        self.build_cares()
        # Set compiler options
        if self.compiler.compiler_type == 'mingw32':
            self.compiler.add_library_dir(self.cares_dir)
            self.compiler.add_library('cares')
        self.extensions[0].extra_objects = [self.cares_lib]
        self.compiler.add_include_dir(os.path.join(self.cares_dir, 'src'))
        if sys.platform.startswith('linux'):
            self.compiler.add_library('rt')
        elif sys.platform == 'win32':
            if self.compiler.compiler_type == 'msvc':
                self.extensions[0].extra_link_args = ['/NODEFAULTLIB:libcmt']
            self.compiler.add_library('advapi32')
            self.compiler.add_library('iphlpapi')
            self.compiler.add_library('psapi')
            self.compiler.add_library('ws2_32')
        build_ext.build_extensions(self)

    def build_cares(self):
        """Compile (optionally after cleaning) the vendored c-ares tree,
        skipping the build when the static library already exists."""
        #self.debug_mode = bool(self.debug) or hasattr(sys, 'gettotalrefcount')
        win32_msvc = self.compiler.compiler_type == 'msvc'
        def build():
            # force position-independent code so the static lib can be
            # linked into the shared extension module
            cflags = '-fPIC'
            env = os.environ.copy()
            env['CFLAGS'] = ' '.join(x for x in (cflags, env.get('CFLAGS', None)) if x)
            log.info('Building c-ares...')
            if win32_msvc:
                exec_process('cmd.exe /C vcbuild.bat', cwd=self.cares_dir, env=env, shell=True)
            else:
                exec_make(['libcares.a'], cwd=self.cares_dir, env=env)
        def clean():
            if win32_msvc:
                exec_process('cmd.exe /C vcbuild.bat clean', cwd=self.cares_dir, shell=True)
            else:
                exec_make(['clean'], cwd=self.cares_dir)
        if self.cares_clean_compile:
            clean()
        if not os.path.exists(self.cares_lib):
            log.info('c-ares needs to be compiled.')
            build()
        else:
            log.info('No need to build c-ares.')
| 40.933884 | 151 | 0.626085 | 2,898 | 0.5851 | 0 | 0 | 0 | 0 | 0 | 0 | 1,138 | 0.22976 |
ff04fdc9886ae14cf3e6ffff47f1dd3087fb8967 | 3,176 | py | Python | FaceTemplateMatching.py | domjhill/Python-FaceTemplateMatching | 4bf72a8534cd1c333956c75ca2fd2851d5d0fbea | [
"MIT"
] | 6 | 2017-08-13T16:55:52.000Z | 2021-07-12T03:33:48.000Z | FaceTemplateMatching.py | domjhill/Python-FaceTemplateMatching | 4bf72a8534cd1c333956c75ca2fd2851d5d0fbea | [
"MIT"
] | null | null | null | FaceTemplateMatching.py | domjhill/Python-FaceTemplateMatching | 4bf72a8534cd1c333956c75ca2fd2851d5d0fbea | [
"MIT"
] | 5 | 2017-08-13T16:56:04.000Z | 2021-01-08T09:00:13.000Z | import cv2
from threading import Thread
import datetime
import time
import sys
class FPSCounter:
def __init__(self):
self._start = None
self._end = None
self._noFrames = 0
def start(self):
self._start = datetime.datetime.now()
return self
def stop(self):
self._end = datetime.datetime.now()
def update(self):
self._noFrames += 1
def elapsed(self):
return (self._end - self._start).total_seconds()
def fps(self):
return self._noFrames/self.elapsed()
class FrameGrabber:
    """Continuously reads frames from a cv2.VideoCapture on a worker thread
    so read() always returns the latest frame without blocking."""
    def __init__(self, src=0):
        self.vidStream = cv2.VideoCapture(src)
        # prime with one frame so read() is valid before the thread starts
        (self.grabbed, self.frame) = self.vidStream.read()
        self.stopped = False
    def start(self):
        """Spawn the background grab loop; returns self for chaining."""
        Thread(target=self.grabFrame, args=()).start()
        return self
    def grabFrame(self):
        """Worker loop: keep overwriting self.frame until stop() is called."""
        while True:
            if self.stopped:
                return
            (self.grabbed, self.frame) = self.vidStream.read()
    def read(self):
        """Return the most recently grabbed frame."""
        return self.frame
    def stop(self):
        """Release the capture device and terminate the worker loop."""
        self.vidStream.release()
        self.stopped = True
# Main script: grab a grayscale face template (from file, argv or a live
# capture), crop it to the detected face, then continuously template-match
# that face in the webcam feed and draw the best match.
vidStream = FrameGrabber(src=0).start()
cascadeFace = cv2.CascadeClassifier('lbpcascade_frontalface.xml')
if (len(sys.argv) == 1):
    template = cv2.imread('template.png', 0)
else:
    imgPath = sys.argv[1]
    template = cv2.imread(imgPath, 0)
if template is None:
    #If no template file exists, open video stream to capture template
    while (True):
        tempFrame = vidStream.read()
        cv2.imshow('Template Capture', tempFrame)
        template = cv2.cvtColor(tempFrame, cv2.COLOR_BGR2GRAY)
        # press 'q' to freeze the current frame as the template
        if cv2.waitKey(1) & 0xFF == ord('q'):
            cv2.imwrite("template.png", template)
            break
w,h = template.shape[::-1]
#Reducing the template image to crop out the face
face = cascadeFace.detectMultiScale(template, scaleFactor=1.3, minNeighbors=5, minSize=(25,25))
padding = 30  # NOTE(review): unused — the crop margins below are hard-coded
for (x,y,w,h) in face:
    # extend the crop 30px above / 20px below the detected face box
    cv2.rectangle(template, (x,y-30), (x + w, y + h+20), (0,255,0), 2)
    cropped = template[y-30:y+h+20, x:x+w]
# NOTE(review): `cropped` is unbound if no face was detected — the imshow
# below would then raise NameError; confirm a face is guaranteed here.
cv2.imshow('Template', template)
cv2.imshow('Cropped', cropped)
cv2.waitKey(1)
fps = FPSCounter().start()
while True:
    frame = vidStream.read()
    cv2.imshow('Frame', frame)
    gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
    cv2.imshow('Gray', gray)
    faceCam = cascadeFace.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5, minSize=(25,25))
    for (x,y,w,h) in faceCam:
        # scale the template crop to the size of the detected face, then
        # find the best normalized cross-correlation match in the frame
        croppedResized = cv2.resize(cropped, (w,h), interpolation=cv2.INTER_LINEAR)
        cv2.imshow('Resized', croppedResized)
        mat = cv2.matchTemplate(gray, croppedResized, cv2.TM_CCOEFF_NORMED)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(mat)
        top_left = max_loc
        bottom_right = (top_left[0] + w, top_left[1] + h + 30)
        cv2.rectangle(frame, top_left, bottom_right, (0,255,0), 2)
    time.sleep(0.001)
    # press 'q' to quit the matching loop
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    fps.update()
fps.stop()
print('FPS: ', fps.fps())
print('Elapsed seconds: ', fps.elapsed())
vidStream.stop()
cv2.destroyAllWindows()
| 27.145299 | 98 | 0.621537 | 1,089 | 0.342884 | 0 | 0 | 0 | 0 | 0 | 0 | 262 | 0.082494 |
ff06a4859edba98a7a88d5622aad9b23c4419609 | 12,017 | py | Python | kafka_proto_api/protos/etf_http_ref_pb2.py | Ycallaer/kafka_proto_py | 478f0ac7a95e4c14f4bb2f1deeef60df0c8aa133 | [
"MIT"
] | 1 | 2021-03-24T12:43:24.000Z | 2021-03-24T12:43:24.000Z | kafka_proto_api/protos/etf_http_ref_pb2.py | Ycallaer/kafka_proto_py | 478f0ac7a95e4c14f4bb2f1deeef60df0c8aa133 | [
"MIT"
] | null | null | null | kafka_proto_api/protos/etf_http_ref_pb2.py | Ycallaer/kafka_proto_py | 478f0ac7a95e4c14f4bb2f1deeef60df0c8aa133 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: etf_http_ref.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='etf_http_ref.proto',
package='',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x12\x65tf_http_ref.proto\x1a google/protobuf/descriptor.proto\"\xa5\x01\n\x0c\x65tf_http_ref\x12\x0c\n\x04\x64\x61te\x18\x01 \x01(\t\x12\x0c\n\x04open\x18\x02 \x01(\x01\x12\x0c\n\x04high\x18\x03 \x01(\x01\x12\x0b\n\x03low\x18\x04 \x01(\x01\x12\r\n\x05\x63lose\x18\x05 \x01(\x01\x12\x0e\n\x06volume\x18\x06 \x01(\x03\x12\x0f\n\x07openint\x18\x07 \x01(\x03:.\xd8\xed\x1a\x02\xe2\xed\x1a&https://en.wikipedia.org/wiki/ISO_8601*\xae\x01\n\nTermSource\x12\x1b\n\x17TERM_SOURCE_UNSPECIFIED\x10\x00\x12\x13\n\x0fTERM_SOURCE_ONE\x10\x01\x12\x14\n\x10TERM_SOURCE_FIBO\x10\x02\x12\x13\n\x0fTERM_SOURCE_ISO\x10\x03\x12\x18\n\x14TERM_SOURCE_ISO20022\x10\x04\x12\x13\n\x0fTERM_SOURCE_FIX\x10\x05\x12\x14\n\x10TERM_SOURCE_FPML\x10\x06:8\n\rcoding_scheme\x12\x1f.google.protobuf.MessageOptions\x18\xda\xad\x03 \x01(\t:C\n\x0bterm_source\x12\x1f.google.protobuf.MessageOptions\x18\xdb\xad\x03 \x01(\x0e\x32\x0b.TermSource::\n\x0fterm_source_ref\x12\x1f.google.protobuf.MessageOptions\x18\xdc\xad\x03 \x01(\t:8\n\rmsg_term_link\x12\x1f.google.protobuf.MessageOptions\x18\xdd\xad\x03 \x01(\t:6\n\ris_identifier\x12\x1d.google.protobuf.FieldOptions\x18\xc2\xb5\x03 \x01(\x08:8\n\x0f\x65xternal_schema\x12\x1d.google.protobuf.FieldOptions\x18\xc3\xb5\x03 \x01(\t:8\n\x0f\x66ield_term_link\x12\x1d.google.protobuf.FieldOptions\x18\xc4\xb5\x03 \x01(\tb\x06proto3'
,
dependencies=[google_dot_protobuf_dot_descriptor__pb2.DESCRIPTOR,])
_TERMSOURCE = _descriptor.EnumDescriptor(
name='TermSource',
full_name='TermSource',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='TERM_SOURCE_UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TERM_SOURCE_ONE', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TERM_SOURCE_FIBO', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TERM_SOURCE_ISO', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TERM_SOURCE_ISO20022', index=4, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TERM_SOURCE_FIX', index=5, number=5,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TERM_SOURCE_FPML', index=6, number=6,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=225,
serialized_end=399,
)
_sym_db.RegisterEnumDescriptor(_TERMSOURCE)
TermSource = enum_type_wrapper.EnumTypeWrapper(_TERMSOURCE)
TERM_SOURCE_UNSPECIFIED = 0
TERM_SOURCE_ONE = 1
TERM_SOURCE_FIBO = 2
TERM_SOURCE_ISO = 3
TERM_SOURCE_ISO20022 = 4
TERM_SOURCE_FIX = 5
TERM_SOURCE_FPML = 6
CODING_SCHEME_FIELD_NUMBER = 55002
coding_scheme = _descriptor.FieldDescriptor(
name='coding_scheme', full_name='coding_scheme', index=0,
number=55002, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
TERM_SOURCE_FIELD_NUMBER = 55003
term_source = _descriptor.FieldDescriptor(
name='term_source', full_name='term_source', index=1,
number=55003, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
TERM_SOURCE_REF_FIELD_NUMBER = 55004
term_source_ref = _descriptor.FieldDescriptor(
name='term_source_ref', full_name='term_source_ref', index=2,
number=55004, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
MSG_TERM_LINK_FIELD_NUMBER = 55005
msg_term_link = _descriptor.FieldDescriptor(
name='msg_term_link', full_name='msg_term_link', index=3,
number=55005, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
IS_IDENTIFIER_FIELD_NUMBER = 56002
is_identifier = _descriptor.FieldDescriptor(
name='is_identifier', full_name='is_identifier', index=4,
number=56002, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
EXTERNAL_SCHEMA_FIELD_NUMBER = 56003
external_schema = _descriptor.FieldDescriptor(
name='external_schema', full_name='external_schema', index=5,
number=56003, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
FIELD_TERM_LINK_FIELD_NUMBER = 56004
field_term_link = _descriptor.FieldDescriptor(
name='field_term_link', full_name='field_term_link', index=6,
number=56004, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
_ETF_HTTP_REF = _descriptor.Descriptor(
name='etf_http_ref',
full_name='etf_http_ref',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='date', full_name='etf_http_ref.date', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='open', full_name='etf_http_ref.open', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='high', full_name='etf_http_ref.high', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='low', full_name='etf_http_ref.low', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='close', full_name='etf_http_ref.close', index=4,
number=5, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='volume', full_name='etf_http_ref.volume', index=5,
number=6, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='openint', full_name='etf_http_ref.openint', index=6,
number=7, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'\330\355\032\002\342\355\032&https://en.wikipedia.org/wiki/ISO_8601',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=57,
serialized_end=222,
)
DESCRIPTOR.message_types_by_name['etf_http_ref'] = _ETF_HTTP_REF
DESCRIPTOR.enum_types_by_name['TermSource'] = _TERMSOURCE
DESCRIPTOR.extensions_by_name['coding_scheme'] = coding_scheme
DESCRIPTOR.extensions_by_name['term_source'] = term_source
DESCRIPTOR.extensions_by_name['term_source_ref'] = term_source_ref
DESCRIPTOR.extensions_by_name['msg_term_link'] = msg_term_link
DESCRIPTOR.extensions_by_name['is_identifier'] = is_identifier
DESCRIPTOR.extensions_by_name['external_schema'] = external_schema
DESCRIPTOR.extensions_by_name['field_term_link'] = field_term_link
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
etf_http_ref = _reflection.GeneratedProtocolMessageType('etf_http_ref', (_message.Message,), {
'DESCRIPTOR' : _ETF_HTTP_REF,
'__module__' : 'etf_http_ref_pb2'
# @@protoc_insertion_point(class_scope:etf_http_ref)
})
_sym_db.RegisterMessage(etf_http_ref)
google_dot_protobuf_dot_descriptor__pb2.MessageOptions.RegisterExtension(coding_scheme)
term_source.enum_type = _TERMSOURCE
google_dot_protobuf_dot_descriptor__pb2.MessageOptions.RegisterExtension(term_source)
google_dot_protobuf_dot_descriptor__pb2.MessageOptions.RegisterExtension(term_source_ref)
google_dot_protobuf_dot_descriptor__pb2.MessageOptions.RegisterExtension(msg_term_link)
google_dot_protobuf_dot_descriptor__pb2.FieldOptions.RegisterExtension(is_identifier)
google_dot_protobuf_dot_descriptor__pb2.FieldOptions.RegisterExtension(external_schema)
google_dot_protobuf_dot_descriptor__pb2.FieldOptions.RegisterExtension(field_term_link)
_ETF_HTTP_REF._options = None
# @@protoc_insertion_point(module_scope)
| 48.651822 | 1,362 | 0.785471 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,567 | 0.213614 |
ff098027575fba502952fc4d4e830126b3044b28 | 3,086 | py | Python | src/access.py | zimingd/packer-rstudio | 935360c55c292b06969fe157da95b74351e4d3c1 | [
"Apache-2.0"
] | null | null | null | src/access.py | zimingd/packer-rstudio | 935360c55c292b06969fe157da95b74351e4d3c1 | [
"Apache-2.0"
] | 7 | 2020-04-14T17:00:48.000Z | 2022-03-03T00:39:21.000Z | src/access.py | zimingd/packer-rstudio | 935360c55c292b06969fe157da95b74351e4d3c1 | [
"Apache-2.0"
] | 4 | 2020-05-20T16:47:22.000Z | 2021-05-25T15:16:00.000Z | #!/usr/bin/env python3
import jwt
import requests
import base64
import json
import boto3
import time
import functools
import os
from mod_python import apache
# AWS region of this EC2 instance, read from the instance identity document
# served by the instance metadata endpoint (169.254.169.254).
region = json.loads(requests.get('http://169.254.169.254/latest/dynamic/instance-identity/document').text)['region']
# Env var naming the SSM parameter where the forwarded access token is stored.
ssm_parameter_name_env_var = 'SYNAPSE_TOKEN_AWS_SSM_PARAMETER_NAME'
# Env var naming the KMS key alias used to encrypt that SSM parameter.
kms_alias_env_var = 'KMS_KEY_ALIAS'
def headerparserhandler(req):
    """mod_python access handler: authorise a request from the ALB-signed OIDC JWT.

    Returns apache.OK only when the JWT's userid claim matches this instance's
    approved user and the token has not expired; on success the ALB access token
    is persisted to SSM. Any mismatch or JWT parse/verification failure yields
    HTTP 401.
    """
    # proxy.conf guarantees this header is present on proxied requests
    encoded_jwt = req.headers_in['x-amzn-oidc-data']
    try:
        claims = jwt_payload(encoded_jwt)
        if claims['exp'] <= time.time() or claims['userid'] != approved_user():
            # token expired, or userid claim does not match the instance's userid tag
            return apache.HTTP_UNAUTHORIZED
        store_to_ssm(req.headers_in['x-amzn-oidc-accesstoken'])
        return apache.OK
    except Exception:
        # the JWT payload is invalid or could not be verified
        return apache.HTTP_UNAUTHORIZED
def approved_user():
    """Return the userid this EC2 instance is approved for, or None if untagged.

    Reads the 'Protected/AccessApprovedCaller' tag of the current instance
    (value format '<principal>:<userid>') and returns the userid portion.
    EC2 tag keys are unique per resource, so at most one tag can match,
    which also resolves the previous TODO about multiple matching tags.
    """
    instance_id = requests.get('http://169.254.169.254/latest/meta-data/instance-id').text
    ec2 = boto3.resource('ec2', region)
    vm = ec2.Instance(instance_id)
    for tag in vm.tags:
        if tag["Key"] == 'Protected/AccessApprovedCaller':
            # Keep only the userid portion of '<principal>:<userid>'.
            return tag["Value"].split(':')[1]
    # Tag absent: previously this raised NameError on an unbound local.
    # Returning None keeps the caller's equality check failing cleanly (-> 401).
    return None
# lru_cache(maxsize=1) means a token identical to the previous call is not
# re-written to SSM Parameter Store. Per the functools implementation the
# argument (the access token) is hashed for the cache key, not retained as-is.
@functools.lru_cache(maxsize=1)
def store_to_ssm(access_token):
    """Persist *access_token* as an encrypted SecureString in SSM Parameter Store.

    The destination parameter name and KMS key alias are read from the
    environment; when no parameter name is configured, this is a no-op.
    """
    parameter_name = os.environ.get(ssm_parameter_name_env_var)
    if not parameter_name:
        # No destination configured -> nothing to store.
        return
    kms_key_alias = os.environ.get(kms_alias_env_var)
    ssm_client = boto3.client('ssm', region)
    kms_client = boto3.client('kms', region)
    key_metadata = kms_client.describe_key(KeyId=kms_key_alias)['KeyMetadata']
    ssm_client.put_parameter(
        Name=parameter_name,
        Type='SecureString',
        Value=access_token,
        KeyId=key_metadata['KeyId'],
        Overwrite=True,
    )
def jwt_payload(encoded_jwt):
    """Verify an ALB-signed OIDC JWT and return its claims payload.

    The x-amzn-oidc-data header is a base64-encoded JWT signed by the ALB;
    validating the ES256 signature against the ALB's regional public key
    proves the payload is authentic (see the AWS ELB listener-authenticate
    documentation).
    """
    # The 'kid' field of the JWT header identifies which ALB signing key to fetch.
    header_segment = encoded_jwt.split('.')[0]
    header = json.loads(base64.b64decode(header_segment).decode("utf-8"))
    # Fetch the matching public key from the regional endpoint, then verify.
    pub_key = get_aws_elb_public_key(region, header['kid'])
    return jwt.decode(encoded_jwt, pub_key, algorithms=['ES256'])
@functools.lru_cache()
def get_aws_elb_public_key(region, key_id):
    """Fetch (and memoise) the ALB public signing key for *region* / *key_id*."""
    url = 'https://public-keys.auth.elb.{}.amazonaws.com/{}'.format(region, key_id)
    return requests.get(url).text
| 33.182796 | 116 | 0.745949 | 0 | 0 | 0 | 0 | 769 | 0.24919 | 0 | 0 | 1,337 | 0.433247 |
ff0b467f1ad7ee9bf9ae9be6fd164aa964a5004d | 289 | py | Python | tests/test_checks_interface.py | ployt0/server_monitor | 835e48ed317b4b069ebd66675ca2d1b3120770c0 | [
"MIT"
] | null | null | null | tests/test_checks_interface.py | ployt0/server_monitor | 835e48ed317b4b069ebd66675ca2d1b3120770c0 | [
"MIT"
] | null | null | null | tests/test_checks_interface.py | ployt0/server_monitor | 835e48ed317b4b069ebd66675ca2d1b3120770c0 | [
"MIT"
] | null | null | null | from checks_interface import deserialise_simple_csv
def test_deserialise_simple_csv():
    """Pin deserialise_simple_csv's handling of a mixed CSV string."""
    # NOTE(review): the expectation maps 'barry white' -> 'barrywhite' (space
    # removed) while the other multi-word names keep their spaces, and the
    # literal 'None' becomes a Python None. Presumably this pins intentional
    # normalisation in deserialise_simple_csv — confirm against its implementation.
    csv_list = deserialise_simple_csv("yolo,barry white,george soros,tilda swinton,None,bill gates")
    assert csv_list == ['yolo', 'barrywhite', 'george soros', 'tilda swinton', None, 'bill gates']
| 41.285714 | 100 | 0.768166 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 120 | 0.415225 |
ff0c20d8ce541a65a62a8c8b06da2596d5632606 | 16,877 | py | Python | perses/tests/test_atom_mapping.py | schallerdavid/perses | 58bd6e626e027879e136f56e175683893e016f8c | [
"MIT"
] | 99 | 2016-01-19T18:10:37.000Z | 2022-03-26T02:43:08.000Z | perses/tests/test_atom_mapping.py | schallerdavid/perses | 58bd6e626e027879e136f56e175683893e016f8c | [
"MIT"
] | 878 | 2015-09-18T19:25:30.000Z | 2022-03-31T02:33:04.000Z | perses/tests/test_atom_mapping.py | schallerdavid/perses | 58bd6e626e027879e136f56e175683893e016f8c | [
"MIT"
] | 30 | 2015-09-21T15:26:35.000Z | 2022-01-10T20:07:24.000Z | import os
import pytest
import unittest
from perses.rjmc.atom_mapping import AtomMapper, AtomMapping, InvalidMappingException
from openff.toolkit.topology import Molecule
################################################################################
# LOGGER
################################################################################
import logging
# Configure root logging and create a module-level logger for these tests.
logging.basicConfig(level = logging.NOTSET)
_logger = logging.getLogger("atom_mapping")
_logger.setLevel(logging.INFO)
################################################################################
# AtomMapping
################################################################################
class TestAtomMapping(unittest.TestCase):
    """Test AtomMapping object."""
    def setUp(self):
        """Create useful common objects for testing."""
        self.old_mol = Molecule.from_smiles('[C:0]([H:1])([H:2])([H:3])[C:4]([H:5])([H:6])([H:7])') # ethane
        self.new_mol = Molecule.from_smiles('[C:0]([H:1])([H:2])([H:3])[C:4]([H:5])([H:6])[O:7][H:8]') # ethanol
        # Only the two carbons (indices 0 and 4) are mapped between molecules.
        self.old_to_new_atom_map = { 0:0, 4:4 }
        self.new_to_old_atom_map = dict(map(reversed, self.old_to_new_atom_map.items()))
    def test_create(self):
        """An AtomMapping built from either map direction exposes both maps consistently."""
        atom_mapping = AtomMapping(self.old_mol, self.old_mol, old_to_new_atom_map=self.old_to_new_atom_map)
        assert atom_mapping.old_to_new_atom_map == self.old_to_new_atom_map
        assert atom_mapping.new_to_old_atom_map == self.new_to_old_atom_map
        assert atom_mapping.n_mapped_atoms == 2
        atom_mapping = AtomMapping(self.old_mol, self.old_mol, new_to_old_atom_map=self.new_to_old_atom_map)
        assert atom_mapping.old_to_new_atom_map == self.old_to_new_atom_map
        assert atom_mapping.new_to_old_atom_map == self.new_to_old_atom_map
        assert atom_mapping.n_mapped_atoms == 2
    def test_validation_fail(self):
        """Invalid maps (empty, non-integer, out-of-range, duplicated) raise InvalidMappingException."""
        # Empty mapping
        with pytest.raises(InvalidMappingException) as excinfo:
            atom_mapping = AtomMapping(self.old_mol, self.new_mol, { })
        # Non-integers
        with pytest.raises(InvalidMappingException) as excinfo:
            atom_mapping = AtomMapping(self.old_mol, self.new_mol, { 0:0, 4:4, 5:'a' })
        # Invalid atom indices
        with pytest.raises(InvalidMappingException) as excinfo:
            atom_mapping = AtomMapping(self.old_mol, self.new_mol, { 0:0, 4:4, 9:9 })
        # Duplicated atom indices
        with pytest.raises(InvalidMappingException) as excinfo:
            atom_mapping = AtomMapping(self.old_mol, self.new_mol, { 0:0, 4:4, 3:4 })
    def test_set_and_get_mapping(self):
        """Re-assigning either map direction keeps both accessors consistent."""
        atom_mapping = AtomMapping(self.old_mol, self.old_mol, old_to_new_atom_map=self.old_to_new_atom_map)
        # Set old-to-new map
        atom_mapping.old_to_new_atom_map = self.old_to_new_atom_map
        assert atom_mapping.old_to_new_atom_map == self.old_to_new_atom_map
        assert atom_mapping.new_to_old_atom_map == self.new_to_old_atom_map
        # Set new-to-old map
        atom_mapping.new_to_old_atom_map = self.new_to_old_atom_map
        assert atom_mapping.old_to_new_atom_map == self.old_to_new_atom_map
        assert atom_mapping.new_to_old_atom_map == self.new_to_old_atom_map
    def test_repr(self):
        """repr() of an AtomMapping does not raise."""
        atom_mapping = AtomMapping(self.old_mol, self.old_mol, old_to_new_atom_map=self.old_to_new_atom_map)
        repr(atom_mapping)
    def test_str(self):
        """str() of an AtomMapping does not raise."""
        atom_mapping = AtomMapping(self.old_mol, self.old_mol, old_to_new_atom_map=self.old_to_new_atom_map)
        str(atom_mapping)
    def test_render_image(self):
        """render_image() succeeds for PDF, PNG, and SVG output."""
        import tempfile
        atom_mapping = AtomMapping(self.old_mol, self.old_mol, old_to_new_atom_map=self.old_to_new_atom_map)
        for suffix in ['.pdf', '.png', '.svg']:
            with tempfile.NamedTemporaryFile(suffix=suffix) as tmpfile:
                atom_mapping.render_image(tmpfile.name)
    def test_ring_breaking_detection(self):
        """creates_or_breaks_rings() is False for ethane->ethanol and True for benzene->naphthalene."""
        # Test simple ethane -> ethanol transformation
        atom_mapping = AtomMapping(self.old_mol, self.old_mol, old_to_new_atom_map=self.old_to_new_atom_map)
        assert atom_mapping.creates_or_breaks_rings() == False
        # Define benzene -> napthalene transformation
        old_mol = Molecule.from_smiles('[c:0]1[c:1][c:2][c:3][c:4][c:5]1') # benzene
        new_mol = Molecule.from_smiles('[c:0]12[c:1][c:2][c:3][c:4][c:5]2[c:6][c:7][c:8][c:9]1') # napthalene
        old_to_new_atom_map = { 0:0, 1:1, 2:2, 3:3, 4:4, 5:5 }
        # NOTE(review): this local is unused and reverses self.old_to_new_atom_map,
        # not the local map defined above — looks like copy/paste leftover.
        new_to_old_atom_map = dict(map(reversed, self.old_to_new_atom_map.items()))
        atom_mapping = AtomMapping(old_mol, new_mol, old_to_new_atom_map=old_to_new_atom_map)
        assert atom_mapping.creates_or_breaks_rings() == True
    def test_unmap_partially_mapped_cycles(self):
        """unmap_partially_mapped_cycles() demaps ring atoms in ring-size-changing transformations."""
        # Test simple ethane -> ethanol transformation
        atom_mapping = AtomMapping(self.old_mol, self.old_mol, old_to_new_atom_map=self.old_to_new_atom_map)
        n_mapped_atoms_old = atom_mapping.n_mapped_atoms
        atom_mapping.unmap_partially_mapped_cycles()
        assert atom_mapping.n_mapped_atoms == n_mapped_atoms_old
        # Test methyl-cyclohexane -> methyl-cyclopentane, demapping the ring transformation
        old_mol = Molecule.from_smiles('[C:0][C:1]1[C:2][C:3][C:4][C:5][C:6]1') # methyl-cyclohexane
        new_mol = Molecule.from_smiles('[C:0][C:1]1[C:2][C:3][C:4][C:5]1') # methyl-cyclopentane
        old_to_new_atom_map = { 0:0, 1:1, 2:2, 3:3, 5:4, 6:5 }
        # NOTE(review): this local is unused and reverses self.old_to_new_atom_map,
        # not the local map defined above — looks like copy/paste leftover.
        new_to_old_atom_map = dict(map(reversed, self.old_to_new_atom_map.items()))
        atom_mapping = AtomMapping(old_mol, new_mol, old_to_new_atom_map=old_to_new_atom_map)
        assert atom_mapping.creates_or_breaks_rings() == True
        atom_mapping.unmap_partially_mapped_cycles()
        assert atom_mapping.old_to_new_atom_map == {0:0} # only methyl group should remain mapped
    def test_preserve_chirality(self):
        """preserve_chirality() leaves consistent maps intact and resolves inverted stereocenters."""
        # Test simple ethane -> ethanol transformation
        atom_mapping = AtomMapping(self.old_mol, self.old_mol, old_to_new_atom_map=self.old_to_new_atom_map)
        n_mapped_atoms_old = atom_mapping.n_mapped_atoms
        atom_mapping.preserve_chirality()
        assert atom_mapping.n_mapped_atoms == n_mapped_atoms_old
        # Test resolution of incorrect stereochemistry
        old_mol = Molecule.from_smiles('[C@H:0]([Cl:1])([Br:2])([F:3])')
        new_mol = Molecule.from_smiles('[C@@H:0]([Cl:1])([Br:2])([F:3])')
        atom_mapping = AtomMapping(old_mol, new_mol, old_to_new_atom_map={0:0, 1:1, 2:2, 3:3})
        atom_mapping.preserve_chirality()
        assert atom_mapping.old_to_new_atom_map == {0:0, 1:1, 2:2, 3:3} # TODO: Check this
################################################################################
# AtomMapper
################################################################################
class TestAtomMapper(unittest.TestCase):
def setUp(self):
self.molecules = dict()
for dataset_name in ['CDK2', 'p38', 'Tyk2', 'Thrombin', 'PTP1B', 'MCL1', 'Jnk1', 'Bace']:
# Read molecules
from pkg_resources import resource_filename
dataset_path = 'data/schrodinger-jacs-datasets/%s_ligands.sdf' % dataset_name
sdf_filename = resource_filename('perses', dataset_path)
self.molecules[dataset_name] = Molecule.from_file(sdf_filename, allow_undefined_stereo=True)
def test_molecular_atom_mapping(self):
"""Test the creation of atom maps between pairs of molecules from the JACS benchmark set.
"""
for use_positions in [True, False]:
for allow_ring_breaking in [True, False]:
# Create and configure an AtomMapper
atom_mapper = AtomMapper(use_positions=use_positions, allow_ring_breaking=allow_ring_breaking)
# Test mappings for JACS dataset ligands
# TODO: Uncomment other test datasets
for dataset_name in ['CDK2', 'p38', 'Tyk2', 'Thrombin', 'PTP1B', 'MCL1', 'Jnk1', 'Bace']:
molecules = self.molecules[dataset_name]
# Build atom map for some transformations.
#from itertools import combinations
#for old_index, new_index in combinations(range(len(molecules)), 2): # exhaustive test is too slow
old_index = 0
for new_index in range(1, len(molecules), 3): # skip every few molecules to keep test times down
try:
atom_mapping = atom_mapper.get_best_mapping(molecules[old_index], molecules[new_index])
# TODO: Perform quality checks
# Render mapping for visual inspection
#filename = f'mapping-{dataset_name}-use_positions={use_positions}-allow_ring_breaking={allow_ring_breaking}-{old_index}-to-{new_index}.png'
#atom_mapping.render_image(filename)
except Exception as e:
e.args += (f'Exception encountered for {dataset_name} use_positions={use_positions} allow_ring_breaking={allow_ring_breaking}: {old_index} {molecules[old_index]}-> {new_index} {molecules[new_index]}', )
raise e
def test_map_strategy(self):
"""
Test the creation of atom maps between pairs of molecules from the JACS benchmark set.
"""
# Create and configure an AtomMapper
from openeye import oechem
atom_expr = oechem.OEExprOpts_IntType
bond_expr = oechem.OEExprOpts_RingMember
atom_mapper = AtomMapper(atom_expr=atom_expr, bond_expr=bond_expr)
# Test mappings for JACS dataset ligands
for dataset_name in ['Jnk1']:
molecules = self.molecules[dataset_name]
# Jnk1 ligands 0 and 2 have meta substituents that face opposite each other in the active site.
# When ignoring position information, the mapper should align these groups, and put them both in the core.
# When using position information, the mapper should see that the orientations differ and chose
# to unmap (i.e. put both these groups in core) such as to get the geometry right at the expense of
# mapping fewer atoms
# Ignore positional information when scoring mappings
atom_mapper.use_positions = False
atom_mapping = atom_mapper.get_best_mapping(molecules[0], molecules[2])
#assert len(atom_mapping.new_to_old_atom_map) == 36, f'Expected meta groups methyl C to map onto ethyl O\n{atom_mapping}' # TODO
# Use positional information to score mappings
atom_mapper.use_positions = True
atom_mapping = atom_mapper.get_best_mapping(molecules[0], molecules[2])
#assert len(atom_mapping.new_to_old_atom_map) == 35, f'Expected meta groups methyl C to NOT map onto ethyl O as they are distal in cartesian space\n{atom_mapping}' # TODO
def test_generate_atom_mapping_from_positions(self):
"""
Test the generation of atom mappings from positions on JACS set compounds
"""
# Create and configure an AtomMapper
atom_mapper = AtomMapper()
# Exclude datasets that contain displaced ligands:
# 'p38', 'PTP1B', 'MCL1',
for dataset_name in ['CDK2', 'Tyk2', 'Thrombin', 'Jnk1', 'Bace']:
molecules = self.molecules[dataset_name]
reference_molecule = molecules[0]
for index, target_molecule in enumerate(molecules):
# Explicitly construct mapping from positional information alone
try:
atom_mapping = atom_mapper.generate_atom_mapping_from_positions(reference_molecule, target_molecule)
except InvalidMappingException as e:
e.args = e.args + (f'dataset: {dataset_name}: molecule 0 -> {index}',)
raise e
def test_atom_mappings_moonshot(self):
"""
Test the generation of atom mappings on COVID Moonshot compounds
"""
# Create and configure an AtomMapper
atom_mapper = AtomMapper()
# Load molecules with positions
from pkg_resources import resource_filename
dataset_path = 'data/covid-moonshot/sprint-10-2021-07-26-x10959-dimer-neutral.sdf.gz'
sdf_filename = resource_filename('perses', dataset_path)
molecules = Molecule.from_file(sdf_filename)
# Take a subset
nskip = 20
molecules = molecules[::nskip]
# Test geometry-derived mappings
reference_molecule = molecules[0]
for index, molecule in enumerate(molecules):
# Ignore positional information when scoring mappings
atom_mapper.use_positions = False
atom_mapping = atom_mapper.get_best_mapping(molecules[0], molecules[2])
#assert len(atom_mapping.new_to_old_atom_map) == 36, f'Expected meta groups methyl C to map onto ethyl O\n{atom_mapping}' # TODO
# Use positional information to score mappings
atom_mapper.use_positions = True
atom_mapping = atom_mapper.get_best_mapping(molecules[0], molecules[2])
#assert len(atom_mapping.new_to_old_atom_map) == 35, f'Expected meta groups methyl C to NOT map onto ethyl O as they are distal in cartesian space\n{atom_mapping}' # TODO
# Explicitly construct mapping from positional information alone
atom_mapping = atom_mapper.generate_atom_mapping_from_positions(reference_molecule, molecule)
def test_simple_heterocycle_mapping(self):
"""
Test the ability to map conjugated heterocycles (that preserves all rings). Will assert that the number of ring members in both molecules is the same.
"""
# TODO: generalize this to test for ring breakage and closure.
iupac_pairs = [
('benzene', 'pyridine')
]
# Create and configure an AtomMapper
atom_mapper = AtomMapper(allow_ring_breaking=False)
for old_iupac, new_iupac in iupac_pairs:
old_mol = Molecule.from_iupac(old_iupac)
new_mol = Molecule.from_iupac(new_iupac)
atom_mapping = atom_mapper.get_best_mapping(old_mol, new_mol)
assert len(atom_mapping.old_to_new_atom_map) > 0
def test_mapping_strength_levels(self):
    """Test that the mapping strength presets work as expected.

    Each entry pairs two SMILES strings with the hand-calibrated number
    of mapped atoms expected for every strength level.  With DEBUG_MODE
    set, results are only logged (for recalibration) instead of asserted.
    """
    # SMILES pairs to test mappings: (mol1, mol2, expected counts per strength)
    tests = [
        ('c1ccccc1', 'C1CCCCC1', {'default': 0, 'weak' : 6, 'strong' : 0}),  # benzene -> cyclohexane
        ('CNC1CCCC1', 'CNC1CCCCC1', {'default': 6, 'weak' : 6, 'strong' : 6}),  # https://github.com/choderalab/perses/issues/805#issue-913932127
        ('c1ccccc1CNC2CCC2', 'c1ccccc1CNCC2CCC2', {'default': 13, 'weak' : 13, 'strong' : 11}),  # https://github.com/choderalab/perses/issues/805#issue-913932127
        ('Cc1ccccc1', 'c1ccc(cc1)N', {'default': 12, 'weak' : 12, 'strong' : 11}),
        ('CC(c1ccccc1)', 'O=C(c1ccccc1)', {'default': 13, 'weak' : 14, 'strong' : 11}),
        ('Oc1ccccc1', 'Sc1ccccc1', {'default': 12, 'weak' : 12, 'strong' : 11}),
    ]

    DEBUG_MODE = True  # If True, don't fail, but print results of tests for calibration

    for mol1_smiles, mol2_smiles, expected_results in tests:
        # Create OpenFF Molecule objects once per pair (loop-invariant,
        # hoisted out of the per-strength loop).
        mol1 = Molecule.from_smiles(mol1_smiles)
        mol2 = Molecule.from_smiles(mol2_smiles)
        for map_strength, expected_n_mapped_atoms in expected_results.items():
            # Initialize the atom mapper with the requested mapping strength.
            atom_mapper = AtomMapper(map_strength=map_strength, allow_ring_breaking=False)
            # Create the atom mapping.
            atom_mapping = atom_mapper.get_best_mapping(mol1, mol2)
            if DEBUG_MODE:
                if atom_mapping is not None:
                    _logger.info(f'{mol1_smiles} -> {mol2_smiles} using map strength {map_strength} : {atom_mapping.n_mapped_atoms} atoms mapped : {atom_mapping.old_to_new_atom_map}')
                    atom_mapping.render_image(f'test_mapping_strength_levels:{mol1_smiles}:{mol2_smiles}:{map_strength}.png')
                else:
                    _logger.info(f'{mol1_smiles} -> {mol2_smiles} using map strength {map_strength} : {atom_mapping}')
            else:
                # Check that the expected number of atoms is mapped.
                n_mapped_atoms = 0
                if atom_mapping is not None:
                    n_mapped_atoms = atom_mapping.n_mapped_atoms
                assert n_mapped_atoms == expected_n_mapped_atoms, "Number of mapped atoms does not match hand-calibrated expectation"
| 54.092949 | 230 | 0.642591 | 16,041 | 0.950465 | 0 | 0 | 0 | 0 | 0 | 0 | 6,126 | 0.362979 |
ff0c43edc85bb45d1c3abc800ebe6bb6c12cb224 | 1,630 | py | Python | app/api.py | amelie-fri/munch-api | cbb205acbb5b1a107862cd8a53197de5317a26e4 | [
"MIT"
] | 2 | 2020-09-21T08:22:11.000Z | 2020-09-22T07:58:16.000Z | app/api.py | amelie-fri/munch-api | cbb205acbb5b1a107862cd8a53197de5317a26e4 | [
"MIT"
] | null | null | null | app/api.py | amelie-fri/munch-api | cbb205acbb5b1a107862cd8a53197de5317a26e4 | [
"MIT"
] | 1 | 2020-09-25T07:19:37.000Z | 2020-09-25T07:19:37.000Z | from flask import Flask, request, jsonify
from flask_restful import Resource, Api
from TeiParser import Family
from dataManager import parentManager
import os

# Path to the "_data/N" folder holding the TEI parent files.
path_N = os.path.join("_data", "N")

# Create the parent manager that indexes the parent files.
pm_N = parentManager(path_N)

# Create the Flask application and its REST API wrapper.
app = Flask(__name__)
api = Api(app)
# Returns the array of parent filenames.
class MunchParents(Resource):
    """REST resource listing all known parent filenames."""

    def get(self):
        """Return ``{"data": [...]}`` with the parent filenames."""
        return {"data": pm_N.parents}
# Returns the content of a parent file.
class MunchFamily(Resource):
    """REST resource returning title/type/dates/text for one parent file."""

    def get(self, _file):
        """Return the aggregated content of parent ``_file``.

        Responds 404 when the filename is unknown, otherwise 200 with
        title, text type, collected child dates and the joined text.
        """
        if _file not in pm_N.parents:
            # File is not known to the parent manager.
            return {"notFound": _file}, 404

        family = Family(os.path.join(path_N, _file))
        resp = {
            "title": family.data.title,
            "type_text": family.data.type,
            "date": {"when": [], "from": [], "to": []},
            "text": "",
        }
        _text = []
        for child in family.children:
            for item in child.date:
                # item is 'when', 'to', or 'from'; keep non-empty values only.
                if child.date[item]:
                    resp["date"][item].append(child.date[item])
            _text.append(child.text)
        # Join the children's text blocks separated by blank lines.
        resp["text"] = "\n\n".join(_text)
        return resp, 200
# Register the REST endpoints.
api.add_resource(MunchParents, "/N")
api.add_resource(MunchFamily, "/N/<_file>")

if __name__ == "__main__":
    # Development server; listens on all interfaces.
    app.run(debug=True, host="0.0.0.0")
| 28.596491 | 67 | 0.564417 | 1,056 | 0.647853 | 0 | 0 | 0 | 0 | 0 | 0 | 425 | 0.260736 |
ff0c4addd4684de4de158e744b10372828e5a73b | 99 | py | Python | globals/__init__.py | anmartinezs/pyseg | f991d8826e8d4e1eff70064183cb79425b7e9109 | [
"Apache-2.0"
] | 1 | 2018-09-11T17:10:52.000Z | 2018-09-11T17:10:52.000Z | globals/__init__.py | anmartinezs/pyseg | f991d8826e8d4e1eff70064183cb79425b7e9109 | [
"Apache-2.0"
] | null | null | null | globals/__init__.py | anmartinezs/pyseg | f991d8826e8d4e1eff70064183cb79425b7e9109 | [
"Apache-2.0"
] | null | null | null | __author__ = 'martinez'
import vtk
import numpy as np
from variables import *
from utils import * | 14.142857 | 23 | 0.767677 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.10101 |
ff0cf49c7c7632c23c40457718207c17bbdfabac | 1,053 | py | Python | LeetCode/Linked List/23. Merge k Sorted Lists/solution.py | Ceruleanacg/Crack-Interview | 994dc0eee2f576fc543c90b82398dc8d957cdf09 | [
"MIT"
] | 17 | 2018-09-04T15:51:30.000Z | 2021-06-04T08:47:07.000Z | LeetCode/Linked List/23. Merge k Sorted Lists/solution.py | Ceruleanacg/Crack-Interview | 994dc0eee2f576fc543c90b82398dc8d957cdf09 | [
"MIT"
] | null | null | null | LeetCode/Linked List/23. Merge k Sorted Lists/solution.py | Ceruleanacg/Crack-Interview | 994dc0eee2f576fc543c90b82398dc8d957cdf09 | [
"MIT"
] | 6 | 2018-11-03T09:36:25.000Z | 2020-05-27T17:51:08.000Z | # Definition for singly-linked list.
class ListNode:
    """Singly-linked list node holding a value and a next pointer."""

    def __init__(self, x):
        """Create a node with value ``x`` and no successor."""
        self.val = x
        self.next = None
class Solution:
    """LeetCode 23: merge k sorted linked lists."""

    def mergeKLists(self, lists: list):
        """Merge k sorted linked lists into a single sorted list.

        :type lists: List[ListNode]
        :rtype: ListNode

        Folds the input lists together with successive pairwise merges;
        the input list is consumed (popped) in the process.
        """
        if len(lists) == 0:
            return None
        if len(lists) == 1:
            return lists[0]
        node_result = self.merge_two_sorted_lists(lists.pop(), lists.pop())
        while lists:
            node_result = self.merge_two_sorted_lists(node_result, lists.pop())
        return node_result

    def merge_two_sorted_lists(self, node_a, node_b):
        """Merge two sorted linked lists and return the merged head."""
        head_node = ListNode(-1)  # dummy head simplifies splicing
        cur_node = head_node
        while node_a and node_b:
            if node_a.val < node_b.val:
                cur_node.next = node_a
                node_a = node_a.next
            else:
                cur_node.next = node_b
                node_b = node_b.next
            cur_node = cur_node.next
        # Attach whichever list still has nodes remaining.
        cur_node.next = node_a or node_b
        return head_node.next
| 26.325 | 79 | 0.555556 | 1,012 | 0.961064 | 0 | 0 | 0 | 0 | 0 | 0 | 112 | 0.106363 |
ff0e0ab82a47b14a5a9f6d4e884685c57fad317e | 1,391 | py | Python | Code/Python/ImageProcessing/RadialTransform/bSpline_test.py | Nailim/shuttler | a12ea89a1c6b289079ce61ebf8bf3361696f10b2 | [
"MIT"
] | null | null | null | Code/Python/ImageProcessing/RadialTransform/bSpline_test.py | Nailim/shuttler | a12ea89a1c6b289079ce61ebf8bf3361696f10b2 | [
"MIT"
] | null | null | null | Code/Python/ImageProcessing/RadialTransform/bSpline_test.py | Nailim/shuttler | a12ea89a1c6b289079ce61ebf8bf3361696f10b2 | [
"MIT"
] | null | null | null | # this resizes __1.jpt to x it's original size & it turns it grayscale
# Resizes __1.jpg to `scale` times its original size using cubic
# B-spline interpolation (legacy OpenCV `cv` API + local bSpline module).
import cv
import numpy

import bSpline

if __name__ == "__main__":  # run only as a script, not as a module

    scale = 10

    # Load image unchanged (use CV_LOAD_IMAGE_GRAYSCALE for grayscale).
    #cv_img = cv.LoadImage("__1.jpg", cv.CV_LOAD_IMAGE_GRAYSCALE)
    cv_img = cv.LoadImage("__1.jpg", cv.CV_LOAD_IMAGE_UNCHANGED)

    # Source image width & height.
    cv_img_width = cv.GetSize(cv_img)[0]
    cv_img_height = cv.GetSize(cv_img)[1]

    # Build a (H*scale, W*scale, 2) grid: each output pixel stores the
    # fractional (row, col) source coordinate it samples from.
    img_tpl = numpy.zeros(((cv_img_height * scale), (cv_img_width * scale), 2))
    for h in range(0, (cv_img_height * scale), 1):
        for w in range(0, (cv_img_width * scale), 1):
            img_tpl[h][w][0] = (h + 0) / (cv_img_height * scale * 1.0) * cv_img_height
            img_tpl[h][w][1] = (w + 0) / (cv_img_width * scale * 1.0) * cv_img_width

    ##bSpl = bSpline.BSpline() # v4.0

    # Single picture:
    ##cv_img_out = bSpl.cubic(cv_img, img_tpl) # v4.0
    #cv_img_out = bSpline.cubic(cv_img, img_tpl)
    #cv.SaveImage("out__1.jpg", cv_img_out)

    # Multiple pictures: compute the spline coefficients once, reuse them.
    img_beta_f = bSpline.cubic_getBeta(cv_img, img_tpl)
    cv_img_out = bSpline.cubic_setBeta(cv_img, img_tpl, img_beta_f)
    cv.SaveImage("out__1.01.jpg", cv_img_out)
    #cv_img_out = bSpl.cubic_setBeta(cv_img, img_tpl, img_beta_f)
    #cv.SaveImage("out__1.02.jpg", cv_img_out)
    #cv_img_out = bSpl.cubic_setBeta(cv_img, img_tpl, img_beta_f)
    #cv.SaveImage("out__1.03.jpg", cv_img_out)
| 33.119048 | 88 | 0.710999 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 672 | 0.483106 |
ff0e759a560e8f0f6ef4777c4576a5ba668deba3 | 354 | py | Python | crawler/crawling/items.py | zookeeperss/scrapy-cluster | afb7be0ff8c272691761da01ef28172bad864f9b | [
"MIT"
] | null | null | null | crawler/crawling/items.py | zookeeperss/scrapy-cluster | afb7be0ff8c272691761da01ef28172bad864f9b | [
"MIT"
] | null | null | null | crawler/crawling/items.py | zookeeperss/scrapy-cluster | afb7be0ff8c272691761da01ef28172bad864f9b | [
"MIT"
] | 1 | 2020-07-26T08:24:03.000Z | 2020-07-26T08:24:03.000Z | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
from scrapy import Item, Field
class RawResponseItem(Item):
    """Scrapy item carrying a raw crawl response and its metadata."""
    appid = Field()         # application identifier
    crawlid = Field()       # crawl batch identifier
    url = Field()           # requested URL
    response_url = Field()  # URL of the response -- presumably after redirects; TODO confirm
    status_code = Field()   # HTTP status code
    status_msg = Field()    # HTTP status message
    headers = Field()       # response headers
    body = Field()          # response body
    links = Field()         # extracted links
    attrs = Field()         # additional attributes
| 19.666667 | 47 | 0.610169 | 247 | 0.69774 | 0 | 0 | 0 | 0 | 0 | 0 | 70 | 0.19774 |
ff0ec8bcc5fb92da0704cb54de56155385f7c9bc | 1,459 | py | Python | examples/c/cdecl.py | rakati/ppci-mirror | 8f5b0282fd1122d7c389b39c86fcf5d9352b7bb2 | [
"BSD-2-Clause"
] | 161 | 2020-05-31T03:29:42.000Z | 2022-03-07T08:36:19.000Z | examples/c/cdecl.py | rakati/ppci-mirror | 8f5b0282fd1122d7c389b39c86fcf5d9352b7bb2 | [
"BSD-2-Clause"
] | 74 | 2020-05-26T18:05:48.000Z | 2021-02-13T21:55:39.000Z | examples/c/cdecl.py | rakati/ppci-mirror | 8f5b0282fd1122d7c389b39c86fcf5d9352b7bb2 | [
"BSD-2-Clause"
] | 19 | 2020-05-27T19:22:11.000Z | 2022-02-17T18:53:52.000Z | """ Implement alike logic as is done on www.cdecl.org
Try for example:
$ cdecl.py 'char **a;'
"""
import argparse
import io

from ppci.api import get_current_arch
from ppci.lang.c import CLexer, CParser, COptions, CContext, CSemantics
from ppci.lang.c.nodes import types, declarations
from ppci.lang.c.preprocessor import prepare_for_parsing

# Command-line interface: a single C declaration snippet to explain.
parser = argparse.ArgumentParser(
    description=__doc__,
    formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('source', type=str)
args = parser.parse_args()

# print('Source:', args.source)

# Parse the snippet into an AST using the C frontend for the current arch.
arch = get_current_arch()
coptions = COptions()
ccontext = CContext(coptions, arch.info)
semantics = CSemantics(ccontext)
cparser = CParser(coptions, semantics)
clexer = CLexer(COptions())
f = io.StringIO(args.source)
tokens = clexer.lex(f, '<snippet>')
tokens = prepare_for_parsing(tokens, cparser.keywords)
cparser.init_lexer(tokens)
semantics.begin()
decl = cparser.parse_declarations()[0]
# Explain the parsed declaration in English (cdecl.org style).
def explain(x):
    """Return an English description of a C declaration AST node.

    Recurses through pointer/array wrappers down to a basic type.
    Unrecognized node kinds are printed with '???' and yield None.
    """
    if isinstance(x, declarations.VariableDeclaration):
        return '{} is {}'.format(x.name, explain(x.typ))
    elif isinstance(x, types.PointerType):
        return 'a pointer to {}'.format(explain(x.element_type))
    elif isinstance(x, types.ArrayType):
        return 'an array of {}'.format(explain(x.element_type))
    elif isinstance(x, types.BasicType):
        return '{}'.format(x.type_id)
    else:
        # Fallback for node kinds not handled above.
        print('???', x)


print(explain(decl))
| 29.18 | 78 | 0.727896 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 229 | 0.156957 |
ff0eeef238c81de1bf1340b33fe05aefc2ffa217 | 1,462 | py | Python | minder_lastreview.py | hodea/hodea-review-minder | 6fff883c9b521be59c6d996edeafa25074be7a21 | [
"MIT"
] | null | null | null | minder_lastreview.py | hodea/hodea-review-minder | 6fff883c9b521be59c6d996edeafa25074be7a21 | [
"MIT"
] | 27 | 2017-12-13T19:10:36.000Z | 2018-11-20T09:21:23.000Z | minder_lastreview.py | hodea/hodea-review-minder | 6fff883c9b521be59c6d996edeafa25074be7a21 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 26 19:38:29 2018
@author: Daniel
"""
import argparse
import os
from minder_config import minder_cfg
from minder_database import minder_db
from minder_htmlreport import minder_report
import time
import hashlib
import uuid
class get_lastreview:
    """Walk ``topdir`` and count the accessible review-candidate files.

    Files located under any path listed in ``cfg_exclude`` are skipped;
    only files whose lower-cased name ends with one of the ``cfg_type``
    suffixes are considered.  Each candidate is opened for reading and
    for read/write as an access check before being counted.
    """

    def __init__(self, minder_dict, topdir, cfg_exclude, cfg_type):
        print(topdir)
        print(cfg_exclude)
        print(cfg_type)
        print(minder_dict)
        filecnt = 0
        for root, dirs, files in os.walk(topdir):
            for name in files:
                # Skip files located under any excluded directory.
                find = False
                for i in range(0, len(cfg_exclude)):
                    if os.path.join(root, name).startswith(os.path.dirname(cfg_exclude[i])):
                        find = True
                if find is True:
                    continue
                for j in range(0, len(cfg_type)):
                    if name.lower().endswith(cfg_type[j]):
                        try:
                            # Access check: file must be readable and writable.
                            flog = open(os.path.join(root, name), "rb")
                            flog.close()
                            flog = open(os.path.join(root, name), 'r+')
                            flog.close()
                            filecnt += 1
                        except Exception:
                            print("ERROR: No Access to: " + os.path.join(root, name))
                            # Re-raise the original error (instead of a bare
                            # `Exception`) so the real cause is preserved.
                            raise
        print(filecnt)
| 29.836735 | 92 | 0.487004 | 1,179 | 0.80643 | 0 | 0 | 0 | 0 | 0 | 0 | 114 | 0.077975 |
ff0efbaac9950eb55c068dc1d25ca25669b5b53d | 189 | py | Python | build/lib/NaMAZU/onnx_api/__init__.py | NMZ0429/NaMAZU | 46ac3a5fab6fc21bbef323e16daadfd4111e2e68 | [
"Apache-2.0"
] | 5 | 2021-09-22T20:17:22.000Z | 2021-11-26T07:09:18.000Z | build/lib/NaMAZU/onnx_api/__init__.py | NMZ0429/NaMAZU | 46ac3a5fab6fc21bbef323e16daadfd4111e2e68 | [
"Apache-2.0"
] | null | null | null | build/lib/NaMAZU/onnx_api/__init__.py | NMZ0429/NaMAZU | 46ac3a5fab6fc21bbef323e16daadfd4111e2e68 | [
"Apache-2.0"
] | null | null | null | __all__ = ["MiDASInference", "U2NetInference", "RealESRGANInference"]
from .midas import MiDASInference
from .segmentation import U2NetInference
from .real_esr import RealESRGANInference
| 27 | 69 | 0.825397 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 53 | 0.280423 |
ff109f1e8d0d2b14f171554da2663b60fd683a02 | 495 | py | Python | practice/aboutfunctions.py | mrElnekave/Hallow-Valley | 6c3ba0dc3932839941a00362da0212850b2b20a6 | [
"MIT"
] | null | null | null | practice/aboutfunctions.py | mrElnekave/Hallow-Valley | 6c3ba0dc3932839941a00362da0212850b2b20a6 | [
"MIT"
] | null | null | null | practice/aboutfunctions.py | mrElnekave/Hallow-Valley | 6c3ba0dc3932839941a00362da0212850b2b20a6 | [
"MIT"
def create_path(path: str):
    """
    :param path: the relative path from the pixel images folder
    :return: the relative path from the root of the project
    """
    # NOTE(review): relies on a module-level `current_path` defined
    # elsewhere in the original file.
    return current_path + path

# A function's name comes after `def` and before the parameters.
# Function parameters: the values the function knows, inside the parentheses.
# Function type hinting: tells the reader the argument should be a string, etc.
# Docstrings: tell what the function does, what its parameters are, and what it returns.
ff12374036cabc8d3ecc65f6a2e6dd1c7c2493d3 | 5,171 | py | Python | tex2ebook.py | rzoller/tex2ebook | 57859343e2e4fd31a5701ee834019a5e7b9e8128 | [
"Apache-2.0"
] | 13 | 2015-01-03T13:07:07.000Z | 2017-01-03T16:06:28.000Z | tex2ebook.py | rkaravia/tex2ebook | 57859343e2e4fd31a5701ee834019a5e7b9e8128 | [
"Apache-2.0"
] | 1 | 2020-11-05T13:31:02.000Z | 2020-11-05T13:31:03.000Z | tex2ebook.py | rzoller/tex2ebook | 57859343e2e4fd31a5701ee834019a5e7b9e8128 | [
"Apache-2.0"
] | 6 | 2015-03-30T05:13:25.000Z | 2019-05-16T14:05:03.000Z | # run with --help to see available options
import os, sys, tempfile, shutil, re
from optparse import OptionParser

# Directory where intermediate files are kept when logging is enabled.
log_dir = os.path.abspath('_log')


def get_working_dir(texfile, log):
    """Return a working directory for converting ``texfile``.

    With ``log`` set, a persistent per-file subdirectory under ``_log``
    is (re)created so intermediate files can be inspected afterwards;
    otherwise a throw-away system temporary directory is returned.
    """
    if log:
        # Create a subdirectory in _log named after the TeX file.
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
        # (The original joined log_dir twice; a single join yields the
        # same absolute path since log_dir is absolute.)
        working_dir = os.path.join(
            log_dir, '%s-files' % os.path.splitext(os.path.basename(texfile))[0])
        if os.path.exists(working_dir):
            shutil.rmtree(working_dir)
        os.mkdir(working_dir)
        return working_dir
    else:
        # Create a temporary directory in the system tmp location.
        return tempfile.mkdtemp()
# Convert all files listed in indexfile.
def batch(indexfile, log, ebook_ext):
    """Convert every TeX file listed (one per line) in ``indexfile``.

    Paths in the index are resolved relative to the index file itself.
    """
    print("--- Using batch file %s" % indexfile)
    indexroot = os.path.abspath(os.path.dirname(indexfile))
    for texfilerel in open(indexfile):
        texfile = os.path.join(indexroot, texfilerel.strip())
        convert(texfile, log, ebook_ext)
# Convert a single TeX file to an e-book.
def convert(texfile, log, ebook_ext, dest=None):
    """Convert ``texfile`` to an e-book via hevea, imagen and ebook-convert.

    :param texfile: path to the .tex source
    :param log: keep intermediate files under ``_log`` when True
    :param ebook_ext: output extension ('epub' or 'mobi')
    :param dest: output filename; defaults to '<title>.<ebook_ext>'

    WARNING: file names are interpolated into shell commands unescaped
    (os.system); do not run on untrusted paths -- shell-injection risk.
    """
    print("--- Converting file %s" % texfile)
    basename = os.path.basename(texfile)
    title = os.path.splitext(basename)[0]
    working_dir = get_working_dir(texfile, log)
    print("--- Working dir is %s" % working_dir)
    os.chdir(os.path.join('./', os.path.dirname(texfile)))

    html = os.path.join(working_dir, '%s.html' % title)
    log_hevea = os.path.join(working_dir, 'hevea.log')
    hevea = 'hevea %s -o %s >> %s' % (basename, html, log_hevea)
    print("--- Invoking hevea...")
    print(hevea)
    os.system(hevea)
    # Run bibhva then hevea twice more to resolve bibliography/cross-refs.
    os.system('bibhva %s >> %s' % (os.path.join(working_dir, title), log_hevea))
    os.system(hevea)
    os.system(hevea)

    imagen = 'imagen -pdf %s >> %s' % (os.path.join(working_dir, title), log_hevea)
    print("--- Invoking imagen...")
    print(imagen)
    os.system(imagen)

    if dest is None:
        dest = '%s.%s' % (title, ebook_ext)

    # Add extension-specific options.
    ext_options = ''
    if ebook_ext == 'epub':
        ext_options = '--no-default-epub-cover'

    log_ebook = os.path.join(working_dir, 'ebook-convert.log')
    ebookconvert = 'ebook-convert %s %s %s --page-breaks-before / --toc-threshold 0 --level1-toc //h:h2 --level2-toc //h:h3 --level3-toc //h:h4 >> %s' % (html, dest, ext_options, log_ebook)
    print("--- Invoking ebook-convert...")
    print(ebookconvert)
    os.system(ebookconvert)
    print("--- Result written to %s" % dest)
# Convert LaTeX equations to images by wrapping them in a hevea
# `toimage` environment.  Added 25.04.2013 ML.
# Background: http://comments.gmane.org/gmane.comp.tex.hevea/192
# Rewrite approach adapted from
# http://stackoverflow.com/questions/39086/search-and-replace-a-line-in-a-file-in-python
# Equation syntax reference: http://en.wikibooks.org/wiki/LaTeX/Mathematics
def equ_to_images(texfile):
    """Rewrite ``texfile`` so common math environments render as images.

    Writes ``<name>_eq_to_images<ext>`` next to the input: first a
    ``equ_to_image`` environment definition, then the input with every
    common math delimiter replaced by that environment.  ``$`` and
    ``$$`` delimiters are intentionally left untouched.  Returns the
    new file's path.
    """
    print("--- Converting equations to images for file %s" % texfile)
    (head, tail) = os.path.split(texfile)
    (root, ext) = os.path.splitext(tail)
    new_root = '%s_eq_to_images' % root
    new_texfile = os.path.join(head, new_root + ext)
    new_file = open(new_texfile, 'w')
    old_file = open(texfile)
    # Define the new environment (written before the copied content).
    new_file.write('\\newenvironment{equ_to_image}{\\begin{toimage}\\(}{\\)\\end{toimage}\\imageflush}')
    for line in old_file:
        new_line = line
        # Replace all math start/end delimiters with the new environment
        # tags (only $ and $$ are not replaced).
        new_line = new_line.replace('\\(', '\\begin{equ_to_image}')
        new_line = new_line.replace('\\begin{math}', '\\begin{equ_to_image}')
        new_line = new_line.replace('\\[', '\\begin{equ_to_image}')
        new_line = new_line.replace('\\begin{displaymath}', '\\begin{equ_to_image}')
        new_line = new_line.replace('\\begin{equation}', '\\begin{equ_to_image}')
        new_line = new_line.replace('\\)', '\\end{equ_to_image}')
        new_line = new_line.replace('\\end{math}', '\\end{equ_to_image}')
        new_line = new_line.replace('\\]', '\\end{equ_to_image}')
        new_line = new_line.replace('\\end{displaymath}', '\\end{equ_to_image}')
        new_line = new_line.replace('\\end{equation}', '\\end{equ_to_image}')
        new_file.write(new_line)
    # Close both files before returning.
    new_file.close()
    old_file.close()
    return new_texfile
# Command-line entry point.
usage = "usage: %prog [options] file"
parser = OptionParser(usage=usage)
parser.add_option("-l", "--log", action="store_true", dest="log", default=False, help="keep the intermediate files")
parser.add_option("-b", "--batch", action="store_true", dest="batch", default=False, help="process several files in batch mode")
parser.add_option("-k", "--kindle", action="store_true", dest="kindle", default=False, help="convert to MOBI rather than EPUB (default)")
parser.add_option("-i", "--equ_to_images", action="store_true", dest="images", default=False, help="convert equations to images")
parser.add_option("-o", "--output", dest="outfile", help="output filename")
(options, params) = parser.parse_args()

# Pick the target format from the --kindle flag.
if options.kindle:
    ext = 'mobi'
else:
    ext = 'epub'

if len(params) == 0:
    print("No file specified!")
else:
    if options.batch:
        batch(params[-1], options.log, ext)
    else:
        texfile = params[-1]
        if options.images:
            texfile = equ_to_images(texfile)
        if options.outfile is None:
            convert(texfile, options.log, ext)
        else:
            convert(texfile, options.log, ext, os.path.abspath(options.outfile))
ff1501dcd7b3dad8c0c13f61bf195ed160da427f | 4,544 | py | Python | pyfritzhome/devicetypes/fritzhomedevicethermostat.py | Gezzo42/python-fritzhome | 2edbd521163b9f9477400f4646c13df9ddc73db5 | [
"MIT"
] | null | null | null | pyfritzhome/devicetypes/fritzhomedevicethermostat.py | Gezzo42/python-fritzhome | 2edbd521163b9f9477400f4646c13df9ddc73db5 | [
"MIT"
] | null | null | null | pyfritzhome/devicetypes/fritzhomedevicethermostat.py | Gezzo42/python-fritzhome | 2edbd521163b9f9477400f4646c13df9ddc73db5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import logging

from .fritzhomedevicebase import FritzhomeDeviceBase
from .fritzhomedevicefeatures import FritzhomeDeviceFeatures

_LOGGER = logging.getLogger(__name__)


class FritzhomeDeviceThermostat(FritzhomeDeviceBase):
    """The Fritzhome thermostat device class."""

    # Thermostat state, populated from the device's <hkr> XML node.
    actual_temperature = None
    target_temperature = None
    eco_temperature = None
    comfort_temperature = None
    device_lock = None
    lock = None
    error_code = None
    battery_low = None
    battery_level = None
    window_open = None
    summer_active = None
    holiday_active = None
    nextchange_endperiod = None
    nextchange_temperature = None

    def _update_from_node(self, node):
        """Refresh device state from an XML ``node``."""
        super()._update_from_node(node)
        if self.present is False:
            return

        if self.has_thermostat:
            self._update_hkr_from_node(node)

    # Thermostat
    @property
    def has_thermostat(self):
        """Check if the device has thermostat function."""
        return self._has_feature(FritzhomeDeviceFeatures.THERMOSTAT)

    def _update_hkr_from_node(self, node):
        """Parse the <hkr> element of ``node`` into instance attributes."""
        hkr_element = node.find("hkr")

        try:
            self.actual_temperature = self.get_temp_from_node(hkr_element, "tist")
        except ValueError:
            # The actual temperature may be unparsable; keep the old value.
            pass

        self.target_temperature = self.get_temp_from_node(hkr_element, "tsoll")
        self.eco_temperature = self.get_temp_from_node(hkr_element, "absenk")
        self.comfort_temperature = self.get_temp_from_node(hkr_element, "komfort")

        # Optional values: best-effort parse -- any failure leaves the
        # previously stored values untouched.
        try:
            self.device_lock = self.get_node_value_as_int_as_bool(
                hkr_element, "devicelock"
            )
            self.lock = self.get_node_value_as_int_as_bool(hkr_element, "lock")
            self.error_code = self.get_node_value_as_int(hkr_element, "errorcode")
            self.battery_low = self.get_node_value_as_int_as_bool(
                hkr_element, "batterylow"
            )
            self.battery_level = int(self.get_node_value_as_int(hkr_element, "battery"))
            self.window_open = self.get_node_value_as_int_as_bool(
                hkr_element, "windowopenactiv"
            )
            self.summer_active = self.get_node_value_as_int_as_bool(
                hkr_element, "summeractive"
            )
            self.holiday_active = self.get_node_value_as_int_as_bool(
                hkr_element, "holidayactive"
            )
            nextchange_element = hkr_element.find("nextchange")
            self.nextchange_endperiod = int(
                self.get_node_value_as_int(nextchange_element, "endperiod")
            )
            self.nextchange_temperature = self.get_temp_from_node(
                nextchange_element, "tchange"
            )
        except Exception:
            pass

    def get_temperature(self):
        """Get the device temperature value."""
        return self._fritz.get_temperature(self.ain)

    def get_target_temperature(self):
        """Get the thermostate target temperature."""
        return self._fritz.get_target_temperature(self.ain)

    def set_target_temperature(self, temperature):
        """Set the thermostate target temperature."""
        return self._fritz.set_target_temperature(self.ain, temperature)

    def set_window_open(self, seconds):
        """Set the thermostate to window open."""
        return self._fritz.set_window_open(self.ain, seconds)

    def get_comfort_temperature(self):
        """Get the thermostate comfort temperature."""
        return self._fritz.get_comfort_temperature(self.ain)

    def get_eco_temperature(self):
        """Get the thermostate eco temperature."""
        return self._fritz.get_eco_temperature(self.ain)

    def get_hkr_state(self):
        """Return the thermostat state.

        One of 'off', 'on', 'eco', 'comfort' when the target temperature
        matches a preset; otherwise 'manual'.
        """
        try:
            return {
                126.5: "off",
                127.0: "on",
                self.eco_temperature: "eco",
                self.comfort_temperature: "comfort",
            }[self.target_temperature]
        except KeyError:
            return "manual"

    def set_hkr_state(self, state):
        """Set the state of the thermostat.

        Possible values for state are: 'on', 'off', 'comfort', 'eco'.
        Unknown states are silently ignored.
        """
        try:
            value = {
                "off": 0,
                "on": 100,
                "eco": self.eco_temperature,
                "comfort": self.comfort_temperature,
            }[state]
        except KeyError:
            return

        self.set_target_temperature(value)
| 33.167883 | 88 | 0.626981 | 4,347 | 0.956646 | 0 | 0 | 167 | 0.036752 | 0 | 0 | 759 | 0.167033 |
ff18552d2ab970009d07a39acf26b2dfab64a7a4 | 14,721 | py | Python | UI/mainUI.py | steenzout/python-storj-gui | 5e81898f8a8a97f8ffa91563e20cf8b851075c64 | [
"MIT"
] | null | null | null | UI/mainUI.py | steenzout/python-storj-gui | 5e81898f8a8a97f8ffa91563e20cf8b851075c64 | [
"MIT"
] | null | null | null | UI/mainUI.py | steenzout/python-storj-gui | 5e81898f8a8a97f8ffa91563e20cf8b851075c64 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import logging
import threading
import storj.exception as sjexc
from PyQt4 import QtCore, QtGui
from .qt_interfaces.dashboard_ui import Ui_MainMenu
from .bucket_edition import BucketEditingUI
from .client_config import ClientConfigurationUI
from .engine import StorjEngine
from .file_download import SingleFileDownloadUI
from .file_mirror import FileMirrorsListUI
from .file_upload import SingleFileUploadUI
from .utilities.tools import Tools
from .sync_menu import SyncMenuUI
from .resources.constants import DISPLAY_FILE_CREATION_DATE_IN_MAIN,\
FILE_LIST_SORTING_MAIN_ENABLED, BUCKETS_LIST_SORTING_ENABLED, DATA_TABLE_EDIT_ENABLED
from .resources.custom_qt_interfaces import TableModel
class ExtendedQLabel(QtGui.QLabel):
    """QLabel that emits a ``clicked()`` signal on mouse release."""

    def __init__(self, parent):
        # BUG FIX: the original defined ``__init`` (missing trailing
        # underscores), so this initializer was never invoked.
        QtGui.QLabel.__init__(self, parent)

    def mouseReleaseEvent(self, ev):
        # Old-style PyQt4 signal emission.
        self.emit(QtCore.SIGNAL('clicked()'))
class MainUI(QtGui.QMainWindow):
"""Main UI section."""
__logger = logging.getLogger('%s.MainUI' % __name__)
def __init__(self, parent=None, bucketid=None):
    """Build the main dashboard window and wire up its signals."""
    QtGui.QWidget.__init__(self, parent)
    self.file_manager_ui = Ui_MainMenu()
    self.file_manager_ui.setupUi(self)
    # self.change_loading_gif()

    # Bucket selection combo box: reload the file list on change.
    QtCore.QObject.connect(self.file_manager_ui.bucket_select_combo_box,
                           QtCore.SIGNAL('currentIndexChanged(const QString&)'),
                           self.createNewFileListUpdateThread)

    # Open mirrors list window.
    QtCore.QObject.connect(self.file_manager_ui.file_mirrors_bt,
                           QtCore.SIGNAL('clicked()'),
                           self.open_mirrors_list_window)

    # Download the selected file.
    QtCore.QObject.connect(self.file_manager_ui.file_download_bt,
                           QtCore.SIGNAL('clicked()'),
                           self.open_single_file_download_window)

    # Delete the selected file.
    QtCore.QObject.connect(self.file_manager_ui.file_delete_bt,
                           QtCore.SIGNAL('clicked()'),
                           self.delete_selected_file)

    # Custom signal used to toggle the loading animation from threads.
    self.connect(self, QtCore.SIGNAL('changeLoadingGif'),
                 self.change_loading_gif)

    if not DATA_TABLE_EDIT_ENABLED:
        self.file_manager_ui.files_list_tableview.setEditTriggers(
            QtGui.QAbstractItemView.NoEditTriggers)

    # Image-label "buttons" handled via raw mouse events.
    self.file_manager_ui.settings_bt.mousePressEvent = \
        self.open_settings_window
    self.file_manager_ui.refresh_bt.mousePressEvent = \
        self.createNewFileListUpdateThread

    # Upload a new file.
    QtCore.QObject.connect(self.file_manager_ui.new_file_upload_bt,
                           QtCore.SIGNAL('clicked()'),
                           self.open_single_file_upload_window)

    # Open the bucket editing window in 'edit' mode.
    QtCore.QObject.connect(self.file_manager_ui.edit_bucket_bt,
                           QtCore.SIGNAL('clicked()'),
                           lambda: self.open_bucket_editing_window(action='edit'))

    # Open the bucket editing window in 'add' mode.
    QtCore.QObject.connect(self.file_manager_ui.create_bucket_bt,
                           QtCore.SIGNAL('clicked()'),
                           lambda: self.open_bucket_editing_window(action='add'))

    self.storj_engine = StorjEngine()  # init StorjEngine

    user_email = self.storj_engine.account_manager.get_user_email()
    self.file_manager_ui.account_label.setText(user_email)

    self.createNewBucketResolveThread()
def open_sync_menu(self):
    """Open the synchronization menu window."""
    self.open_sync_menu_window = SyncMenuUI(self)
    self.open_sync_menu_window.show()
def change_loading_gif(self, is_visible):
    """Show the animated loading GIF (True) or the static refresh icon."""
    if is_visible:
        movie = QtGui.QMovie(':/resources/loading.gif')
        self.file_manager_ui.refresh_bt.setMovie(movie)
        movie.start()
    else:
        self.file_manager_ui.refresh_bt.setPixmap(
            QtGui.QPixmap(':/resources/refresh.png'))
def open_bucket_editing_window(self, action):
    """Open the bucket editor.

    ``action`` is 'edit' (acts on the currently selected bucket) or
    'add' (creates a new bucket).
    """
    if action == 'edit':
        self.bucket_editing_window = BucketEditingUI(
            self, action=action,
            bucketid=str(self.current_selected_bucket_id),
            dashboard_instance=self)
    else:
        self.bucket_editing_window = BucketEditingUI(
            self, action=action, dashboard_instance=self)
    self.bucket_editing_window.show()
def open_single_file_upload_window(self):
    """Open the single-file upload window."""
    self.single_file_upload_window = SingleFileUploadUI(
        self, dashboard_instance=self)
    self.single_file_upload_window.show()
def open_settings_window(self, b):
    """Open the client configuration window.

    ``b`` absorbs the mouse event when used as a mousePressEvent handler.
    Note: the assignment below shadows this method name on the instance
    (kept from the original implementation).
    """
    self.open_settings_window = ClientConfigurationUI(self)
    self.open_settings_window.show()
def delete_selected_file(self):
    """Delete the selected file(s) after a per-file confirmation dialog.

    Shows an information dialog when nothing is selected.  Always
    returns True.
    """
    self.current_bucket_index = \
        self.file_manager_ui.bucket_select_combo_box.currentIndex()
    self.current_selected_bucket_id = \
        self.bucket_id_list[self.current_bucket_index]

    tablemodel = self.file_manager_ui.files_list_tableview.model()
    rows = sorted(
        set(index.row() for index in
            self.file_manager_ui.files_list_tableview.selectedIndexes()))

    selected = False
    for row in rows:
        selected = True
        index = tablemodel.index(row, 2)           # file ID column
        index_filename = tablemodel.index(row, 0)  # file name column
        # We suppose the model data are strings.
        selected_file_id = str(tablemodel.data(index).toString())
        selected_file_name = str(tablemodel.data(index_filename).toString())

        msgBox = QtGui.QMessageBox(
            QtGui.QMessageBox.Question,
            'Question',
            'Are you sure you want to delete this file? File name: %s' % str(selected_file_name).decode('utf-8'),
            (QtGui.QMessageBox.Yes | QtGui.QMessageBox.No))
        result = msgBox.exec_()
        self.__logger.debug(result)

        if result == QtGui.QMessageBox.Yes:
            try:
                self.storj_engine.storj_client.file_remove(
                    str(self.current_selected_bucket_id),
                    str(selected_file_id))
                # Refresh the files list after a successful delete.
                self.createNewFileListUpdateThread()
                QtGui.QMessageBox.about(
                    self,
                    'Success',
                    'File "%s" has been deleted successfully' % selected_file_name)
            except sjexc.StorjBridgeApiError as e:
                self.__logger.error(e)
                QtGui.QMessageBox.about(
                    self,
                    'Error',
                    'Bridge exception occured while trying to delete file: %s' % e)
            except Exception as e:
                self.__logger.error(e)
                QtGui.QMessageBox.about(
                    self,
                    'Error',
                    'Unhandled exception occured while trying to delete file: %s' % e)

    if not selected:
        QtGui.QMessageBox.about(
            self,
            'Information',
            'Please select file which you want to delete')

    return True
def open_mirrors_list_window(self):
    """Open a mirrors-list window for each selected file.

    Warns the user when no file is selected.
    """
    self.current_bucket_index = \
        self.file_manager_ui.bucket_select_combo_box.currentIndex()
    self.current_selected_bucket_id = \
        self.bucket_id_list[self.current_bucket_index]

    tablemodel = self.file_manager_ui.files_list_tableview.model()
    rows = sorted(
        set(index.row() for index in
            self.file_manager_ui.files_list_tableview.selectedIndexes()))

    i = 0
    for row in rows:
        self.__logger.info('Row %d is selected' % row)
        index = tablemodel.index(row, 2)           # file ID column
        index_filename = tablemodel.index(row, 0)  # file name column
        # We suppose the model data are strings.
        selected_file_id = str(tablemodel.data(index).toString())
        selected_file_name = str(tablemodel.data(index_filename).toString())
        self.file_mirrors_list_window = \
            FileMirrorsListUI(self,
                              str(self.current_selected_bucket_id),
                              selected_file_id,
                              filename=selected_file_name)
        self.file_mirrors_list_window.show()
        i += 1

    if i == 0:
        QtGui.QMessageBox.about(self,
                                'Warning!',
                                'Please select file from file list!')

    self.__logger.debug(1)
def createNewFileListUpdateThread(self, a=None):
    """Refresh the files list on a background thread.

    ``a`` absorbs the event argument when this is used as a
    mousePressEvent handler or Qt slot.
    """
    download_thread = threading.Thread(target=self.update_files_list,
                                       args=())
    download_thread.start()
    def update_files_list(self):
        """Rebuild the files table for the currently selected bucket.

        Fetches the file listing from the Storj bridge, fills a fresh
        TableModel (name, human-readable size, ID, and optionally creation
        date), installs it on the table view and re-applies sorting.  A
        loading animation is toggled via the 'changeLoadingGif' signal
        while the fetch is in progress.  Runs on a worker thread (see
        createNewFileListUpdateThread).
        """
        self.tools = Tools()
        model = TableModel(1, 1)
        file_list_header_labels = ['File name', 'File size', 'File ID']
        if DISPLAY_FILE_CREATION_DATE_IN_MAIN:
            file_list_header_labels.append('Creation date')
        model.setHorizontalHeaderLabels(file_list_header_labels)
        self.current_bucket_index = \
            self.file_manager_ui.bucket_select_combo_box.currentIndex()
        self.current_selected_bucket_id = \
            self.bucket_id_list[self.current_bucket_index]
        i = 0
        try:
            self.emit(QtCore.SIGNAL('changeLoadingGif'), True)
            for self.file_details in self.storj_engine.storj_client.bucket_files(str(self.current_selected_bucket_id)):
                # Strip the '[DECRYPTED]' marker from displayed file names.
                item = QtGui.QStandardItem(
                    str(self.file_details['filename'].replace('[DECRYPTED]', '')).decode('utf8'))
                model.setItem(i, 0, item)  # column 0: file name
                file_size_str = self.tools.human_size(int(self.file_details['size']))  # get human readable file size
                item = QtGui.QStandardItem(str(file_size_str))
                model.setItem(i, 1, item)  # column 1: human-readable size
                item = QtGui.QStandardItem(str(self.file_details['id']))
                model.setItem(i, 2, item)  # column 2: bridge file ID
                if DISPLAY_FILE_CREATION_DATE_IN_MAIN:
                    # Normalise the ISO timestamp ('...T...Z') for display.
                    item = QtGui.QStandardItem(str(self.file_details['created']).replace('Z', '').replace('T', ' '))
                    model.setItem(i, 3, item)  # column 3: creation date
                i = i + 1
                self.__logger.debug(self.file_details['filename'].replace('[DECRYPTED]', '').decode('utf8'))
        except sjexc.StorjBridgeApiError as e:
            # Bridge errors leave the (possibly partial) model in place.
            self.__logger.error(e)
        self.file_manager_ui.files_list_tableview.clearFocus()
        self.file_manager_ui.files_list_tableview.setModel(model)
        self.file_manager_ui.files_list_tableview.horizontalHeader().setResizeMode(QtGui.QHeaderView.Stretch)
        if FILE_LIST_SORTING_MAIN_ENABLED:
            self.file_manager_ui.files_list_tableview.setSortingEnabled(True)
            self.file_manager_ui.files_list_tableview.horizontalHeader().sortIndicatorChanged.connect(self.handleSortIndicatorChanged)
            self.file_manager_ui.files_list_tableview.sortByColumn(0, QtCore.Qt.AscendingOrder)
        self.emit(QtCore.SIGNAL('changeLoadingGif'), False)
    def handleSortIndicatorChanged(self, index, order):
        """Keep the sort indicator pinned to column 0 (file name): when a
        header other than column 0 is clicked, restore the indicator on
        column 0 using the model's current sort order."""
        if index != 0:
            self.file_manager_ui.files_list_tableview.horizontalHeader().setSortIndicator(0, self.file_manager_ui.files_list_tableview.model().sortOrder())
def createNewBucketResolveThread(self):
download_thread = threading.Thread(
target=self.initialize_bucket_select_combobox,
args=())
download_thread.start()
def initialize_bucket_select_combobox(self):
self.file_manager_ui.bucket_select_combo_box.clear()
self.buckets_list = []
self.bucket_id_list = []
self.bucket_id_name_2D_list = []
self.storj_engine = StorjEngine() # init StorjEngine
i = 0
self.emit(QtCore.SIGNAL('changeLoadingGif'), True)
try:
for bucket in self.storj_engine.storj_client.bucket_list():
# Append buckets to list
self.bucket_id_name_2D_list.append(
[bucket.id, bucket.name.decode('utf8')])
i += 1
if BUCKETS_LIST_SORTING_ENABLED:
self.bucket_id_name_2D_list = \
sorted(self.bucket_id_name_2D_list,
key=lambda x: x[1],
reverse=False)
for arr_data in self.bucket_id_name_2D_list:
self.buckets_list.append(arr_data[1])
self.bucket_id_list.append(arr_data[0])
except sjexc.StorjBridgeApiError as e:
self.__logger.error(e)
QtGui.QMessageBox.about(
self,
'Unhandled bucket resolving exception',
'Exception: ' % e)
self.file_manager_ui.bucket_select_combo_box.addItems(
self.buckets_list)
self.emit(QtCore.SIGNAL('changeLoadingGif'), False)
    def open_single_file_download_window(self):
        """Open a SingleFileDownloadUI window for every file row currently
        selected in the files table; warn if nothing is selected."""
        self.current_bucket_index = \
            self.file_manager_ui.bucket_select_combo_box.currentIndex()
        self.current_selected_bucket_id = \
            self.bucket_id_list[self.current_bucket_index]
        tablemodel = self.file_manager_ui.files_list_tableview.model()
        # Unique, sorted row indices of the user's selection.
        rows = sorted(set(index.row() for index in
                          self.file_manager_ui.files_list_tableview.selectedIndexes()))
        i = 0
        for row in rows:
            self.__logger.info('Row %d is selected' % row)
            index = tablemodel.index(row, 2)  # column 2 holds the file ID
            # We suppose data are strings
            selected_file_id = str(tablemodel.data(index).toString())
            self.file_mirrors_list_window = SingleFileDownloadUI(
                self,
                self.current_selected_bucket_id,
                selected_file_id)
            self.file_mirrors_list_window.show()
            i += 1
        # i counts opened windows; zero means no selection was made.
        if i == 0:
            QtGui.QMessageBox.about(self,
                                    'Warning!',
                                    'Please select file from file list!')
        self.__logger.debug(1)
| 42.180516 | 155 | 0.611643 | 13,994 | 0.950615 | 0 | 0 | 0 | 0 | 0 | 0 | 1,599 | 0.10862 |
ff18e68d25414cdb3fdcaa970634bcf4be8109ba | 7,008 | py | Python | biocircuits/reg.py | justinbois/biocircuits | 4f696be5a240ce6157e331d67bb78c3b2b3b88cf | [
"BSD-3-Clause"
] | 3 | 2021-03-08T06:19:39.000Z | 2022-03-27T12:59:51.000Z | biocircuits/reg.py | justinbois/be150 | 96afe62ff40276f81d8a86eaa7b54d442517eec7 | [
"BSD-3-Clause"
] | 7 | 2019-04-14T22:14:20.000Z | 2021-05-07T16:51:05.000Z | biocircuits/reg.py | justinbois/be150 | 96afe62ff40276f81d8a86eaa7b54d442517eec7 | [
"BSD-3-Clause"
] | 4 | 2019-04-14T21:24:55.000Z | 2022-03-27T12:59:58.000Z | def rep_hill(x, n):
"""Dimensionless production rate for a gene repressed by x.
Parameters
----------
x : float or NumPy array
Concentration of repressor.
n : float
Hill coefficient.
Returns
-------
output : NumPy array or float
1 / (1 + x**n)
"""
return 1.0 / (1.0 + x ** n)
def act_hill(x, n):
"""Dimensionless production rate for a gene activated by x.
Parameters
----------
x : float or NumPy array
Concentration of activator.
n : float
Hill coefficient.
Returns
-------
output : NumPy array or float
x**n / (1 + x**n)
"""
return 1.0 - rep_hill(x, n)
def aa_and(x, y, nx, ny):
    """AND-gated regulation by two activators, no leakage.

    Dimensionless production rate::

        x**nx * y**ny / (1 + x**nx) / (1 + y**ny)

    Parameters
    ----------
    x, y : float or NumPy array
        Concentrations of the two activators.
    nx, ny : float
        Hill coefficients of the two activators.

    Returns
    -------
    float or NumPy array
    """
    ax = x ** nx
    ay = y ** ny
    return ax * ay / (1.0 + ax) / (1.0 + ay)


def aa_or(x, y, nx, ny):
    """OR-gated regulation by two activators, no leakage.

    Dimensionless production rate::

        (x**nx + y**ny + x**nx * y**ny) / (1 + x**nx) / (1 + y**ny)

    which equals ``(denom - 1) / denom`` for
    ``denom = (1 + x**nx) * (1 + y**ny)``.
    """
    total = (1.0 + x ** nx) * (1.0 + y ** ny)
    return (total - 1.0) / total


def aa_or_single(x, y, nx, ny):
    """OR-gated regulation by two activators with single occupancy,
    no leakage.

    Dimensionless production rate::

        (x**nx + y**ny) / (1 + x**nx + y**ny)
    """
    bound = x ** nx + y ** ny
    return bound / (1.0 + bound)
def rr_and(x, y, nx, ny):
    """AND-gated regulation by two repressors, no leakage.

    Dimensionless production rate::

        1 / (1 + x**nx) / (1 + y**ny)

    Parameters
    ----------
    x, y : float or NumPy array
        Concentrations of the two repressors.
    nx, ny : float
        Hill coefficients of the two repressors.

    Returns
    -------
    float or NumPy array
    """
    rx = 1.0 + x ** nx
    ry = 1.0 + y ** ny
    return 1.0 / rx / ry


def rr_and_single(x, y, nx, ny):
    """AND-gated regulation by two repressors with single occupancy,
    no leakage.

    Dimensionless production rate::

        1 / (1 + x**nx + y**ny)
    """
    xnx = x ** nx
    yny = y ** ny
    return 1.0 / (1.0 + xnx + yny)


def rr_or(x, y, nx, ny):
    """OR-gated regulation by two repressors, no leakage.

    Dimensionless production rate::

        (1 + x**nx + y**ny) / (1 + x**nx) / (1 + y**ny)
    """
    xnx = x ** nx
    yny = y ** ny
    return (1.0 + xnx + yny) / (1.0 + xnx) / (1.0 + yny)
def ar_and(x, y, nx, ny):
    """AND-gated regulation by one activator and one repressor, no leakage.

    Dimensionless production rate::

        x**nx / (1 + x**nx) / (1 + y**ny)

    Parameters
    ----------
    x : float or NumPy array
        Activator concentration.
    y : float or NumPy array
        Repressor concentration.
    nx, ny : float
        Hill coefficients of activator and repressor.

    Returns
    -------
    float or NumPy array
    """
    act = x ** nx
    rep = y ** ny
    return act / (1.0 + act) / (1.0 + rep)


def ar_or(x, y, nx, ny):
    """OR-gated regulation by one activator and one repressor, no leakage.

    Dimensionless production rate::

        (1 + x**nx + x**nx * y**ny) / (1 + x**nx) / (1 + y**ny)
    """
    act = x ** nx
    rep = y ** ny
    return (1.0 + act * (1.0 + rep)) / (1.0 + act) / (1.0 + rep)


def ar_and_single(x, y, nx, ny):
    """AND-gated regulation by one activator and one repressor with
    single occupancy, no leakage.

    Dimensionless production rate::

        x**nx / (1 + x**nx + y**ny)
    """
    act = x ** nx
    rep = y ** ny
    return act / (1.0 + act + rep)


def ar_or_single(x, y, nx, ny):
    """OR-gated regulation by one activator and one repressor with
    single occupancy, no leakage.

    Dimensionless production rate::

        (1 + x**nx) / (1 + x**nx + y**ny)
    """
    act = x ** nx
    rep = y ** ny
    return (1.0 + act) / (1.0 + act + rep)
| 25.67033 | 80 | 0.56835 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,933 | 0.846604 |
ff18e96c20d7d388479a01b1d0934ac6e969a33f | 2,071 | py | Python | fourpisky/log_config.py | 4pisky/fourpisky-core | 1dc9c4f73dfef075e2a27c3c8453d811a5a99e58 | [
"BSD-2-Clause"
] | 2 | 2016-08-25T22:20:58.000Z | 2018-11-18T21:16:11.000Z | fourpisky/log_config.py | 4pisky/fourpisky-core | 1dc9c4f73dfef075e2a27c3c8453d811a5a99e58 | [
"BSD-2-Clause"
] | 2 | 2016-11-01T14:10:58.000Z | 2016-11-01T14:11:39.000Z | fourpisky/log_config.py | 4pisky/fourpisky-core | 1dc9c4f73dfef075e2a27c3c8453d811a5a99e58 | [
"BSD-2-Clause"
] | null | null | null | import logging
from fourpisky.reports import EmailHandler
from fourpisky.local import contacts
# Timestamp formats: full (date, weekday, time) for file/email logs,
# short (time only) for console output.
full_date_fmt = "%y-%m-%d (%a) %H:%M:%S"
short_date_fmt = "%H:%M:%S"
# Detailed record format shared by the file and email handlers.
verbose_formatter = logging.Formatter(
    '%(asctime)s:%(name)s:%(levelname)s:%(message)s',
    # '%(asctime)s:%(levelname)s:%(message)s',
    full_date_fmt)
def setup_logfile_handlers(logger, logfile_pathstem, filters=None,
                           log_chunk_bytesize=5e6):
    """Attach rotating INFO and DEBUG file handlers to ``logger``.

    Two rotating log files are written:

    * ``<logfile_pathstem>.log`` -- records at INFO level and above.
    * ``<logfile_pathstem>.debug.log`` -- records at DEBUG level and above.

    Each file is rotated once it exceeds ``log_chunk_bytesize`` bytes,
    keeping 10 rotated backups.  Any ``filters`` are applied to both
    handlers.
    """
    # Bugfix: ``logging.handlers`` is a submodule that a bare
    # ``import logging`` does not load, so RotatingFileHandler was only
    # resolvable if some other module had imported it first.
    import logging.handlers

    info_logfile_path = logfile_pathstem + ".log"
    debug_logfile_path = logfile_pathstem + ".debug.log"

    info_filehandler = logging.handlers.RotatingFileHandler(
        info_logfile_path, maxBytes=log_chunk_bytesize, backupCount=10)
    info_filehandler.setLevel(logging.INFO)

    debug_filehandler = logging.handlers.RotatingFileHandler(
        debug_logfile_path, maxBytes=log_chunk_bytesize, backupCount=10)
    debug_filehandler.setLevel(logging.DEBUG)

    for fh in (info_filehandler, debug_filehandler):
        fh.setFormatter(verbose_formatter)
        if filters:
            for f in filters:
                fh.addFilter(f)
        logger.addHandler(fh)
def setup_email_errorhandler(logger):
    """Email ERROR-and-above records to the configured error contacts."""
    recipients = [person.email for person in contacts.error_contacts]
    handler = EmailHandler(recipients=recipients)
    handler.setFormatter(verbose_formatter)
    handler.setLevel(logging.ERROR)
    logger.addHandler(handler)
def setup_logging(logfile_pathstem=None, email_errors=True):
    """Configure the root logger and return it.

    Always installs a DEBUG-level stdout handler; optionally adds rotating
    file handlers (when ``logfile_pathstem`` is given) and an emailing
    error handler (when ``email_errors`` is true).  Any handlers already
    installed on the root logger are discarded first.
    """
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(
        logging.Formatter('%(asctime)s:%(levelname)s:%(message)s',
                          short_date_fmt))
    console_handler.setLevel(logging.DEBUG)

    root_logger = logging.getLogger()
    root_logger.setLevel(logging.DEBUG)
    root_logger.handlers = []
    root_logger.addHandler(console_handler)

    if logfile_pathstem:
        setup_logfile_handlers(root_logger, logfile_pathstem)
    if email_errors:
        setup_email_errorhandler(root_logger)
    return root_logger
| 33.403226 | 78 | 0.701593 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 225 | 0.108643 |
ff19f80ce54b21669a5438b2e634f31309d289a7 | 602 | py | Python | Python Advanced/Advanced/Tuples and Sets/Lab/Task05.py | IvanTodorovBG/SoftUni | 7b667f6905d9f695ab1484efbb02b6715f6d569e | [
"MIT"
] | 1 | 2022-03-16T10:23:04.000Z | 2022-03-16T10:23:04.000Z | Python Advanced/Advanced/Tuples and Sets/Lab/Task05.py | IvanTodorovBG/SoftUni | 7b667f6905d9f695ab1484efbb02b6715f6d569e | [
"MIT"
] | null | null | null | Python Advanced/Advanced/Tuples and Sets/Lab/Task05.py | IvanTodorovBG/SoftUni | 7b667f6905d9f695ab1484efbb02b6715f6d569e | [
"MIT"
] | null | null | null | n = int(input())
vip_guest = set()
regular_guest = set()
for _ in range(n):
reservation_code = input()
if reservation_code[0].isdigit():
vip_guest.add(reservation_code)
else:
regular_guest.add(reservation_code)
command = input()
while command != "END":
if command[0].isdigit():
vip_guest.discard(command)
else:
regular_guest.discard(command)
command = input()
missing_guest = len(vip_guest) + len(regular_guest)
print(missing_guest)
for vip in sorted(vip_guest):
print(vip)
for regular in sorted(regular_guest):
print(regular)
| 17.705882 | 51 | 0.669435 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5 | 0.008306 |
ff1a0432ebfc110c3ff64a1bfb40a9d6b66b4a53 | 157 | py | Python | pyz3r/exceptions.py | mgius/pyz3r | f6de06db25a06353b73e9ef7003c80de7073373d | [
"Apache-2.0"
] | null | null | null | pyz3r/exceptions.py | mgius/pyz3r | f6de06db25a06353b73e9ef7003c80de7073373d | [
"Apache-2.0"
] | null | null | null | pyz3r/exceptions.py | mgius/pyz3r | f6de06db25a06353b73e9ef7003c80de7073373d | [
"Apache-2.0"
] | null | null | null | class alttprException(Exception):
pass
class alttprFailedToRetrieve(Exception):
pass
class alttprFailedToGenerate(Exception):
pass
| 14.272727 | 41 | 0.719745 | 143 | 0.910828 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
ff1d37048cb854203f9a4fa6e024de7bc7ef651a | 356 | py | Python | exercicios-Python/desaf109/pythonteste.py | marcelo-py/Exercicios-Python | d654d54821983897dbc377a2d3db97671dd75b5b | [
"MIT"
] | null | null | null | exercicios-Python/desaf109/pythonteste.py | marcelo-py/Exercicios-Python | d654d54821983897dbc377a2d3db97671dd75b5b | [
"MIT"
] | null | null | null | exercicios-Python/desaf109/pythonteste.py | marcelo-py/Exercicios-Python | d654d54821983897dbc377a2d3db97671dd75b5b | [
"MIT"
] | null | null | null | from desaf109 import moeda
p = float(input('Digite um preço: R$'))
print('A metade de {} é {}'.format(moeda.moeda(p), moeda.metade(p, True)))
print('O dobro de {} é {}'.format(moeda.moeda(p), moeda.dobro(p, True)))
print('Se adcionarmos 10% fica {}'.format(moeda.aumentar(p, 10, True)))
print('Se tirarmos 13% fica {}'.format(moeda.diminuir(p, 13, True))) | 59.333333 | 74 | 0.671348 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 119 | 0.331476 |
ff1fdf4c603a65101e9db8bef4cb3a81cab16a6b | 1,153 | py | Python | src/curt/curt/modules/vision/vision_processor_service.py | sanyaade-teachings/cep | 59e22b148c3a95eff521ce75cf4eacbcfb074115 | [
"MIT"
] | 108 | 2021-08-09T17:10:39.000Z | 2022-03-21T21:59:03.000Z | src/curt/curt/modules/vision/vision_processor_service.py | sanyaade-teachings/cep | 59e22b148c3a95eff521ce75cf4eacbcfb074115 | [
"MIT"
] | 15 | 2021-09-19T01:25:25.000Z | 2022-03-28T18:47:49.000Z | src/curt/curt/modules/vision/vision_processor_service.py | sanyaade-teachings/cep | 59e22b148c3a95eff521ce75cf4eacbcfb074115 | [
"MIT"
] | 14 | 2021-08-10T04:42:17.000Z | 2022-03-28T16:30:34.000Z | """
Copyright (C) Cortic Technology Corp. - All Rights Reserved
Written by Michael Ng <michaelng@cortic.ca>, 2021
"""
# need to advertise different processor type, eg CPU, GPU, TPU
import traceback
import logging
from curt.base_service import BaseService
class VisionProcessorService(BaseService):
    """Service that dispatches vision jobs to a worker.

    ``data`` is a sequence whose last element is a flag: truthy means
    "(re)configure the worker with data[0]", falsy means "run inference
    on the payload in data[0]".
    """

    def __init__(self):
        super().__init__("VisionProcessor")

    def execute_function(self, worker, data):
        """Configure *worker* or run inference, depending on ``data[-1]``.

        Returns the worker's result, or None on error (the traceback is
        logged) or when the payload type is unrecognised.
        """
        is_config_request = data[-1]
        payload = data[0]
        try:
            if is_config_request:
                return worker.config_worker(payload)
            if isinstance(payload, list):
                return worker.run_inference(payload)
            if isinstance(payload, dict):
                # Flatten the dict: "ready_data" entries first, then every
                # other value keyed by guid, preserving dict order.
                inference_args = list(payload["ready_data"])
                inference_args.extend(
                    value for guid, value in payload.items()
                    if guid != "ready_data")
                return worker.run_inference(inference_args)
        except Exception:
            logging.error(traceback.format_exc())
ff204abc808df6bc7b2066510508ef7023f10267 | 21 | py | Python | src_py/hat/gateway/devices/modbus/__init__.py | hat-open/hat-gateway | 43d02e3809a1f9dfcb6ee797bb7034b61dd3c469 | [
"Apache-2.0"
] | 2 | 2022-02-01T13:43:08.000Z | 2022-02-24T09:30:36.000Z | src_py/hat/gateway/devices/modbus/__init__.py | hat-open/hat-gateway | 43d02e3809a1f9dfcb6ee797bb7034b61dd3c469 | [
"Apache-2.0"
] | null | null | null | src_py/hat/gateway/devices/modbus/__init__.py | hat-open/hat-gateway | 43d02e3809a1f9dfcb6ee797bb7034b61dd3c469 | [
"Apache-2.0"
] | null | null | null | """Modbus devices"""
| 10.5 | 20 | 0.619048 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 20 | 0.952381 |
205c30821b9360c92edbacec46a9037da22b2a7d | 1,450 | py | Python | lamp/neuralnets.py | bdevl/PGMCPC | cac2fe4304ae42ef2a0d94219b4349d51e86ab2d | [
"MIT"
] | 3 | 2020-10-23T13:40:56.000Z | 2022-02-10T03:42:52.000Z | lamp/neuralnets.py | pkmtum/generative-physics-informed-pde | 63ec383da0f2dbf0d8ffbbb44a670e90d07c132e | [
"MIT"
] | null | null | null | lamp/neuralnets.py | pkmtum/generative-physics-informed-pde | 63ec383da0f2dbf0d8ffbbb44a670e90d07c132e | [
"MIT"
] | null | null | null | import lamp.modules
import torch
import numpy as np
from lamp.utils import get_activation_function
class FeedforwardNeuralNetwork(lamp.modules.BaseModule):
    """Fully-connected feed-forward network built as a ``torch.nn.Sequential``.

    Per step n the stack is: Linear(arch[n] -> arch[n+1]), optionally a
    Dropout, then ReLU for hidden layers or an optional output activation
    after the final Linear.
    """

    def __init__(self, dim_in, dim_out, architecture, dropout, outf=None, dtype=None, device=None):
        """Build the network.

        Parameters
        ----------
        dim_in, dim_out : int
            Input / output dimensionality.
        architecture : list of int
            Hidden layer sizes (may be empty).
        dropout : float or None
            Dropout probability inserted after every Linear layer;
            ``None`` disables dropout.  Booleans are treated as the
            legacy on/off switch with p=0.5.
        outf : optional
            Output activation resolved by ``get_activation_function``;
            ``None`` leaves the output linear.
        dtype, device : optional
            Forwarded to ``self._to`` for casting/placement.
        """
        super(FeedforwardNeuralNetwork, self).__init__()
        architecture = [dim_in] + architecture + [dim_out]
        self.layers = torch.nn.Sequential()
        for n in range(len(architecture)-1):
            self.layers.add_module('fc{}'.format(n+1), torch.nn.Linear(architecture[n], architecture[n+1]))
            if dropout is not None:
                # Bugfix: honour the requested dropout rate; previously the
                # value was ignored and p=0.5 was always used.  Booleans
                # keep the old p=0.5 behaviour for backward compatibility.
                p = 0.5 if isinstance(dropout, bool) else float(dropout)
                self.layers.add_module('dropout{}'.format(n+1), torch.nn.Dropout(p=p))
            if n != len(architecture) - 2:
                self.layers.add_module('activ{}'.format(n+1), torch.nn.ReLU())
            else:
                # NOTE(review): dropout is also appended after the final
                # Linear/output activation above -- confirm this is intended.
                if outf is not None:
                    self.layers.add_module('out_fct', get_activation_function(outf))
        self._to(device=device, dtype=dtype)

    def forward(self, x):
        """Apply the layer stack to ``x``."""
        return self.layers(x)

    @classmethod
    def FromLinearDecay(cls, dim_in, dim_out, num_hidden_layers, outf=None, dropout=None, dtype=None, device=None):
        """Alternate constructor: hidden sizes interpolated linearly from
        ``dim_in`` down to ``dim_out`` over ``num_hidden_layers`` layers."""
        architecture = list(np.linspace(dim_in, dim_out, num_hidden_layers+2).astype(int))
        architecture_hidden = architecture[1:-1]
        return cls(dim_in, dim_out, architecture_hidden, dropout, outf, dtype, device)
205c8698d1be613dcc084ddfdcc8d9120bb54da7 | 130 | py | Python | PythonExercicios/ex010.py | VitorFRodrigues/Python-curso | af75ff4a7ca14bc7e67b4f3362af837d355b1746 | [
"MIT"
] | null | null | null | PythonExercicios/ex010.py | VitorFRodrigues/Python-curso | af75ff4a7ca14bc7e67b4f3362af837d355b1746 | [
"MIT"
] | null | null | null | PythonExercicios/ex010.py | VitorFRodrigues/Python-curso | af75ff4a7ca14bc7e67b4f3362af837d355b1746 | [
"MIT"
] | null | null | null | n = float(input('Quanto dinheiro você tem na carteira? R$'))
print('Com R${:.2f} você pode comprar US${:.2f}.'.format(n, n/3.27))
| 43.333333 | 68 | 0.646154 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 87 | 0.659091 |
205c895c1f60cd3b978288b5cd1339799a85f756 | 3,267 | py | Python | tests/data/expected_tabulated.py | CozyDoomer/pypistats | 39e4415c736d025d16aa0131d2107756d0f127fa | [
"MIT"
] | 1 | 2020-09-13T14:18:09.000Z | 2020-09-13T14:18:09.000Z | tests/data/expected_tabulated.py | CozyDoomer/pypistats | 39e4415c736d025d16aa0131d2107756d0f127fa | [
"MIT"
] | 5 | 2020-09-13T14:18:30.000Z | 2020-09-13T14:33:37.000Z | tests/data/expected_tabulated.py | Smirenost/pypistats | 431201080061ecd41d58b12ad4837de6883d66ae | [
"MIT"
] | null | null | null | EXPECTED_TABULATED_HTML = """
<table>
<thead>
<tr>
<th>category</th>
<th>date</th>
<th>downloads</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">2.6</td>
<td align="left">2018-08-15</td>
<td align="right">51</td>
</tr>
<tr>
<td align="left">2.7</td>
<td align="left">2018-08-15</td>
<td align="right">63,749</td>
</tr>
<tr>
<td align="left">3.2</td>
<td align="left">2018-08-15</td>
<td align="right">2</td>
</tr>
<tr>
<td align="left">3.3</td>
<td align="left">2018-08-15</td>
<td align="right">40</td>
</tr>
<tr>
<td align="left">3.4</td>
<td align="left">2018-08-15</td>
<td align="right">6,095</td>
</tr>
<tr>
<td align="left">3.5</td>
<td align="left">2018-08-15</td>
<td align="right">20,358</td>
</tr>
<tr>
<td align="left">3.6</td>
<td align="left">2018-08-15</td>
<td align="right">35,274</td>
</tr>
<tr>
<td align="left">3.7</td>
<td align="left">2018-08-15</td>
<td align="right">6,595</td>
</tr>
<tr>
<td align="left">3.8</td>
<td align="left">2018-08-15</td>
<td align="right">3</td>
</tr>
<tr>
<td align="left">null</td>
<td align="left">2018-08-15</td>
<td align="right">1,019</td>
</tr>
</tbody>
</table>
"""
EXPECTED_TABULATED_MD = """
| category | date | downloads |
|----------|------------|----------:|
| 2.6 | 2018-08-15 | 51 |
| 2.7 | 2018-08-15 | 63,749 |
| 3.2 | 2018-08-15 | 2 |
| 3.3 | 2018-08-15 | 40 |
| 3.4 | 2018-08-15 | 6,095 |
| 3.5 | 2018-08-15 | 20,358 |
| 3.6 | 2018-08-15 | 35,274 |
| 3.7 | 2018-08-15 | 6,595 |
| 3.8 | 2018-08-15 | 3 |
| null | 2018-08-15 | 1,019 |
"""
EXPECTED_TABULATED_RST = """
.. table::
========== ============ ===========
category date downloads
========== ============ ===========
2.6 2018-08-15 51
2.7 2018-08-15 63,749
3.2 2018-08-15 2
3.3 2018-08-15 40
3.4 2018-08-15 6,095
3.5 2018-08-15 20,358
3.6 2018-08-15 35,274
3.7 2018-08-15 6,595
3.8 2018-08-15 3
null 2018-08-15 1,019
========== ============ ===========
""" # noqa: W291
EXPECTED_TABULATED_TSV = """
"category" \t "date" \t "downloads"
"2.6" \t "2018-08-15" \t 51
"2.7" \t "2018-08-15" \t 63,749
"3.2" \t "2018-08-15" \t 2
"3.3" \t "2018-08-15" \t 40
"3.4" \t "2018-08-15" \t 6,095
"3.5" \t "2018-08-15" \t 20,358
"3.6" \t "2018-08-15" \t 35,274
"3.7" \t "2018-08-15" \t 6,595
"3.8" \t "2018-08-15" \t 3
"null" \t "2018-08-15" \t 1,019
""" # noqa: W291
| 28.911504 | 44 | 0.379859 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,155 | 0.965718 |
20605b002bc8502c420a5ef9e4b77ba1cb4d2244 | 2,474 | py | Python | pressio4py/apps/burgers1d.py | Pressio/pressio4py | 36676dbd112a7c7960ccbf302ff14d4376c819ec | [
"Unlicense",
"BSD-3-Clause"
] | 4 | 2020-07-06T20:01:39.000Z | 2022-03-05T09:23:40.000Z | pressio4py/apps/burgers1d.py | Pressio/pressio4py | 36676dbd112a7c7960ccbf302ff14d4376c819ec | [
"Unlicense",
"BSD-3-Clause"
] | 19 | 2020-02-27T20:52:53.000Z | 2022-01-13T16:24:49.000Z | pressio4py/apps/burgers1d.py | Pressio/pressio4py | 36676dbd112a7c7960ccbf302ff14d4376c819ec | [
"Unlicense",
"BSD-3-Clause"
] | 1 | 2022-03-03T16:05:09.000Z | 2022-03-03T16:05:09.000Z |
import numpy as np
import math
from scipy.sparse import csr_matrix, diags
from scipy import linalg
import time
# Use numba-compiled kernels when numba is installed; otherwise fall back
# to pure-Python implementations with identical signatures (see below).
try:
    from numba import jit, njit
    numbaOn = True
except ModuleNotFoundError:
    numbaOn = False
# velocityImplNumba: write the semi-discrete Burgers RHS into f (in place):
#   f[i] = (u[i-1]^2 - u[i]^2) / (2*dx) + expVec[i]
# with the left boundary closed by the inflow value mu0.  The jitted and
# pure-Python variants are intentionally byte-for-byte identical bodies.
if numbaOn:
    @njit(["void(float64[:], f8, float64[:], float64[:], f8, f8)"])
    def velocityImplNumba(u, t, f, expVec, dxInvHalf, mu0):
        n = len(u)
        uSq = np.square(u)
        f[0] = dxInvHalf * (math.pow(mu0, 2) - uSq[0]) + expVec[0]
        for i in range(1,n):
            f[i] = dxInvHalf * ( uSq[i-1] - uSq[i] ) + expVec[i]
else:
    # Pure-Python fallback used when numba is unavailable.
    def velocityImplNumba(u, t, f, expVec, dxInvHalf, mu0):
        n = len(u)
        uSq = np.square(u)
        f[0] = dxInvHalf * (math.pow(mu0, 2) - uSq[0]) + expVec[0]
        for i in range(1,n):
            f[i] = dxInvHalf * ( uSq[i-1] - uSq[i] ) + expVec[i]
# fillDiag: fill the main (diag) and lower (ldiag) Jacobian diagonals in
# place: d f[i]/d u[i] = -u[i]/dx and d f[i]/d u[i-1] = u[i-1]/dx.
if numbaOn:
    @njit(["void(float64[:], float64[:], float64[:], f8)"])
    def fillDiag(u, diag, ldiag, dxInv):
        n = len(u)
        for i in range(n-1):
            diag[i] = -dxInv*u[i]
            ldiag[i] = dxInv*u[i]
        diag[n-1] = -dxInv*u[n-1]
else:
    # Pure-Python fallback used when numba is unavailable.
    def fillDiag(u, diag, ldiag, dxInv):
        n = len(u)
        for i in range(n-1):
            diag[i] = -dxInv*u[i]
            ldiag[i] = dxInv*u[i]
        diag[n-1] = -dxInv*u[n-1]
class Burgers1d:
    """Finite-volume discretization of the 1D Burgers equation on
    [xL_, xR_] with ``Ncell`` uniform cells.

    State arrays (grid, initial condition, source term, Jacobian
    diagonals) are allocated in ``__init__`` and filled by ``setup()``.
    """

    def __init__(self, Ncell):
        """Allocate all state for a grid of ``Ncell`` cells and run setup()."""
        self.mu_ = np.array([5., 0.02, 0.02])
        self.xL_ = 0.
        self.xR_ = 100.
        self.Ncell_ = Ncell
        self.dx_ = 0.
        self.dxInv_ = 0.
        self.dxInvHalf_ = 0.
        self.xGrid_ = np.zeros(Ncell)
        self.U0_ = np.zeros(Ncell)
        self.expVec_ = np.zeros(Ncell)
        self.diag_ = np.zeros(Ncell)
        self.ldiag_ = np.zeros(Ncell - 1)
        self.setup()

    def setup(self):
        """Compute mesh spacing, cell centres, initial state and the
        exponential source term expVec_ = mu1 * exp(mu2 * x)."""
        self.dx_ = (self.xR_ - self.xL_) / float(self.Ncell_)
        self.dxInv_ = 1.0 / self.dx_
        self.dxInvHalf_ = 0.5 * self.dxInv_
        for cell in range(self.Ncell_):
            self.U0_[cell] = 1.
            self.xGrid_[cell] = self.dx_ * cell + self.dx_ * 0.5
        self.expVec_ = self.mu_[1] * np.exp(self.mu_[2] * self.xGrid_)

    def createVelocity(self):
        """Return a zeroed velocity (RHS) vector of the right size."""
        return np.zeros(self.Ncell_)

    def velocity(self, u, t, f):
        """Evaluate the RHS at state ``u``, writing into ``f`` in place."""
        velocityImplNumba(u, t, f[:], self.expVec_,
                          self.dxInvHalf_, self.mu_[0])

    def createApplyJacobianResult(self, B):
        """Return a zeroed result container shaped like ``B``."""
        return np.zeros_like(B)

    def applyJacobian(self, u, B, t, result):
        """Compute J(u) @ B into ``result`` in place."""
        jac = self.jacobian(u, t)
        result[:] = jac.dot(B)

    def jacobian(self, u, t):
        """Return the bidiagonal Jacobian at state ``u`` as a CSR matrix."""
        fillDiag(u, self.diag_, self.ldiag_, self.dxInv_)
        return diags([self.ldiag_, self.diag_], [-1, 0], format='csr')
| 28.113636 | 68 | 0.590542 | 1,283 | 0.518593 | 0 | 0 | 526 | 0.212611 | 0 | 0 | 105 | 0.042441 |
20608dda313ebd7ffbdc01a5aeefc1d8ecdc5d47 | 5,029 | py | Python | src/model/model.py | Alexei95/FasTrCaps | c0986b77ece9c562dcce06156dffcb592c1f6c11 | [
"MIT"
] | 2 | 2020-08-26T15:33:31.000Z | 2021-01-30T22:56:30.000Z | src/model/model.py | Alexei95/FasTrCaps | c0986b77ece9c562dcce06156dffcb592c1f6c11 | [
"MIT"
] | null | null | null | src/model/model.py | Alexei95/FasTrCaps | c0986b77ece9c562dcce06156dffcb592c1f6c11 | [
"MIT"
] | null | null | null | import math
import pathlib
import sys
import torch
import torch.nn as nn
PROJECT_DIR = pathlib.Path(__file__).absolute().parent.parent.parent # main directory, the parent of src
if str(PROJECT_DIR) not in sys.path:
sys.path.append(str(PROJECT_DIR))
from src.model.ConvLayer import ConvLayer
from src.model.PrimaryCaps import PrimaryCaps
from src.model.DigitCaps import DigitCaps
from src.model.Decoder import Decoder
# Default hyper-parameters (28x28 single-channel input, 10 classes --
# an MNIST-style configuration).
INPUT_WIDTH = 28
NUM_CONV_IN_CHANNELS = 1
CONV_KERNEL = 9
CONV_STRIDE = 1
NUM_CONV_OUT_CHANNELS = 256
NUM_PRIMARY_CHANNELS = 32
PRIMARY_CAPS_DIM = 8
PRIMARY_KERNEL = 9
PRIMARY_STRIDE = 2
DIGIT_CAPS_DIM = 16
NUM_CLASSES = 10
REGULARIZATION_SCALE = 0.0005
ITER = 3  # iteration count passed to DigitCaps (presumably routing iterations -- confirm there)
DEC1_DIM = 512
DEC2_DIM = 1024
CUDA_ENABLED = True
SMALL_DECODER = False
DEVICE = 'cuda:0'
# Weight/bias sharing knobs for the three capsule layers; 0 means disabled.
CONV_SHARED_WEIGHTS = 0 # disabled
PRIMARY_SHARED_WEIGHTS = 0 # disabled
DIGIT_SHARED_WEIGHTS = 0 # disabled
CONV_SHARED_BIAS = CONV_SHARED_WEIGHTS # to have coherency as default
SQUASH_APPROX = False
class Net(nn.Module):
    """Capsule-network model: ConvLayer -> PrimaryCaps -> DigitCaps, plus a
    Decoder that reconstructs the input from the digit-capsule output.

    All constructor defaults come from the module-level constants above.
    """
    def __init__(self,
                 input_wh=INPUT_WIDTH,
                 num_conv_in_channels=NUM_CONV_IN_CHANNELS,
                 conv_kernel=CONV_KERNEL,
                 conv_stride=CONV_STRIDE,
                 num_conv_out_channels=NUM_CONV_OUT_CHANNELS,
                 num_primary_channels=NUM_PRIMARY_CHANNELS,
                 primary_caps_dim=PRIMARY_CAPS_DIM,
                 primary_kernel=PRIMARY_KERNEL,
                 primary_stride=PRIMARY_STRIDE,
                 digit_caps_dim=DIGIT_CAPS_DIM,
                 num_classes=NUM_CLASSES,
                 regularization_scale=REGULARIZATION_SCALE,
                 iter=ITER,
                 dec1_dim=DEC1_DIM,
                 dec2_dim=DEC2_DIM,
                 cuda_enabled=CUDA_ENABLED,
                 small_decoder=SMALL_DECODER,
                 device=DEVICE,
                 conv_shared_weights=CONV_SHARED_WEIGHTS,
                 primary_shared_weights=PRIMARY_SHARED_WEIGHTS,
                 digit_shared_weights=DIGIT_SHARED_WEIGHTS,
                 conv_shared_bias=CONV_SHARED_BIAS,
                 squash_approx=SQUASH_APPROX):
        """Assemble the four sub-modules; see module constants for defaults."""
        super(Net, self).__init__()
        self.cuda_enabled = cuda_enabled
        if cuda_enabled:
            self.device = torch.device(device)
        else:
            self.device = torch.device('cpu')
        self.regularization_scale = regularization_scale
        # Spatial size after each layer: floor((W - K + S) / S).
        conv_dimension = math.floor(
            (input_wh-conv_kernel+conv_stride)/conv_stride)
        primary_dimension = math.floor(
            (conv_dimension-primary_kernel+primary_stride)/primary_stride)
        self.conv = ConvLayer(in_channels=num_conv_in_channels,
                              out_channels=num_conv_out_channels,
                              kernel_size=conv_kernel,
                              stride=conv_stride,
                              cuda_enabled=cuda_enabled,
                              device=device,
                              shared_weights=conv_shared_weights,
                              shared_bias=conv_shared_bias)
        self.primary = PrimaryCaps(in_channels=num_conv_out_channels,
                                   out_channels=num_primary_channels,
                                   out_caps_dim=primary_caps_dim,
                                   kernel_size=primary_kernel,
                                   stride=primary_stride,
                                   cuda_enabled=cuda_enabled,
                                   device=device,
                                   shared_weights=primary_shared_weights,
                                   squash_approx=squash_approx)
        # DigitCaps consumes all primary capsules flattened over channels
        # and the (primary_dimension x primary_dimension) spatial grid.
        self.digit = DigitCaps(in_dim=num_primary_channels*primary_dimension*primary_dimension,
                               out_dim=num_classes,
                               in_caps_dim=primary_caps_dim,
                               out_caps_dim=digit_caps_dim,
                               iter=iter,
                               cuda_enabled=cuda_enabled,
                               device=device,
                               shared_weights=digit_shared_weights,
                               squash_approx=squash_approx)
        # Small decoder takes a single capsule (digit_caps_dim); the full
        # decoder takes all class capsules concatenated.
        decoder_in_dim = digit_caps_dim if small_decoder else num_classes * digit_caps_dim
        self.decoder = Decoder(in_dim=decoder_in_dim,
                               l1_dim=dec1_dim,
                               l2_dim=dec2_dim,
                               out_dim=input_wh*input_wh,
                               device=device,
                               small_decoder=small_decoder)
    def forward(self, x, labels, is_training=True):
        """Run the capsule pipeline on ``x``; return (digit capsule
        output, reconstruction produced by the decoder)."""
        out_conv = self.conv(x)
        out_primary = self.primary(out_conv)
        out_digit = self.digit(out_primary)
        reconstruction = self.decoder(out_digit, labels, is_training)
        return out_digit, reconstruction
| 38.684615 | 105 | 0.573673 | 4,002 | 0.795784 | 0 | 0 | 0 | 0 | 0 | 0 | 113 | 0.02247 |
2061c5f307d5379a34411f50ff8d63a1748c107a | 2,444 | py | Python | whyis/blueprint/entity/get_entity.py | aswallace/whyis | 10a3e19f2a35e66618b323c5ec74dd60eeec9ab7 | [
"Apache-2.0"
] | 31 | 2018-05-30T02:41:23.000Z | 2021-10-17T01:25:20.000Z | whyis/blueprint/entity/get_entity.py | aswallace/whyis | 10a3e19f2a35e66618b323c5ec74dd60eeec9ab7 | [
"Apache-2.0"
] | 115 | 2018-04-07T00:59:11.000Z | 2022-03-02T03:06:45.000Z | whyis/blueprint/entity/get_entity.py | aswallace/whyis | 10a3e19f2a35e66618b323c5ec74dd60eeec9ab7 | [
"Apache-2.0"
] | 25 | 2018-04-07T00:49:55.000Z | 2021-09-28T14:29:18.000Z | from flask import current_app, request, Response, make_response
from rdflib import ConjunctiveGraph
from werkzeug.exceptions import abort
from depot.middleware import FileServeApp
from .entity_blueprint import entity_blueprint
from whyis.data_extensions import DATA_EXTENSIONS
from whyis.data_formats import DATA_FORMATS
from whyis.decorator import conditional_login_required
import sadi.mimeparse
from whyis.html_mime_types import HTML_MIME_TYPES
@entity_blueprint.route('/about.<format>', methods=['GET'])
@entity_blueprint.route('/<path:name>', methods=['GET'])
@entity_blueprint.route('/<path:name>.<format>', methods=['GET'])
@entity_blueprint.route('/', methods=['GET'])
@entity_blueprint.route('/home', methods=['GET'])
@entity_blueprint.route('/about', methods=['GET'])
@conditional_login_required
def view(name=None, format=None, view=None):
    """Serve an entity by name/URI with content negotiation.

    Resolution order:
    1. If the entity has an attached file (whyis:hasFileID) and no
       explicit ``?view=`` argument, stream the file from the nanopub
       depot (preferred) or the file depot.
    2. If ``?view=`` is present or the negotiated MIME type is HTML,
       render the entity through the view machinery.
    3. If the negotiated type is a known RDF serialization, return the
       'describe' view re-serialized into that format.
    4. Otherwise fall back to the rendered view.
    """
    current_app.db.store.nsBindings = {}
    entity, content_type = current_app.get_entity_uri(name, format)
    resource = current_app.get_resource(entity)
    # 'view' is the default view
    fileid = resource.value(current_app.NS.whyis.hasFileID)
    if fileid is not None and 'view' not in request.args:
        fileid = fileid.value
        f = None
        if current_app.nanopub_depot is not None and current_app.nanopub_depot.exists(fileid):
            f = current_app.nanopub_depot.get(fileid)
        elif current_app.file_depot.exists(fileid):
            f = current_app.file_depot.get(fileid)
        if f is not None:
            # Serve the stored file with a configurable cache max-age
            # (default one week).
            fsa = FileServeApp(f, current_app.config["file_archive"].get("cache_max_age",3600*24*7))
            return fsa
    if content_type is None:
        # No explicit format in the URL: use the Accept header, defaulting
        # to turtle.
        content_type = request.headers['Accept'] if 'Accept' in request.headers else 'text/turtle'
    fmt = sadi.mimeparse.best_match([mt for mt in list(DATA_FORMATS.keys()) if mt is not None],content_type)
    if 'view' in request.args or fmt in HTML_MIME_TYPES:
        return current_app.render_view(resource)
    elif fmt in DATA_FORMATS:
        # Re-serialize the JSON-LD 'describe' view into the requested
        # RDF format.
        output_graph = ConjunctiveGraph()
        result, status, headers = current_app.render_view(resource, view='describe')
        output_graph.parse(data=result, format="json-ld")
        return output_graph.serialize(format=DATA_FORMATS[fmt]), 200, {'Content-Type':content_type}
    else:
        return current_app.render_view(resource)
| 43.642857 | 108 | 0.73036 | 0 | 0 | 0 | 0 | 1,991 | 0.814648 | 0 | 0 | 334 | 0.136661 |
2063c95543ab6e4ef6c980fc98b25c5894306406 | 9,816 | py | Python | tests/test_pipeline.py | phvu/cebes-python | 41e0a687feeac437eadcab1a4d1f0a041986bd4e | [
"Apache-2.0"
] | null | null | null | tests/test_pipeline.py | phvu/cebes-python | 41e0a687feeac437eadcab1a4d1f0a041986bd4e | [
"Apache-2.0"
] | null | null | null | tests/test_pipeline.py | phvu/cebes-python | 41e0a687feeac437eadcab1a4d1f0a041986bd4e | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 The Cebes Authors. All Rights Reserved.
#
# Licensed under the Apache License, version 2.0 (the "License").
# You may not use this work except in compliance with the License,
# which is available at www.apache.org/licenses/LICENSE-2.0
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied, as more fully set forth in the License.
#
# See the NOTICE file distributed with this work for information regarding copyright ownership.
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import six
from pycebes.core import pipeline_api as pl
from pycebes.core.dataframe import Dataframe
from pycebes.core.exceptions import ServerException
from pycebes.core.pipeline import Pipeline, Model
from tests import test_base
class TestPipeline(test_base.TestBase):
    """Integration tests for Pipeline construction and execution.

    NOTE(review): ``ppl.run`` talks to a live Cebes server (set up by
    ``test_base.TestBase``); these are integration tests, not unit tests.
    """

    def test_stage_general(self):
        """Stages get auto-generated names, and names must be unique."""
        df = self.cylinder_bands
        with Pipeline() as ppl:
            s = pl.drop(df, ['hardener', 'customer'])
            name = s.get_name()
            self.assertIsNotNone(name)
            # Re-using an existing stage name is rejected.
            with self.assertRaises(ValueError):
                pl.drop(df, ['customer'], name=name)
        self.assertIsInstance(ppl.stages, dict)
        self.assertIsInstance(repr(ppl), six.string_types)

    def test_drop(self):
        """Running a drop stage removes the named columns; pipeline magic methods."""
        df = self.cylinder_bands
        with Pipeline() as ppl:
            d = pl.drop(df, ['hardener', 'customer'], name='drop_stage')
        df2 = ppl.run(d.output_df)
        self.assertIsInstance(df2, Dataframe)
        self.assertEqual(len(df2.columns) + 2, len(df.columns))
        self.assertTrue('hardener' not in df2.columns)
        self.assertTrue('customer' not in df2.columns)
        # magic methods
        self.assertTrue(d in ppl)
        self.assertTrue('drop_stage' in ppl)
        self.assertEqual(d, ppl['drop_stage'])
        # cannot add more stages into the pipeline
        with self.assertRaises(ValueError) as ex:
            with ppl:
                pl.drop(df, ['customer'])
        self.assertIn('Cannot add more stage into this Pipeline', '{}'.format(ex.exception))

    def test_placeholder(self):
        """A dataframe placeholder must be fed at run time."""
        with Pipeline() as ppl:
            data = pl.placeholder(pl.PlaceholderTypes.DATAFRAME)
            d = pl.drop(df=data, col_names=['hardener', 'customer'])
        # Unfed placeholder: the server rejects the run.
        with self.assertRaises(ServerException) as ex:
            ppl.run(d.output_df)
        self.assertTrue('Input slot inputVal is undefined' in '{}'.format(ex.exception))
        df = self.cylinder_bands
        df2 = ppl.run(d.output_df, feeds={data: df})
        self.assertIsInstance(df2, Dataframe)
        self.assertEqual(len(df2.columns) + 2, len(df.columns))
        self.assertTrue('hardener' not in df2.columns)
        self.assertTrue('customer' not in df2.columns)

    def test_value_placeholder(self):
        """Value placeholders (here an array of column names) can be fed too."""
        with Pipeline() as ppl:
            data = pl.placeholder(pl.PlaceholderTypes.DATAFRAME)
            cols = pl.placeholder(pl.PlaceholderTypes.VALUE, value_type='array')
            d = pl.drop(df=data, col_names=cols)
        with self.assertRaises(ServerException) as ex:
            ppl.run(d.output_df)
        self.assertTrue('Input slot inputVal is undefined' in '{}'.format(ex.exception))
        df = self.cylinder_bands
        df2 = ppl.run(d.output_df, feeds={data: df, cols: ['hardener', 'customer']})
        self.assertIsInstance(df2, Dataframe)
        self.assertEqual(len(df2.columns) + 2, len(df.columns))
        self.assertTrue('hardener' not in df2.columns)
        self.assertTrue('customer' not in df2.columns)

    def test_linear_regression_with_vector_assembler(self):
        """End-to-end: vector assembler feeding a linear regression estimator."""
        df = self.cylinder_bands
        self.assertGreater(len(df), 10)
        df = df.dropna(columns=['viscosity', 'proof_cut', 'caliper'])
        self.assertGreater(len(df), 10)
        with Pipeline() as ppl:
            assembler = pl.vector_assembler(df, ['viscosity', 'proof_cut'], 'features')
            s = pl.linear_regression(assembler.output_df, features_col='features',
                                     label_col='caliper', prediction_col='caliper_predict', reg_param=0.001)
        r = ppl.run([s.output_df, s.model, assembler.output_df])
        self.assertEqual(len(r), 3)
        df1 = r[0]
        self.assertIsInstance(df1, Dataframe)
        self.assertEqual(len(df1), len(df))
        self.assertEqual(len(df1.columns), len(df.columns) + 2)
        self.assertTrue('features' in df1.columns)
        self.assertTrue('caliper_predict' in df1.columns)
        m = r[1]
        self.assertIsInstance(m, Model)
        self.assertEqual(m.inputs['reg_param'], 0.001)
        self.assertIsInstance(m.metadata, dict)
        df2 = r[2]
        self.assertIsInstance(df2, Dataframe)
        self.assertEqual(len(df2), len(df))
        self.assertEqual(len(df2.columns), len(df.columns) + 1)
        self.assertTrue('features' in df2.columns)

    def test_linear_regression_with_vector_assembler_with_placeholder(self):
        """Same pipeline as above but with the input dataframe fed via a placeholder."""
        # define the pipeline
        with Pipeline() as ppl:
            inp = pl.placeholder(pl.PlaceholderTypes.DATAFRAME)
            assembler = pl.vector_assembler(inp, ['viscosity', 'proof_cut'], 'features')
            lr = pl.linear_regression(assembler.output_df, features_col='features',
                                      label_col='caliper', prediction_col='caliper_predict', reg_param=0.001)
        # fail because placeholder is not filled
        with self.assertRaises(ServerException) as ex:
            ppl.run([lr.output_df, lr.model, assembler.output_df])
        self.assertTrue('Input slot inputVal is undefined' in '{}'.format(ex.exception))
        # run again with feeds into the placeholder
        df = self.cylinder_bands.dropna(columns=['viscosity', 'proof_cut', 'caliper'])
        self.assertGreater(len(df), 10)
        r = ppl.run([lr.output_df, lr.model, assembler.output_df], feeds={inp: df})
        self.assertEqual(len(r), 3)
        df1 = r[0]
        self.assertIsInstance(df1, Dataframe)
        self.assertEqual(len(df1), len(df))
        self.assertEqual(len(df1.columns), len(df.columns) + 2)
        self.assertTrue('features' in df1.columns)
        self.assertTrue('caliper_predict' in df1.columns)
        pandas_df = df1.take(5)
        self.assertEqual(len(pandas_df), 5)
        m = r[1]
        self.assertIsInstance(m, Model)
        self.assertEqual(m.inputs['reg_param'], 0.001)
        self.assertIsInstance(m.metadata, dict)
        df2 = r[2]
        self.assertIsInstance(df2, Dataframe)
        self.assertEqual(len(df2), len(df))
        self.assertEqual(len(df2.columns), len(df.columns) + 1)
        self.assertTrue('features' in df2.columns)
        # Run again with a different input dataframe, model ID shouldn't change
        new_df = df.where(df.viscosity > 40)
        r2 = ppl.run([lr.output_df, lr.model, assembler.output_df], feeds={inp: new_df})
        self.assertEqual(r2[1].id, r[1].id)

    def test_linear_regression_with_vector_assembler_with_placeholders(self):
        """Feed both dataframe and stage-slot values; re-run with other columns."""
        # define the pipeline
        with Pipeline() as ppl:
            inp_df = pl.placeholder(pl.PlaceholderTypes.DATAFRAME)
            inp_col = pl.placeholder(pl.PlaceholderTypes.VALUE)
            assembler = pl.vector_assembler(inp_df, [''], inp_col)
            s = pl.linear_regression(assembler.output_df, features_col='features',
                                     label_col='caliper', prediction_col='caliper_predict', reg_param=0.001)
        df = self.cylinder_bands.dropna(columns=['viscosity', 'proof_cut', 'caliper'])
        self.assertGreater(len(df), 10)
        r = ppl.run([s.output_df, s.model, assembler.output_df],
                    feeds={inp_df: df, inp_col: 'features', assembler.input_cols: ['viscosity', 'proof_cut']})
        self.assertEqual(len(r), 3)
        df1 = r[0]
        self.assertIsInstance(df1, Dataframe)
        self.assertEqual(len(df1), len(df))
        self.assertEqual(len(df1.columns), len(df.columns) + 2)
        self.assertTrue('features' in df1.columns)
        self.assertTrue('caliper_predict' in df1.columns)
        m = r[1]
        self.assertIsInstance(m, Model)
        self.assertEqual(m.inputs['reg_param'], 0.001)
        self.assertIsInstance(m.metadata, dict)
        df2 = r[2]
        self.assertIsInstance(df2, Dataframe)
        self.assertEqual(len(df2), len(df))
        self.assertEqual(len(df2.columns), len(df.columns) + 1)
        self.assertTrue('features' in df2.columns)
        # assemble some other columns
        df = self.cylinder_bands.dropna(columns=['viscosity', 'proof_cut', 'ink_temperature', 'caliper'])
        self.assertGreater(len(df), 10)
        r = ppl.run([s.output_df, s.model, assembler.output_df],
                    feeds={inp_df: df, inp_col: 'new_features',
                           assembler.input_cols: ['viscosity', 'proof_cut', 'ink_temperature'],
                           s.features_col: 'new_features'})
        self.assertEqual(len(r), 3)
        df1 = r[0]
        self.assertIsInstance(df1, Dataframe)
        self.assertEqual(len(df1), len(df))
        self.assertEqual(len(df1.columns), len(df.columns) + 2)
        self.assertTrue('new_features' in df1.columns)
        self.assertTrue('caliper_predict' in df1.columns)
        m = r[1]
        self.assertIsInstance(m, Model)
        self.assertEqual(m.inputs['reg_param'], 0.001)
        self.assertIsInstance(m.metadata, dict)
        df2 = r[2]
        self.assertIsInstance(df2, Dataframe)
        self.assertEqual(len(df2), len(df))
        self.assertEqual(len(df2.columns), len(df.columns) + 1)
        self.assertTrue('new_features' in df2.columns)
# Allow running this test module directly (e.g. ``python test_pipeline.py``).
if __name__ == '__main__':
    unittest.main()
| 40.9 | 110 | 0.640587 | 8,878 | 0.904442 | 0 | 0 | 0 | 0 | 0 | 0 | 1,778 | 0.181133 |
2064caf0142b4319c92d60dbabf59d75a465327e | 906 | py | Python | Chapter04/currency_converter/core/currency.py | ariwells2001/Python-Programming-Blueprints | 23981ab304e65bcc24560393c75fd5ee85c96ce5 | [
"MIT"
] | 72 | 2017-12-19T09:19:40.000Z | 2021-11-08T13:13:34.000Z | Chapter04/currency_converter/core/currency.py | ariwells2001/Python-Programming-Blueprints | 23981ab304e65bcc24560393c75fd5ee85c96ce5 | [
"MIT"
] | 20 | 2018-03-21T01:15:27.000Z | 2021-09-08T00:59:40.000Z | Chapter04/currency_converter/core/currency.py | ariwells2001/Python-Programming-Blueprints | 23981ab304e65bcc24560393c75fd5ee85c96ce5 | [
"MIT"
] | 53 | 2017-12-19T09:19:42.000Z | 2022-03-06T02:21:10.000Z | from enum import Enum
class Currency(Enum):
    """Supported currencies for the converter.

    Member names are ISO 4217 currency codes; member values are
    human-readable display names.
    """
    AUD = 'Australia Dollar'
    BGN = 'Bulgaria Lev'
    BRL = 'Brazil Real'
    CAD = 'Canada Dollar'
    CHF = 'Switzerland Franc'
    CNY = 'China Yuan/Renminbi'
    CZK = 'Czech Koruna'
    DKK = 'Denmark Krone'
    GBP = 'Great Britain Pound'
    HKD = 'Hong Kong Dollar'
    HRK = 'Croatia Kuna'
    HUF = 'Hungary Forint'
    IDR = 'Indonesia Rupiah'
    ILS = 'Israel New Shekel'
    INR = 'India Rupee'
    JPY = 'Japan Yen'
    KRW = 'South Korea Won'
    MXN = 'Mexico Peso'
    MYR = 'Malaysia Ringgit'
    NOK = 'Norway Kroner'
    NZD = 'New Zealand Dollar'
    PHP = 'Philippines Peso'
    PLN = 'Poland Zloty'
    RON = 'Romania New Lei'
    RUB = 'Russia Rouble'
    SEK = 'Sweden Krona'
    SGD = 'Singapore Dollar'
    THB = 'Thailand Baht'
    TRY = 'Turkish New Lira'
    USD = 'USA Dollar'
    ZAR = 'South Africa Rand'
    EUR = 'Euro'
| 24.486486 | 31 | 0.590508 | 881 | 0.972406 | 0 | 0 | 0 | 0 | 0 | 0 | 508 | 0.560706 |
206a46f327f26de1a92c3d1bcec2ea680b114a57 | 6,822 | py | Python | kgtk/cli/wikidata_nodes_import.py | bhatiadivij/kgtk | 10890fa2c3460e199d327a0b66e0f71501738fe2 | [
"MIT"
] | null | null | null | kgtk/cli/wikidata_nodes_import.py | bhatiadivij/kgtk | 10890fa2c3460e199d327a0b66e0f71501738fe2 | [
"MIT"
] | null | null | null | kgtk/cli/wikidata_nodes_import.py | bhatiadivij/kgtk | 10890fa2c3460e199d327a0b66e0f71501738fe2 | [
"MIT"
] | null | null | null | """
Import wikidata nodes into KGTK file
"""
def parser():
    """Return the CLI registration metadata for this subcommand."""
    return dict(help='Import wikidata nodes into KGTK file')
def add_arguments(parser):
    """
    Parse arguments
    Args:
        parser (argparse.ArgumentParser)
    """
    # BUGFIX: dest was "wikidat_file" (typo). The CLI framework passes
    # parsed options to run() by keyword, and run() expects "wikidata_file",
    # so the misspelled dest would raise a TypeError at dispatch time.
    parser.add_argument("-i", action="store", type=str, dest="wikidata_file")
    parser.add_argument("-o", action="store", type=str, dest="output_file")
    # Optional cap on the number of dump lines to process (None = no limit).
    parser.add_argument(
        "-l",
        action="store",
        type=int,
        dest="limit",
        default=None)
    # Language code used to pick labels/descriptions/aliases.
    parser.add_argument(
        "-L",
        action="store",
        type=str,
        dest="lang",
        default="en")
    # Document id written into every output row.
    parser.add_argument(
        "-s",
        action="store",
        type=str,
        dest="doc_id",
        default="wikidata-20200203")
def run(wikidata_file, output_file, limit, lang, doc_id):
    """Stream a bz2-compressed Wikidata JSON dump and write a KGTK node file.

    Args:
        wikidata_file: path to the ``.json.bz2`` Wikidata dump.
        output_file: path of the tab-separated KGTK node file to write.
        limit: stop after this many dump lines (falsy = process everything).
        lang: language code for labels/descriptions/aliases (e.g. "en").
        doc_id: document identifier written into each output row.

    Rows are flushed to disk every 50000 input lines.
    NOTE(review): several locals below (site_filter, neg_prop_filter,
    title_to_id, id_to_descr, id_to_alias, parse_properties,
    parse_sitelinks, parse_claims) are assigned but never used in this
    function.
    """
    # import modules locally
    import bz2
    import json
    import csv
    site_filter = '{}wiki'.format(lang)
    WD_META_ITEMS = [
        "Q163875",
        "Q191780",
        "Q224414",
        "Q4167836",
        "Q4167410",
        "Q4663903",
        "Q11266439",
        "Q13406463",
        "Q15407973",
        "Q18616576",
        "Q19887878",
        "Q22808320",
        "Q23894233",
        "Q33120876",
        "Q42104522",
        "Q47460393",
        "Q64875536",
        "Q66480449",
    ]
    # filter: currently defined as OR: one hit suffices to be removed from
    # further processing
    exclude_list = WD_META_ITEMS
    # punctuation
    exclude_list.extend(["Q1383557", "Q10617810"])
    # letters etc
    exclude_list.extend(["Q188725", "Q19776628", "Q3841820",
                         "Q17907810", "Q9788", "Q9398093"])
    neg_prop_filter = {
        'P31': exclude_list,    # instance of
        'P279': exclude_list    # subclass
    }
    title_to_id = dict()
    id_to_descr = dict()
    id_to_alias = dict()
    # When True, print every parsed label/description/alias (debug aid).
    to_print = False
    # parse appropriate fields - depending on what we need in the KB
    parse_properties = False
    parse_descr = True
    parse_sitelinks = True
    parse_labels = True
    parse_aliases = True
    parse_claims = True
    # create the header of the csv file
    header = []
    header.append('id')
    if parse_labels:
        header.append('label')
    header.append('type')
    if parse_descr:
        header.append('descriptions')
    if parse_aliases:
        header.append('aliases')
    header.append('document_id')
    with open(output_file, 'w', newline='') as myfile:
        wr = csv.writer(
            myfile,
            quoting=csv.QUOTE_NONE,
            delimiter="\t",
            escapechar="\n",
            quotechar='')
        wr.writerow(header)
    rows = []
    with bz2.open(wikidata_file, mode='rb') as file:
        print('processing wikidata file now...')
        for cnt, line in enumerate(file):
            keep = False
            if limit and cnt >= limit:
                break
            if cnt % 500000 == 0 and cnt > 0:
                print('processed {} lines'.format(cnt))
            # Each dump line is a JSON object followed by a comma.
            clean_line = line.strip()
            if clean_line.endswith(b","):
                clean_line = clean_line[:-1]
            if len(clean_line) > 1:
                obj = json.loads(clean_line)
                entry_type = obj["type"]
                if entry_type == "item" or entry_type == "property":
                    keep = True
                if keep:
                    row = []
                    qnode = obj["id"]
                    row.append(qnode)
                    if parse_labels:
                        labels = obj["labels"]
                        if labels:
                            lang_label = labels.get(lang, None)
                            if lang_label:
                                # KGTK language-qualified string: 'value'@lang
                                row.append(
                                    '\'' + lang_label['value'] + '\'' + "@" + lang)
                                if to_print:
                                    print(
                                        "label (" + lang + "):", lang_label["value"])
                            else:
                                row.append("")
                        else:
                            row.append("")
                    row.append(entry_type)
                    if parse_descr:
                        descriptions = obj["descriptions"]
                        if descriptions:
                            lang_descr = descriptions.get(lang, None)
                            if lang_descr:
                                row.append(
                                    '\'' + lang_descr['value'] + '\'' + "@" + lang)
                                if to_print:
                                    print(
                                        "description (" + lang + "):",
                                        lang_descr["value"],
                                    )
                            else:
                                row.append("")
                        else:
                            row.append("")
                    if parse_aliases:
                        aliases = obj["aliases"]
                        if aliases:
                            lang_aliases = aliases.get(lang, None)
                            if lang_aliases:
                                alias_list = []
                                for item in lang_aliases:
                                    alias_list.append(
                                        '\'' + item['value'] + '\'' + "@" + lang)
                                    if to_print:
                                        print(
                                            "alias (" + lang + "):", item["value"])
                                # Multiple aliases are joined with '|'.
                                row.append("|".join(alias_list))
                            else:
                                row.append('')
                        else:
                            row.append('')
                    row.append(doc_id)
                    rows.append(row)
            # Periodically append the buffered rows to the output file.
            if cnt % 50000 == 0 and cnt > 0:
                with open(output_file, 'a', newline='') as myfile:
                    for row in rows:
                        wr = csv.writer(
                            myfile,
                            quoting=csv.QUOTE_NONE,
                            delimiter="\t",
                            escapechar="\n",
                            quotechar='')
                        wr.writerow(row)
                rows = []
    # Flush any rows remaining after the last periodic write.
    with open(output_file, 'a', newline='') as myfile:
        for row in rows:
            wr = csv.writer(
                myfile,
                quoting=csv.QUOTE_NONE,
                delimiter="\t",
                escapechar="\n",
                quotechar='')
            wr.writerow(row)
    print('import complete')
| 31.730233 | 85 | 0.413515 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,213 | 0.177807 |
206bdceed901242fca25b737a0e8e945f5ce902c | 54,328 | py | Python | training/train_nav.py | catalina17/EmbodiedQA | 492c2e907697691899e7fe2102b0b859059d4efd | [
"BSD-3-Clause"
] | 289 | 2018-06-14T22:51:20.000Z | 2022-02-09T19:48:37.000Z | training/train_nav.py | catalina17/EmbodiedQA | 492c2e907697691899e7fe2102b0b859059d4efd | [
"BSD-3-Clause"
] | 27 | 2018-06-26T07:57:51.000Z | 2022-03-11T23:22:02.000Z | training/train_nav.py | catalina17/EmbodiedQA | 492c2e907697691899e7fe2102b0b859059d4efd | [
"BSD-3-Clause"
] | 66 | 2018-06-14T23:34:32.000Z | 2022-03-25T11:16:09.000Z | import time
import argparse
from datetime import datetime
import logging
import numpy as np
import os
import torch
import torch.nn.functional as F
import torch.multiprocessing as mp
from models import NavCnnModel, NavCnnRnnModel, NavCnnRnnMultModel, NavPlannerControllerModel
from data import EqaDataLoader
from metrics import NavMetric
from models import MaskedNLLCriterion
from models import get_state, ensure_shared_grads
from data import load_vocab
from torch.autograd import Variable
from tqdm import tqdm
import time
torch.backends.cudnn.enabled = False
################################################################################################
# Make models trained with PyTorch 0.4 loadable under earlier PyTorch versions.
import torch._utils
# Backport shim: older torch releases lack torch._utils._rebuild_tensor_v2,
# which checkpoints saved by PyTorch 0.4 reference during unpickling.
if not hasattr(torch._utils, '_rebuild_tensor_v2'):
    def _rebuild_tensor_v2(storage, storage_offset, size, stride, requires_grad, backward_hooks):
        # Rebuild via the legacy helper, then restore the 0.4-era metadata.
        rebuilt = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)
        rebuilt.requires_grad = requires_grad
        rebuilt._backward_hooks = backward_hooks
        return rebuilt
    torch._utils._rebuild_tensor_v2 = _rebuild_tensor_v2
################################################################################################
def eval(rank, args, shared_model):
    """Evaluation worker: repeatedly snapshot ``shared_model`` and measure
    navigation performance on the ``args.eval_split`` split.

    Spawns the agent at 10/30/50 steps from the target and rolls the policy
    forward, recording distance-to-target and room-reachability metrics.
    Checkpoints whenever the d_D_50 metric improves.

    NOTE(review): the function name shadows the builtin ``eval``; kept for
    compatibility with callers that reference it by name.

    Args:
        rank: worker index; selects a GPU (round-robin) and names the log.
        args: parsed command-line namespace.
        shared_model: model whose parameters are copied in before each pass.
    """
    torch.cuda.set_device(args.gpus.index(args.gpus[rank % len(args.gpus)]))
    # Instantiate a local copy of the requested model architecture.
    if args.model_type == 'cnn':
        model_kwargs = {}
        model = NavCnnModel(**model_kwargs)
    elif args.model_type == 'cnn+q':
        model_kwargs = {
            'question_input': True,
            'question_vocab': load_vocab(args.vocab_json)
        }
        model = NavCnnModel(**model_kwargs)
    elif args.model_type == 'lstm':
        model_kwargs = {}
        model = NavCnnRnnModel(**model_kwargs)
    elif args.model_type == 'lstm+q':
        model_kwargs = {
            'question_input': True,
            'question_vocab': load_vocab(args.vocab_json)
        }
        model = NavCnnRnnModel(**model_kwargs)
    elif args.model_type == 'lstm-mult+q':
        model_kwargs = {
            'question_input': True,
            'question_vocab': load_vocab(args.vocab_json)
        }
        model = NavCnnRnnMultModel(**model_kwargs)
    elif args.model_type == 'pacman':
        model_kwargs = {'question_vocab': load_vocab(args.vocab_json)}
        model = NavPlannerControllerModel(**model_kwargs)
    else:
        exit()
    eval_loader_kwargs = {
        'questions_h5': getattr(args, args.eval_split + '_h5'),
        'data_json': args.data_json,
        'vocab': args.vocab_json,
        'target_obj_conn_map_dir': args.target_obj_conn_map_dir,
        'map_resolution': args.map_resolution,
        'batch_size': 1,
        'input_type': args.model_type,
        'num_frames': 5,
        'split': args.eval_split,
        'max_threads_per_gpu': args.max_threads_per_gpu,
        'gpu_id': args.gpus[rank % len(args.gpus)],
        'to_cache': False,
        'overfit': args.overfit,
        'max_controller_actions': args.max_controller_actions,
    }
    eval_loader = EqaDataLoader(**eval_loader_kwargs)
    print('eval_loader has %d samples' % len(eval_loader.dataset))
    logging.info("EVAL: eval_loader has {} samples".format(len(eval_loader.dataset)))
    args.output_log_path = os.path.join(args.log_dir,
                                        'eval_' + str(rank) + '.json')
    t, epoch, best_eval_acc = 0, 0, 0.0
    max_epochs = args.max_epochs
    if args.mode == 'eval':
        max_epochs = 1
    while epoch < int(max_epochs):
        invalids = []
        model.load_state_dict(shared_model.state_dict())
        model.eval()
        # that's a lot of numbers
        metrics = NavMetric(
            info={'split': args.eval_split,
                  'thread': rank},
            metric_names=[
                'd_0_10', 'd_0_30', 'd_0_50', 'd_T_10', 'd_T_30', 'd_T_50',
                'd_D_10', 'd_D_30', 'd_D_50', 'd_min_10', 'd_min_30',
                'd_min_50', 'r_T_10', 'r_T_30', 'r_T_50', 'r_e_10', 'r_e_30',
                'r_e_50', 'stop_10', 'stop_30', 'stop_50', 'ep_len_10',
                'ep_len_30', 'ep_len_50'
            ],
            log_json=args.output_log_path)
        if 'cnn' in args.model_type:
            done = False
            while done == False:
                for batch in tqdm(eval_loader):
                    model.load_state_dict(shared_model.state_dict())
                    model.cuda()
                    idx, questions, _, img_feats, actions_in, actions_out, action_length = batch
                    metrics_slug = {}
                    # evaluate at multiple initializations
                    for i in [10, 30, 50]:
                        t += 1
                        # Need at least 5 frames of history before the spawn point.
                        if action_length[0] + 1 - i - 5 < 0:
                            invalids.append(idx[0])
                            continue
                        ep_inds = [
                            x for x in range(action_length[0] + 1 - i - 5,
                                             action_length[0] + 1 - i)
                        ]
                        sub_img_feats = torch.index_select(
                            img_feats, 1, torch.LongTensor(ep_inds))
                        init_pos = eval_loader.dataset.episode_pos_queue[
                            ep_inds[-1]]
                        h3d = eval_loader.dataset.episode_house
                        h3d.env.reset(
                            x=init_pos[0], y=init_pos[2], yaw=init_pos[3])
                        init_dist_to_target = h3d.get_dist_to_target(
                            h3d.env.cam.pos)
                        if init_dist_to_target < 0:  # unreachable
                            invalids.append(idx[0])
                            continue
                        sub_img_feats_var = Variable(sub_img_feats.cuda())
                        if '+q' in args.model_type:
                            questions_var = Variable(questions.cuda())
                        # sample actions till max steps or <stop>
                        # max no. of actions = 100
                        episode_length = 0
                        episode_done = True
                        dists_to_target, pos_queue, actions = [
                            init_dist_to_target
                        ], [init_pos], []
                        for step in range(args.max_episode_length):
                            episode_length += 1
                            if '+q' in args.model_type:
                                scores = model(sub_img_feats_var,
                                               questions_var)
                            else:
                                scores = model(sub_img_feats_var)
                            prob = F.softmax(scores, dim=1)
                            # Greedy action selection.
                            action = int(prob.max(1)[1].data.cpu().numpy()[0])
                            actions.append(action)
                            img, _, episode_done = h3d.step(action)
                            episode_done = episode_done or episode_length >= args.max_episode_length
                            img = torch.from_numpy(img.transpose(
                                2, 0, 1)).float() / 255.0
                            img_feat_var = eval_loader.dataset.cnn(
                                Variable(img.view(1, 3, 224, 224)
                                         .cuda())).view(1, 1, 3200)
                            # Keep a sliding window of the last 5 frame features.
                            sub_img_feats_var = torch.cat(
                                [sub_img_feats_var, img_feat_var], dim=1)
                            sub_img_feats_var = sub_img_feats_var[:, -5:, :]
                            dists_to_target.append(
                                h3d.get_dist_to_target(h3d.env.cam.pos))
                            pos_queue.append([
                                h3d.env.cam.pos.x, h3d.env.cam.pos.y,
                                h3d.env.cam.pos.z, h3d.env.cam.yaw
                            ])
                            if episode_done == True:
                                break
                        # compute stats
                        metrics_slug['d_0_' + str(i)] = dists_to_target[0]
                        metrics_slug['d_T_' + str(i)] = dists_to_target[-1]
                        metrics_slug['d_D_' + str(
                            i)] = dists_to_target[0] - dists_to_target[-1]
                        metrics_slug['d_min_' + str(i)] = np.array(
                            dists_to_target).min()
                        metrics_slug['ep_len_' + str(i)] = episode_length
                        if action == 3:
                            metrics_slug['stop_' + str(i)] = 1
                        else:
                            metrics_slug['stop_' + str(i)] = 0
                        inside_room = []
                        for p in pos_queue:
                            inside_room.append(
                                h3d.is_inside_room(
                                    p, eval_loader.dataset.target_room))
                        if inside_room[-1] == True:
                            metrics_slug['r_T_' + str(i)] = 1
                        else:
                            metrics_slug['r_T_' + str(i)] = 0
                        if any([x == True for x in inside_room]) == True:
                            metrics_slug['r_e_' + str(i)] = 1
                        else:
                            metrics_slug['r_e_' + str(i)] = 0
                    # collate and update metrics
                    metrics_list = []
                    for i in metrics.metric_names:
                        if i not in metrics_slug:
                            metrics_list.append(metrics.metrics[
                                metrics.metric_names.index(i)][0])
                        else:
                            metrics_list.append(metrics_slug[i])
                    # update metrics
                    metrics.update(metrics_list)
                print(metrics.get_stat_string(mode=0))
                print('invalids', len(invalids))
                logging.info("EVAL: metrics: {}".format(metrics.get_stat_string(mode=0)))
                logging.info("EVAL: invalids: {}".format(len(invalids)))
                # del h3d
                eval_loader.dataset._load_envs()
                if len(eval_loader.dataset.pruned_env_set) == 0:
                    done = True
        elif 'lstm' in args.model_type:
            done = False
            while done == False:
                if args.overfit:
                    metrics = NavMetric(
                        info={'split': args.eval_split,
                              'thread': rank},
                        metric_names=[
                            'd_0_10', 'd_0_30', 'd_0_50', 'd_T_10', 'd_T_30', 'd_T_50',
                            'd_D_10', 'd_D_30', 'd_D_50', 'd_min_10', 'd_min_30',
                            'd_min_50', 'r_T_10', 'r_T_30', 'r_T_50', 'r_e_10', 'r_e_30',
                            'r_e_50', 'stop_10', 'stop_30', 'stop_50', 'ep_len_10',
                            'ep_len_30', 'ep_len_50'
                        ],
                        log_json=args.output_log_path)
                for batch in tqdm(eval_loader):
                    model.load_state_dict(shared_model.state_dict())
                    model.cuda()
                    idx, questions, answer, _, actions_in, actions_out, action_lengths, _ = batch
                    question_var = Variable(questions.cuda())
                    metrics_slug = {}
                    # evaluate at multiple initializations
                    for i in [10, 30, 50]:
                        t += 1
                        if action_lengths[0] - 1 - i < 0:
                            invalids.append([idx[0], i])
                            continue
                        h3d = eval_loader.dataset.episode_house
                        # forward through lstm till spawn
                        if len(eval_loader.dataset.episode_pos_queue[:-i]
                               ) > 0:
                            images = eval_loader.dataset.get_frames(
                                h3d,
                                eval_loader.dataset.episode_pos_queue[:-i],
                                preprocess=True)
                            raw_img_feats = eval_loader.dataset.cnn(
                                Variable(torch.FloatTensor(images).cuda()))
                            actions_in_pruned = actions_in[:, :
                                                           action_lengths[0] -
                                                           i]
                            actions_in_var = Variable(actions_in_pruned.cuda())
                            action_lengths_pruned = action_lengths.clone(
                            ).fill_(action_lengths[0] - i)
                            img_feats_var = raw_img_feats.view(1, -1, 3200)
                            if '+q' in args.model_type:
                                scores, hidden = model(
                                    img_feats_var, question_var,
                                    actions_in_var,
                                    action_lengths_pruned.cpu().numpy())
                            else:
                                scores, hidden = model(
                                    img_feats_var, False, actions_in_var,
                                    action_lengths_pruned.cpu().numpy())
                            try:
                                init_pos = eval_loader.dataset.episode_pos_queue[
                                    -i]
                            except:
                                invalids.append([idx[0], i])
                                continue
                            action_in = torch.LongTensor(1, 1).fill_(
                                actions_in[0,
                                           action_lengths[0] - i]).cuda()
                        else:
                            # No history before the spawn: start from a fresh hidden state.
                            init_pos = eval_loader.dataset.episode_pos_queue[
                                -i]
                            hidden = model.nav_rnn.init_hidden(1)
                            action_in = torch.LongTensor(1, 1).fill_(0).cuda()
                        h3d.env.reset(
                            x=init_pos[0], y=init_pos[2], yaw=init_pos[3])
                        init_dist_to_target = h3d.get_dist_to_target(
                            h3d.env.cam.pos)
                        if init_dist_to_target < 0:  # unreachable
                            invalids.append([idx[0], i])
                            continue
                        img = h3d.env.render()
                        img = torch.from_numpy(img.transpose(
                            2, 0, 1)).float() / 255.0
                        img_feat_var = eval_loader.dataset.cnn(
                            Variable(img.view(1, 3, 224, 224).cuda())).view(
                                1, 1, 3200)
                        episode_length = 0
                        episode_done = True
                        dists_to_target, pos_queue, actions = [
                            init_dist_to_target
                        ], [init_pos], []
                        actual_pos_queue = [(h3d.env.cam.pos.x, h3d.env.cam.pos.z, h3d.env.cam.yaw)]
                        for step in range(args.max_episode_length):
                            episode_length += 1
                            if '+q' in args.model_type:
                                scores, hidden = model(
                                    img_feat_var,
                                    question_var,
                                    Variable(action_in),
                                    False,
                                    hidden=hidden,
                                    step=True)
                            else:
                                scores, hidden = model(
                                    img_feat_var,
                                    False,
                                    Variable(action_in),
                                    False,
                                    hidden=hidden,
                                    step=True)
                            prob = F.softmax(scores, dim=1)
                            action = int(prob.max(1)[1].data.cpu().numpy()[0])
                            actions.append(action)
                            img, _, episode_done = h3d.step(action)
                            episode_done = episode_done or episode_length >= args.max_episode_length
                            img = torch.from_numpy(img.transpose(
                                2, 0, 1)).float() / 255.0
                            img_feat_var = eval_loader.dataset.cnn(
                                Variable(img.view(1, 3, 224, 224)
                                         .cuda())).view(1, 1, 3200)
                            action_in = torch.LongTensor(
                                1, 1).fill_(action + 1).cuda()
                            dists_to_target.append(
                                h3d.get_dist_to_target(h3d.env.cam.pos))
                            pos_queue.append([
                                h3d.env.cam.pos.x, h3d.env.cam.pos.y,
                                h3d.env.cam.pos.z, h3d.env.cam.yaw
                            ])
                            if episode_done == True:
                                break
                            actual_pos_queue.append([
                                h3d.env.cam.pos.x, h3d.env.cam.pos.z, h3d.env.cam.yaw])
                        # compute stats
                        metrics_slug['d_0_' + str(i)] = dists_to_target[0]
                        metrics_slug['d_T_' + str(i)] = dists_to_target[-1]
                        metrics_slug['d_D_' + str(
                            i)] = dists_to_target[0] - dists_to_target[-1]
                        metrics_slug['d_min_' + str(i)] = np.array(
                            dists_to_target).min()
                        metrics_slug['ep_len_' + str(i)] = episode_length
                        if action == 3:
                            metrics_slug['stop_' + str(i)] = 1
                        else:
                            metrics_slug['stop_' + str(i)] = 0
                        inside_room = []
                        for p in pos_queue:
                            inside_room.append(
                                h3d.is_inside_room(
                                    p, eval_loader.dataset.target_room))
                        if inside_room[-1] == True:
                            metrics_slug['r_T_' + str(i)] = 1
                        else:
                            metrics_slug['r_T_' + str(i)] = 0
                        if any([x == True for x in inside_room]) == True:
                            metrics_slug['r_e_' + str(i)] = 1
                        else:
                            metrics_slug['r_e_' + str(i)] = 0
                    # collate and update metrics
                    metrics_list = []
                    for i in metrics.metric_names:
                        if i not in metrics_slug:
                            metrics_list.append(metrics.metrics[
                                metrics.metric_names.index(i)][0])
                        else:
                            metrics_list.append(metrics_slug[i])
                    # update metrics
                    metrics.update(metrics_list)
                print(metrics.get_stat_string(mode=0))
                print('invalids', len(invalids))
                logging.info("EVAL: init_steps: {} metrics: {}".format(i, metrics.get_stat_string(mode=0)))
                logging.info("EVAL: init_steps: {} invalids: {}".format(i, len(invalids)))
                # del h3d
                eval_loader.dataset._load_envs()
                print("eval_loader pruned_env_set len: {}".format(len(eval_loader.dataset.pruned_env_set)))
                logging.info("eval_loader pruned_env_set len: {}".format(len(eval_loader.dataset.pruned_env_set)))
                assert len(eval_loader.dataset.pruned_env_set) > 0
                if len(eval_loader.dataset.pruned_env_set) == 0:
                    done = True
        elif 'pacman' in args.model_type:
            done = False
            while done == False:
                if args.overfit:
                    metrics = NavMetric(
                        info={'split': args.eval_split,
                              'thread': rank},
                        metric_names=[
                            'd_0_10', 'd_0_30', 'd_0_50', 'd_T_10', 'd_T_30', 'd_T_50',
                            'd_D_10', 'd_D_30', 'd_D_50', 'd_min_10', 'd_min_30',
                            'd_min_50', 'r_T_10', 'r_T_30', 'r_T_50', 'r_e_10', 'r_e_30',
                            'r_e_50', 'stop_10', 'stop_30', 'stop_50', 'ep_len_10',
                            'ep_len_30', 'ep_len_50'
                        ],
                        log_json=args.output_log_path)
                for batch in tqdm(eval_loader):
                    model.load_state_dict(shared_model.state_dict())
                    model.cuda()
                    idx, question, answer, actions, action_length = batch
                    metrics_slug = {}
                    h3d = eval_loader.dataset.episode_house
                    # evaluate at multiple initializations
                    for i in [10, 30, 50]:
                        t += 1
                        if i > action_length[0]:
                            invalids.append([idx[0], i])
                            continue
                        question_var = Variable(question.cuda())
                        controller_step = False
                        planner_hidden = model.planner_nav_rnn.init_hidden(1)
                        # get hierarchical action history
                        (
                            planner_actions_in, planner_img_feats,
                            controller_step, controller_action_in,
                            controller_img_feats, init_pos,
                            controller_action_counter
                        ) = eval_loader.dataset.get_hierarchical_features_till_spawn(
                            actions[0, :action_length[0] + 1].numpy(), i, args.max_controller_actions
                        )
                        planner_actions_in_var = Variable(
                            planner_actions_in.cuda())
                        planner_img_feats_var = Variable(
                            planner_img_feats.cuda())
                        # forward planner till spawn to update hidden state
                        for step in range(planner_actions_in.size(0)):
                            planner_scores, planner_hidden = model.planner_step(
                                question_var, planner_img_feats_var[step]
                                .unsqueeze(0).unsqueeze(0),
                                planner_actions_in_var[step].view(1, 1),
                                planner_hidden
                            )
                        h3d.env.reset(
                            x=init_pos[0], y=init_pos[2], yaw=init_pos[3])
                        init_dist_to_target = h3d.get_dist_to_target(
                            h3d.env.cam.pos)
                        if init_dist_to_target < 0:  # unreachable
                            invalids.append([idx[0], i])
                            continue
                        dists_to_target, pos_queue, pred_actions = [
                            init_dist_to_target
                        ], [init_pos], []
                        planner_actions, controller_actions = [], []
                        episode_length = 0
                        if args.max_controller_actions > 1:
                            controller_action_counter = controller_action_counter % args.max_controller_actions
                            controller_action_counter = max(controller_action_counter - 1, 0)
                        else:
                            controller_action_counter = 0
                        first_step = True
                        first_step_is_controller = controller_step
                        planner_step = True
                        action = int(controller_action_in)
                        for step in range(args.max_episode_length):
                            if not first_step:
                                img = torch.from_numpy(img.transpose(
                                    2, 0, 1)).float() / 255.0
                                img_feat_var = eval_loader.dataset.cnn(
                                    Variable(img.view(1, 3, 224,
                                                      224).cuda())).view(
                                                          1, 1, 3200)
                            else:
                                img_feat_var = Variable(controller_img_feats.cuda()).view(1, 1, 3200)
                            if not first_step or first_step_is_controller:
                                # query controller to continue or not
                                controller_action_in = Variable(
                                    torch.LongTensor(1, 1).fill_(action).cuda())
                                controller_scores = model.controller_step(
                                    img_feat_var, controller_action_in,
                                    planner_hidden[0])
                                prob = F.softmax(controller_scores, dim=1)
                                controller_action = int(
                                    prob.max(1)[1].data.cpu().numpy()[0])
                                if controller_action == 1 and controller_action_counter < args.max_controller_actions - 1:
                                    controller_action_counter += 1
                                    planner_step = False
                                else:
                                    controller_action_counter = 0
                                    planner_step = True
                                    controller_action = 0
                                controller_actions.append(controller_action)
                                first_step = False
                            if planner_step:
                                if not first_step:
                                    action_in = torch.LongTensor(
                                        1, 1).fill_(action + 1).cuda()
                                    planner_scores, planner_hidden = model.planner_step(
                                        question_var, img_feat_var,
                                        Variable(action_in), planner_hidden)
                                prob = F.softmax(planner_scores, dim=1)
                                action = int(
                                    prob.max(1)[1].data.cpu().numpy()[0])
                                planner_actions.append(action)
                            episode_done = action == 3 or episode_length >= args.max_episode_length
                            episode_length += 1
                            dists_to_target.append(
                                h3d.get_dist_to_target(h3d.env.cam.pos))
                            pos_queue.append([
                                h3d.env.cam.pos.x, h3d.env.cam.pos.y,
                                h3d.env.cam.pos.z, h3d.env.cam.yaw
                            ])
                            if episode_done:
                                break
                            img, _, _ = h3d.step(action)
                            first_step = False
                        # compute stats
                        metrics_slug['d_0_' + str(i)] = dists_to_target[0]
                        metrics_slug['d_T_' + str(i)] = dists_to_target[-1]
                        metrics_slug['d_D_' + str(
                            i)] = dists_to_target[0] - dists_to_target[-1]
                        metrics_slug['d_min_' + str(i)] = np.array(
                            dists_to_target).min()
                        metrics_slug['ep_len_' + str(i)] = episode_length
                        if action == 3:
                            metrics_slug['stop_' + str(i)] = 1
                        else:
                            metrics_slug['stop_' + str(i)] = 0
                        inside_room = []
                        for p in pos_queue:
                            inside_room.append(
                                h3d.is_inside_room(
                                    p, eval_loader.dataset.target_room))
                        if inside_room[-1] == True:
                            metrics_slug['r_T_' + str(i)] = 1
                        else:
                            metrics_slug['r_T_' + str(i)] = 0
                        if any([x == True for x in inside_room]) == True:
                            metrics_slug['r_e_' + str(i)] = 1
                        else:
                            metrics_slug['r_e_' + str(i)] = 0
                    # collate and update metrics
                    metrics_list = []
                    for i in metrics.metric_names:
                        if i not in metrics_slug:
                            metrics_list.append(metrics.metrics[
                                metrics.metric_names.index(i)][0])
                        else:
                            metrics_list.append(metrics_slug[i])
                    # update metrics
                    metrics.update(metrics_list)
                    try:
                        print(metrics.get_stat_string(mode=0))
                        logging.info("EVAL: metrics: {}".format(metrics.get_stat_string(mode=0)))
                    except:
                        pass
                print('epoch', epoch)
                print('invalids', len(invalids))
                logging.info("EVAL: epoch {}".format(epoch))
                logging.info("EVAL: invalids {}".format(invalids))
                # del h3d
                eval_loader.dataset._load_envs()
                if len(eval_loader.dataset.pruned_env_set) == 0:
                    done = True
        epoch += 1
        # checkpoint if best val loss
        if metrics.metrics[8][0] > best_eval_acc:  # d_D_50
            best_eval_acc = metrics.metrics[8][0]
            if epoch % args.eval_every == 0 and args.log == True:
                metrics.dump_log()
                model_state = get_state(model)
                # Strip private attributes from args before serializing.
                aad = dict(args.__dict__)
                ad = {}
                for i in aad:
                    if i[0] != '_':
                        ad[i] = aad[i]
                checkpoint = {'args': ad, 'state': model_state, 'epoch': epoch}
                checkpoint_path = '%s/epoch_%d_d_D_50_%.04f.pt' % (
                    args.checkpoint_dir, epoch, best_eval_acc)
                print('Saving checkpoint to %s' % checkpoint_path)
                logging.info("EVAL: Saving checkpoint to {}".format(checkpoint_path))
                torch.save(checkpoint, checkpoint_path)
        print('[best_eval_d_D_50:%.04f]' % best_eval_acc)
        logging.info("EVAL: [best_eval_d_D_50:{:.04f}]".format(best_eval_acc))
        # Reload environments from the start for the next epoch.
        eval_loader.dataset._load_envs(start_idx=0, in_order=True)
def train(rank, args, shared_model):
    """Training worker for one process in an async (hogwild-style) setup.

    Builds a local replica of the navigation model, streams batches from an
    ``EqaDataLoader``, and for every batch copies the shared parameters in,
    computes the loss, and pushes the local gradients back onto
    ``shared_model`` before stepping the (shared) Adamax optimizer.

    :param rank: index of this worker process; used to pick a GPU
        (round-robin over ``args.gpus``) and to name per-thread log files.
    :param args: parsed command-line arguments / config object.
    :param shared_model: model living in shared memory, updated by all
        workers (gradients are transferred via ``ensure_shared_grads``).
    """
    # Map this worker onto one of the visible GPUs, round-robin by rank.
    torch.cuda.set_device(args.gpus.index(args.gpus[rank % len(args.gpus)]))
    # Instantiate a *local* replica matching the requested architecture.
    if args.model_type == 'cnn':
        model_kwargs = {}
        model = NavCnnModel(**model_kwargs)
    elif args.model_type == 'cnn+q':
        model_kwargs = {
            'question_input': True,
            'question_vocab': load_vocab(args.vocab_json)
        }
        model = NavCnnModel(**model_kwargs)
    elif args.model_type == 'lstm':
        model_kwargs = {}
        model = NavCnnRnnModel(**model_kwargs)
    elif args.model_type == 'lstm-mult+q':
        model_kwargs = {
            'question_input': True,
            'question_vocab': load_vocab(args.vocab_json)
        }
        model = NavCnnRnnMultModel(**model_kwargs)
    elif args.model_type == 'lstm+q':
        model_kwargs = {
            'question_input': True,
            'question_vocab': load_vocab(args.vocab_json)
        }
        model = NavCnnRnnModel(**model_kwargs)
    elif args.model_type == 'pacman':
        model_kwargs = {'question_vocab': load_vocab(args.vocab_json)}
        model = NavPlannerControllerModel(**model_kwargs)
    else:
        exit()
    # Note: the optimizer is built over the *shared* model's parameters, so
    # optim.step() updates the shared weights that all workers read from.
    lossFn = torch.nn.CrossEntropyLoss().cuda()
    optim = torch.optim.Adamax(
        filter(lambda p: p.requires_grad, shared_model.parameters()),
        lr=args.learning_rate)
    train_loader_kwargs = {
        'questions_h5': args.train_h5,
        'data_json': args.data_json,
        'vocab': args.vocab_json,
        'batch_size': args.batch_size,
        'input_type': args.model_type,
        'num_frames': 5,
        'map_resolution': args.map_resolution,
        'split': 'train',
        'max_threads_per_gpu': args.max_threads_per_gpu,
        'gpu_id': args.gpus[rank % len(args.gpus)],
        'to_cache': args.cache,
        'overfit': args.overfit,
        'max_controller_actions': args.max_controller_actions,
        'max_actions': args.max_actions
    }
    # Per-thread metrics log so concurrent workers don't clobber each other.
    args.output_log_path = os.path.join(args.log_dir,
                                        'train_' + str(rank) + '.json')
    # 'pacman' tracks separate planner/controller losses; others a single loss.
    if 'pacman' in args.model_type:
        metrics = NavMetric(
            info={'split': 'train',
                  'thread': rank},
            metric_names=['planner_loss', 'controller_loss'],
            log_json=args.output_log_path)
    else:
        metrics = NavMetric(
            info={'split': 'train',
                  'thread': rank},
            metric_names=['loss'],
            log_json=args.output_log_path)
    train_loader = EqaDataLoader(**train_loader_kwargs)
    print('train_loader has %d samples' % len(train_loader.dataset))
    logging.info('TRAIN: train loader has {} samples'.format(len(train_loader.dataset)))
    # t counts batches across epochs; epoch counts full passes over the env set.
    t, epoch = 0, 0
    while epoch < int(args.max_epochs):
        # --- feed-forward CNN variants: per-frame action classification ---
        if 'cnn' in args.model_type:
            done = False
            all_envs_loaded = train_loader.dataset._check_if_all_envs_loaded()
            while done == False:
                for batch in train_loader:
                    t += 1
                    # Refresh the local replica from shared memory each batch.
                    model.load_state_dict(shared_model.state_dict())
                    model.train()
                    model.cuda()
                    idx, questions, _, img_feats, _, actions_out, _ = batch
                    img_feats_var = Variable(img_feats.cuda())
                    if '+q' in args.model_type:
                        questions_var = Variable(questions.cuda())
                    actions_out_var = Variable(actions_out.cuda())
                    if '+q' in args.model_type:
                        scores = model(img_feats_var, questions_var)
                    else:
                        scores = model(img_feats_var)
                    loss = lossFn(scores, actions_out_var)
                    # zero grad
                    optim.zero_grad()
                    # update metrics
                    metrics.update([loss.data[0]])
                    # backprop and update
                    loss.backward()
                    # Copy local gradients onto the shared model (model is
                    # moved to CPU so grads land in shared memory).
                    ensure_shared_grads(model.cpu(), shared_model)
                    optim.step()
                    if t % args.print_every == 0:
                        print(metrics.get_stat_string())
                        logging.info("TRAIN: metrics: {}".format(metrics.get_stat_string()))
                        if args.log == True:
                            metrics.dump_log()
                    print('[CHECK][Cache:%d][Total:%d]' %
                          (len(train_loader.dataset.img_data_cache),
                           len(train_loader.dataset.env_list)))
                    logging.info('TRAIN: [CHECK][Cache:{}][Total:{}]'.format(
                        len(train_loader.dataset.img_data_cache), len(train_loader.dataset.env_list)))
                # Rotate in the next chunk of environments until all are seen.
                if all_envs_loaded == False:
                    train_loader.dataset._load_envs(in_order=True)
                    if len(train_loader.dataset.pruned_env_set) == 0:
                        done = True
                        if args.cache == False:
                            train_loader.dataset._load_envs(
                                start_idx=0, in_order=True)
                else:
                    done = True
        # --- recurrent variants: masked sequence loss over action sequences ---
        elif 'lstm' in args.model_type:
            lossFn = MaskedNLLCriterion().cuda()
            done = False
            all_envs_loaded = train_loader.dataset._check_if_all_envs_loaded()
            total_times = []
            while done == False:
                start_time = time.time()
                for batch in train_loader:
                    t += 1
                    model.load_state_dict(shared_model.state_dict())
                    model.train()
                    model.cuda()
                    idx, questions, _, img_feats, actions_in, actions_out, action_lengths, masks = batch
                    img_feats_var = Variable(img_feats.cuda())
                    if '+q' in args.model_type:
                        questions_var = Variable(questions.cuda())
                    actions_in_var = Variable(actions_in.cuda())
                    actions_out_var = Variable(actions_out.cuda())
                    action_lengths = action_lengths.cuda()
                    masks_var = Variable(masks.cuda())
                    # Sort by descending sequence length (required for packed
                    # RNN sequences) and permute all tensors consistently.
                    action_lengths, perm_idx = action_lengths.sort(
                        0, descending=True)
                    img_feats_var = img_feats_var[perm_idx]
                    if '+q' in args.model_type:
                        questions_var = questions_var[perm_idx]
                    actions_in_var = actions_in_var[perm_idx]
                    actions_out_var = actions_out_var[perm_idx]
                    masks_var = masks_var[perm_idx]
                    if '+q' in args.model_type:
                        scores, hidden = model(img_feats_var, questions_var,
                                               actions_in_var,
                                               action_lengths.cpu().numpy())
                    else:
                        scores, hidden = model(img_feats_var, False,
                                               actions_in_var,
                                               action_lengths.cpu().numpy())
                    #block out masks
                    # Curriculum: only supervise the last `curriculum_length`
                    # steps of long trajectories, growing 5 steps per epoch.
                    if args.curriculum:
                        curriculum_length = (epoch+1)*5
                        for i, action_length in enumerate(action_lengths):
                            if action_length - curriculum_length > 0:
                                masks_var[i, :action_length-curriculum_length] = 0
                    logprob = F.log_softmax(scores, dim=1)
                    loss = lossFn(
                        logprob, actions_out_var[:, :action_lengths.max()]
                        .contiguous().view(-1, 1),
                        masks_var[:, :action_lengths.max()].contiguous().view(
                            -1, 1))
                    # zero grad
                    optim.zero_grad()
                    # update metrics
                    metrics.update([loss.data[0]])
                    logging.info("TRAIN LSTM loss: {:.6f}".format(loss.data[0]))
                    # backprop and update
                    loss.backward()
                    ensure_shared_grads(model.cpu(), shared_model)
                    optim.step()
                    if t % args.print_every == 0:
                        print(metrics.get_stat_string())
                        logging.info("TRAIN: metrics: {}".format(metrics.get_stat_string()))
                        if args.log == True:
                            metrics.dump_log()
                    print('[CHECK][Cache:%d][Total:%d]' %
                          (len(train_loader.dataset.img_data_cache),
                           len(train_loader.dataset.env_list)))
                    logging.info('TRAIN: [CHECK][Cache:{}][Total:{}]'.format(
                        len(train_loader.dataset.img_data_cache), len(train_loader.dataset.env_list)))
                if all_envs_loaded == False:
                    train_loader.dataset._load_envs(in_order=True)
                    if len(train_loader.dataset.pruned_env_set) == 0:
                        done = True
                        if args.cache == False:
                            train_loader.dataset._load_envs(
                                start_idx=0, in_order=True)
                else:
                    done = True
        # --- planner/controller (PACMAN) model: two masked losses ---
        elif 'pacman' in args.model_type:
            planner_lossFn = MaskedNLLCriterion().cuda()
            controller_lossFn = MaskedNLLCriterion().cuda()
            done = False
            all_envs_loaded = train_loader.dataset._check_if_all_envs_loaded()
            while done == False:
                for batch in train_loader:
                    t += 1
                    model.load_state_dict(shared_model.state_dict())
                    model.train()
                    model.cuda()
                    idx, questions, _, planner_img_feats, planner_actions_in, \
                    planner_actions_out, planner_action_lengths, planner_masks, \
                    controller_img_feats, controller_actions_in, planner_hidden_idx, \
                    controller_outs, controller_action_lengths, controller_masks = batch
                    questions_var = Variable(questions.cuda())
                    planner_img_feats_var = Variable(planner_img_feats.cuda())
                    planner_actions_in_var = Variable(
                        planner_actions_in.cuda())
                    planner_actions_out_var = Variable(
                        planner_actions_out.cuda())
                    planner_action_lengths = planner_action_lengths.cuda()
                    planner_masks_var = Variable(planner_masks.cuda())
                    controller_img_feats_var = Variable(
                        controller_img_feats.cuda())
                    controller_actions_in_var = Variable(
                        controller_actions_in.cuda())
                    planner_hidden_idx_var = Variable(
                        planner_hidden_idx.cuda())
                    controller_outs_var = Variable(controller_outs.cuda())
                    controller_action_lengths = controller_action_lengths.cuda(
                    )
                    controller_masks_var = Variable(controller_masks.cuda())
                    # Sort by planner sequence length and apply the same
                    # permutation to every planner/controller tensor.
                    planner_action_lengths, perm_idx = planner_action_lengths.sort(
                        0, descending=True)
                    questions_var = questions_var[perm_idx]
                    planner_img_feats_var = planner_img_feats_var[perm_idx]
                    planner_actions_in_var = planner_actions_in_var[perm_idx]
                    planner_actions_out_var = planner_actions_out_var[perm_idx]
                    planner_masks_var = planner_masks_var[perm_idx]
                    controller_img_feats_var = controller_img_feats_var[
                        perm_idx]
                    controller_actions_in_var = controller_actions_in_var[
                        perm_idx]
                    controller_outs_var = controller_outs_var[perm_idx]
                    planner_hidden_idx_var = planner_hidden_idx_var[perm_idx]
                    controller_action_lengths = controller_action_lengths[
                        perm_idx]
                    controller_masks_var = controller_masks_var[perm_idx]
                    planner_scores, controller_scores, planner_hidden = model(
                        questions_var, planner_img_feats_var,
                        planner_actions_in_var,
                        planner_action_lengths.cpu().numpy(),
                        planner_hidden_idx_var, controller_img_feats_var,
                        controller_actions_in_var, controller_action_lengths)
                    planner_logprob = F.log_softmax(planner_scores, dim=1)
                    controller_logprob = F.log_softmax(
                        controller_scores, dim=1)
                    planner_loss = planner_lossFn(
                        planner_logprob,
                        planner_actions_out_var[:, :planner_action_lengths.max(
                        )].contiguous().view(-1, 1),
                        planner_masks_var[:, :planner_action_lengths.max()]
                        .contiguous().view(-1, 1))
                    controller_loss = controller_lossFn(
                        controller_logprob,
                        controller_outs_var[:, :controller_action_lengths.max(
                        )].contiguous().view(-1, 1),
                        controller_masks_var[:, :controller_action_lengths.max(
                        )].contiguous().view(-1, 1))
                    # zero grad
                    optim.zero_grad()
                    # update metrics
                    metrics.update(
                        [planner_loss.data[0], controller_loss.data[0]])
                    logging.info("TRAINING PACMAN planner-loss: {:.6f} controller-loss: {:.6f}".format(
                        planner_loss.data[0], controller_loss.data[0]))
                    # backprop and update
                    # With a single controller action the controller head is
                    # not trained; only the planner loss is backpropagated.
                    if args.max_controller_actions == 1:
                        (planner_loss).backward()
                    else:
                        (planner_loss + controller_loss).backward()
                    ensure_shared_grads(model.cpu(), shared_model)
                    optim.step()
                    if t % args.print_every == 0:
                        print(metrics.get_stat_string())
                        logging.info("TRAIN: metrics: {}".format(metrics.get_stat_string()))
                        if args.log == True:
                            metrics.dump_log()
                    print('[CHECK][Cache:%d][Total:%d]' %
                          (len(train_loader.dataset.img_data_cache),
                           len(train_loader.dataset.env_list)))
                    logging.info('TRAIN: [CHECK][Cache:{}][Total:{}]'.format(
                        len(train_loader.dataset.img_data_cache), len(train_loader.dataset.env_list)))
                if all_envs_loaded == False:
                    train_loader.dataset._load_envs(in_order=True)
                    if len(train_loader.dataset.pruned_env_set) == 0:
                        done = True
                        if args.cache == False:
                            train_loader.dataset._load_envs(
                                start_idx=0, in_order=True)
                else:
                    done = True
        epoch += 1
        # Periodic per-thread checkpoint (the eval thread saves the best one).
        if epoch % args.save_every == 0:
            model_state = get_state(model)
            optimizer_state = optim.state_dict()
            # Keep only non-private arg entries in the checkpoint metadata.
            aad = dict(args.__dict__)
            ad = {}
            for i in aad:
                if i[0] != '_':
                    ad[i] = aad[i]
            checkpoint = {'args': ad,
                          'state': model_state,
                          'epoch': epoch,
                          'optimizer': optimizer_state}
            checkpoint_path = '%s/epoch_%d_thread_%d.pt' % (
                args.checkpoint_dir, epoch, rank)
            print('Saving checkpoint to %s' % checkpoint_path)
            logging.info("TRAIN: Saving checkpoint to {}".format(checkpoint_path))
            torch.save(checkpoint, checkpoint_path)
if __name__ == '__main__':
    # Script entry point: parse arguments, set up logging/checkpoints, build
    # the shared model, and dispatch to eval / train / train+eval processes.
    parser = argparse.ArgumentParser()
    # data params
    parser.add_argument('-train_h5', default='data/train.h5')
    parser.add_argument('-val_h5', default='data/val.h5')
    parser.add_argument('-test_h5', default='data/test.h5')
    parser.add_argument('-data_json', default='data/data.json')
    parser.add_argument('-vocab_json', default='data/vocab.json')
    parser.add_argument(
        '-target_obj_conn_map_dir',
        default='data/target-obj-conn-maps/500')
    parser.add_argument('-map_resolution', default=500, type=int)
    parser.add_argument(
        '-mode',
        default='train+eval',
        type=str,
        choices=['train', 'eval', 'train+eval'])
    parser.add_argument('-eval_split', default='val', type=str)
    # model details
    parser.add_argument(
        '-model_type',
        default='cnn',
        choices=['cnn', 'cnn+q', 'lstm', 'lstm+q', 'lstm-mult+q', 'pacman'])
    parser.add_argument('-max_episode_length', default=100, type=int)
    parser.add_argument('-curriculum', default=0, type=int)
    # optim params
    parser.add_argument('-batch_size', default=20, type=int)
    parser.add_argument('-learning_rate', default=1e-3, type=float)
    parser.add_argument('-max_epochs', default=1000, type=int)
    parser.add_argument('-overfit', default=False, action='store_true')
    # bookkeeping
    parser.add_argument('-print_every', default=5, type=int)
    parser.add_argument('-eval_every', default=1, type=int)
    parser.add_argument('-save_every', default=1000, type=int) #optional if you would like to save specific epochs as opposed to relying on the eval thread
    parser.add_argument('-identifier', default='cnn')
    parser.add_argument('-num_processes', default=1, type=int)
    parser.add_argument('-max_threads_per_gpu', default=10, type=int)
    # checkpointing
    parser.add_argument('-checkpoint_path', default=False)
    parser.add_argument('-checkpoint_dir', default='checkpoints/nav/')
    parser.add_argument('-log_dir', default='logs/nav/')
    parser.add_argument('-log', default=False, action='store_true')
    parser.add_argument('-cache', default=False, action='store_true')
    parser.add_argument('-max_controller_actions', type=int, default=5)
    parser.add_argument('-max_actions', type=int)
    args = parser.parse_args()
    args.time_id = time.strftime("%m_%d_%H:%M")
    #MAX_CONTROLLER_ACTIONS = args.max_controller_actions
    if not os.path.isdir(args.log_dir):
        os.makedirs(args.log_dir)
    if args.curriculum:
        assert 'lstm' in args.model_type #TODO: Finish implementing curriculum for other model types
    logging.basicConfig(filename=os.path.join(args.log_dir, "run_{}.log".format(
                            str(datetime.now()).replace(' ', '_'))),
                        level=logging.INFO,
                        format='%(asctime)-15s %(message)s')
    # GPU selection comes from CUDA_VISIBLE_DEVICES; CPU-only runs abort here.
    try:
        args.gpus = os.environ['CUDA_VISIBLE_DEVICES'].split(',')
        args.gpus = [int(x) for x in args.gpus]
    except KeyError:
        print("CPU not supported")
        logging.info("CPU not supported")
        exit()
    if args.checkpoint_path != False:
        print('Loading checkpoint from %s' % args.checkpoint_path)
        logging.info("Loading checkpoint from {}".format(args.checkpoint_path))
        # Everything except 'model_type' is overridden by the current CLI args.
        args_to_keep = ['model_type']
        checkpoint = torch.load(args.checkpoint_path, map_location={
            'cuda:0': 'cpu'
        })
        for i in args.__dict__:
            if i not in args_to_keep:
                checkpoint['args'][i] = args.__dict__[i]
        # NOTE(review): this builds a throwaway class whose class attributes
        # are the merged args; attribute access (args.x) still works as before.
        args = type('new_dict', (object, ), checkpoint['args'])
    args.checkpoint_dir = os.path.join(args.checkpoint_dir,
                                       args.time_id + '_' + args.identifier)
    args.log_dir = os.path.join(args.log_dir,
                                args.time_id + '_' + args.identifier)
    # if set to overfit; set eval_split to train
    if args.overfit == True:
        args.eval_split = 'train'
    print(args.__dict__)
    logging.info(args.__dict__)
    if not os.path.exists(args.checkpoint_dir):
        os.makedirs(args.checkpoint_dir)
        os.makedirs(args.log_dir)
    # Build the shared (shared-memory) model matching the chosen architecture.
    if args.model_type == 'cnn':
        model_kwargs = {}
        shared_model = NavCnnModel(**model_kwargs)
    elif args.model_type == 'cnn+q':
        model_kwargs = {
            'question_input': True,
            'question_vocab': load_vocab(args.vocab_json)
        }
        shared_model = NavCnnModel(**model_kwargs)
    elif args.model_type == 'lstm':
        model_kwargs = {}
        shared_model = NavCnnRnnModel(**model_kwargs)
    # BUG FIX: 'lstm-mult+q' is an accepted -model_type choice and is handled
    # in train(), but previously had no branch here, so selecting it fell
    # through to exit(). Mirror the construction used in train().
    elif args.model_type == 'lstm-mult+q':
        model_kwargs = {
            'question_input': True,
            'question_vocab': load_vocab(args.vocab_json)
        }
        shared_model = NavCnnRnnMultModel(**model_kwargs)
    elif args.model_type == 'lstm+q':
        model_kwargs = {
            'question_input': True,
            'question_vocab': load_vocab(args.vocab_json)
        }
        shared_model = NavCnnRnnModel(**model_kwargs)
    elif args.model_type == 'pacman':
        model_kwargs = {'question_vocab': load_vocab(args.vocab_json)}
        shared_model = NavPlannerControllerModel(**model_kwargs)
    else:
        exit()
    shared_model.share_memory()
    if args.checkpoint_path != False:
        print('Loading params from checkpoint: %s' % args.checkpoint_path)
        logging.info("Loading params from checkpoint: {}".format(args.checkpoint_path))
        shared_model.load_state_dict(checkpoint['state'])
    # Dispatch: single eval process, N train processes, or one of each kind.
    if args.mode == 'eval':
        eval(0, args, shared_model)
    elif args.mode == 'train':
        if args.num_processes > 1:
            processes = []
            for rank in range(0, args.num_processes):
                # for rank in range(0, args.num_processes):
                p = mp.Process(target=train, args=(rank, args, shared_model))
                p.start()
                processes.append(p)
            for p in processes:
                p.join()
        else:
            train(0, args, shared_model)
    else:
        processes = []
        # Start the eval thread
        p = mp.Process(target=eval, args=(0, args, shared_model))
        p.start()
        processes.append(p)
        # Start the training thread(s)
        for rank in range(1, args.num_processes + 1):
            # for rank in range(0, args.num_processes):
            p = mp.Process(target=train, args=(rank, args, shared_model))
            p.start()
            processes.append(p)
        for p in processes:
            p.join()
| 40.84812 | 155 | 0.475445 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,470 | 0.100685 |
206bea68e024108a3072a57ecb2075b2c8f91020 | 1,300 | py | Python | yarll/scripts/list_exps.py | hknozturk/yarll | c5293e6455e3debe6e4d4d21f713937a24a654f3 | [
"MIT"
] | 62 | 2016-11-05T19:27:11.000Z | 2018-09-20T13:29:39.000Z | yarll/scripts/list_exps.py | hknozturk/yarll | c5293e6455e3debe6e4d4d21f713937a24a654f3 | [
"MIT"
] | 4 | 2020-07-09T16:46:19.000Z | 2022-01-26T07:18:06.000Z | yarll/scripts/list_exps.py | hknozturk/yarll | c5293e6455e3debe6e4d4d21f713937a24a654f3 | [
"MIT"
] | 18 | 2016-11-24T14:17:15.000Z | 2018-07-04T16:33:00.000Z | import os
import json
import argparse
from pathlib import Path
import pandas as pd
import dateutil
# Command-line interface: a single positional argument naming the directory
# that contains the per-run subdirectories to list.
parser = argparse.ArgumentParser()
# BUG FIX: the original was missing the comma between `type=Path` and
# `help=...`, which is a SyntaxError.
parser.add_argument("directory", type=Path, help="Path to the directory.")
def main():
    """List experiment runs found under the given directory as a table.

    Each subdirectory (named like ``run<NN>``; sorted numerically by the part
    after the first three characters) may contain a ``config.json`` with
    ``description``, ``start_time`` and ``git`` metadata, which is rendered
    into a pandas DataFrame printed to stdout.
    """
    # BUG FIX: `import dateutil` alone does not guarantee the `parser`
    # submodule is importable as an attribute; load it explicitly.
    import dateutil.parser
    args = parser.parse_args()
    dirs = sorted([d for d in os.listdir(args.directory) if os.path.isdir(args.directory / d)], key=lambda x: int(x[3:]))
    header = ["RUN", "DESCR", "START", "BRANCH", "COMMITMSG"]
    data = []
    for d in dirs:
        # Missing config.json is tolerated: the row is filled with blanks.
        config_path = args.directory / d / "config.json"
        if os.path.exists(config_path):
            with open(config_path) as f:
                config = json.load(f)
        else:
            config = {}
        run_data = [
            d,
            config.get("description", ""),
            dateutil.parser.parse(config["start_time"]).strftime("%d/%m/%y %H:%M") if "start_time" in config else "",
        ]
        run_data += [config["git"]["head"], config["git"]["message"]] if "git" in config else [""] * 2
        data.append(run_data)
    df = pd.DataFrame(data, columns=header)
    df.set_index("RUN", inplace=True)
    # Disable pandas truncation so the full table is printed.
    with pd.option_context('display.max_rows', None, 'display.max_columns', None, "display.width", None, "display.max_colwidth", 100):
        print(df)

if __name__ == '__main__':
    main()
| 33.333333 | 134 | 0.606154 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 266 | 0.204615 |
206c60f444384827b5b58347d59f704ffc3951d0 | 964 | py | Python | src/malign/alignment.py | tresoldi/malign | dad7f2585db3b12f2edbf587f591463aed7c98f5 | [
"MIT"
] | null | null | null | src/malign/alignment.py | tresoldi/malign | dad7f2585db3b12f2edbf587f591463aed7c98f5 | [
"MIT"
] | 1 | 2020-08-07T13:01:29.000Z | 2020-08-07T13:01:29.000Z | src/malign/alignment.py | tresoldi/malign | dad7f2585db3b12f2edbf587f591463aed7c98f5 | [
"MIT"
] | null | null | null | """
Module for the Alignment class.
The `Alignment` class is a simple data class that holds aligned sequences and
their score. It was originally a dictionary passed back and forth among
functions, for which a data class is a good replacement.
"""
from dataclasses import dataclass
from typing import Sequence, Hashable
# TODO: write methods for comparison, based on score
# TODO: add various checks post-initialization
@dataclass
class Alignment:
    # The aligned sequences themselves, plus the score of the alignment.
    seqs: Sequence[Hashable]
    score: float

    def __len__(self) -> int:
        """
        Count the sequences held by this alignment.

        @return: The number of sequences in the alignment.
        """
        num_sequences = len(self.seqs)
        return num_sequences

    def __getitem__(self, idx: int) -> Hashable:
        """
        Look up a single sequence by position.

        @param idx: The index of the sequence in the alignment.
        @return: The sequence stored at that index.
        """
        selected_sequence = self.seqs[idx]
        return selected_sequence
206d8b5b08e4365864d96a5cc41f73122c66ff99 | 1,305 | py | Python | 0236_Lowest_Common_Ancestor_of_a_Binary_Tree.py | coldmanck/leetcode-python | f644b8a0711c96f312326b4d025e9be3340fec42 | [
"MIT"
] | 4 | 2021-01-11T09:53:58.000Z | 2022-01-18T13:11:54.000Z | 0236_Lowest_Common_Ancestor_of_a_Binary_Tree.py | coldmanck/leetcode-python | f644b8a0711c96f312326b4d025e9be3340fec42 | [
"MIT"
] | null | null | null | 0236_Lowest_Common_Ancestor_of_a_Binary_Tree.py | coldmanck/leetcode-python | f644b8a0711c96f312326b4d025e9be3340fec42 | [
"MIT"
] | 2 | 2020-04-13T13:55:48.000Z | 2020-08-25T16:16:11.000Z | # Runtime: 84 ms, faster than 22.95% of Python3 online submissions for Lowest Common Ancestor of a Binary Tree.
# Memory Usage: 23.1 MB, less than 91.67% of Python3 online submissions for Lowest Common Ancestor of a Binary Tree.
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':
        """Return the lowest common ancestor of nodes p and q in the tree.

        Recursive approach: if the current node is empty or is one of the
        targets, it is the answer for this subtree. Otherwise, if the targets
        are found in different subtrees, the current node is the LCA;
        if only one subtree reports a hit, forward that result upward.

        Time: O(n) nodes visited. Space: O(h) recursion depth.
        """
        # Base cases: empty subtree, or the node itself is p or q.
        if root is None or root is p or root is q:
            return root
        found_left = self.lowestCommonAncestor(root.left, p, q)
        found_right = self.lowestCommonAncestor(root.right, p, q)
        # Targets split across both subtrees -> this node is the LCA.
        if found_left and found_right:
            return root
        # Otherwise propagate whichever side (if any) contains a target.
        return found_left or found_right
206ef09a2bc28f7ba5a8ff01cc6d9883d5038da6 | 18,167 | py | Python | src/main/python/scrumtools/github.py | TU-Berlin-DIMA/scrum-tools | f17b39f815d01b7a6f1e2b3cd46d7e99e3cf3118 | [
"Apache-2.0"
] | 1 | 2015-05-23T05:19:32.000Z | 2015-05-23T05:19:32.000Z | src/main/python/scrumtools/github.py | TU-Berlin-DIMA/scrum-tools | f17b39f815d01b7a6f1e2b3cd46d7e99e3cf3118 | [
"Apache-2.0"
] | null | null | null | src/main/python/scrumtools/github.py | TU-Berlin-DIMA/scrum-tools | f17b39f815d01b7a6f1e2b3cd46d7e99e3cf3118 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2010-2014 DIMA Research Group, TU Berlin
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Created on Apr 13, 2014
"""
from __future__ import absolute_import
import os
import sys
import socket
# noinspection PyPackageRequirements
from github3 import login, models
from scrumtools import data, error
from termcolor import cprint, colored
from cement.core import controller
from requests.exceptions import ConnectionError
# Python 2/3 compatibility shim: on Python 2 use raw_input (returns a str
# without evaluating it); on Python 3 raw_input is gone, so fall back to input.
try:
    prompt = raw_input
except NameError:
    prompt = input
class GitHubController(controller.CementBaseController):
    class Meta:
        """Cement controller metadata: labels, config defaults and CLI args."""
        # Controller identity and placement within the cement app.
        label = 'github'
        interface = controller.IController
        stacked_on = 'base'
        stacked_type = 'nested'
        description = "A set of batch management tools for GitHub."
        # Config file section read for the defaults below.
        config_section = 'github'
        # Defaults for the [github] config section: OAuth credentials,
        # organization name, and the team/repo naming patterns ('%02d' is
        # filled with the zero-padded group number).
        config_defaults = dict(
            auth_id=None,
            auth_token=None,
            organization='example.org',
            team_admins='example.admins',
            team_admins_group=-1,
            team_users='example.users',
            team_pattern='example.g%02d',
            repo_admins='example',
            repo_users='example',
            repo_pattern='example.g%02d',
        )
        # Command-line arguments exposed by this controller.
        arguments = [
            (['-U', '--users-file'],
             dict(action='store', metavar='FILE', dest='users_file',
                  help='a CSV file listing all users')),
            (['-O', '--organization'],
             dict(action='store', metavar='NAME', dest='organization',
                  help='the organization managing the GitHub repositories')),
        ]
    @controller.expose(hide=True)
    def default(self):
        """Fallback command: print the controller's help text."""
        self.app.args.parse_args(['--help'])
@controller.expose(help="Authorizes scrum-tools with a GitHub account.")
def authorize(self):
self.app.log.debug('Authorizing a GitHub user.')
(username, password) = self.__class__.prompt_login()
try:
gh = login(username, password, two_factor_callback=self.__class__.prompt_two_factor_login)
au = gh.authorize(username,
password,
scopes=['repo', 'delete_repo', 'admin:org'],
note='Scrum-tools on %s' % socket.gethostname())
cprint(os.linesep.join(["Please copy these lines into the [github] section of your scrum-tools config:",
" auth_id = %s " % au.id,
" auth_token = %s " % au.token]), 'green')
except (models.GitHubError, ConnectionError) as e:
raise RuntimeError(e.msg)
    @controller.expose(help="Validate the provided GitHub account names.")
    def validate_users(self):
        """Check that every GitHub account listed in the users file exists.

        Skips users with an empty GitHub field and prints OK / Not OK per
        user; requires a previously stored OAuth token (see `authorize`).

        :raise error.ConfigError: if no stored GitHub authorization exists.
        """
        self.app.log.debug('Validating GitHub account names.')
        # validate required config parameters
        if not self.app.config.get('github', 'auth_token') or not self.app.config.get('github', 'auth_id'):
            raise error.ConfigError("Missing config parameter 'github.auth_id' and/or 'github.auth_token'! "
                                    "Please run 'scrum-tools github authorize' first! ")
        # Column names for the username / GitHub login fields in the CSV.
        key_username = self.app.config.get('core', 'users_schema_key_username')
        key_github = self.app.config.get('core', 'users_schema_key_github')
        user_repository = data.UserRepository(self.app.config)
        gh = login(token=self.app.config.get('github', 'auth_token'))
        for u in user_repository.users():
            if not u[key_github]:
                cprint("Skipping empty GitHub account for user '%s'." % u[key_username], 'yellow', file=sys.stdout)
                continue
            # Python 2 print statement; trailing comma suppresses the newline
            # so OK / Not OK is appended on the same line.
            print colored("Validating GitHub account '%s' for user '%s'..." % (u[key_github], u[key_username]), 'green'),
            try:
                # gh.user() returns a falsy value when the account is unknown;
                # that is converted to the RuntimeError handled below.
                if gh.user(u[key_github]):
                    print colored('OK', 'green', attrs=['bold'])
                else:
                    raise RuntimeError("Github user '%s' not found" % u[key_github])
            except RuntimeError:
                print colored('Not OK', 'red', attrs=['bold'])
    @controller.expose(help="Creates GitHub repositories.")
    def create_repos(self):
        """Create the per-group, admins and users repositories.

        For every group in the users file a repository is created from
        `repo_pattern` and attached to the matching group team plus the
        admins team; the admins and users repositories are attached to the
        corresponding teams as well. Existing repositories are skipped.

        :raise error.ConfigError: if no stored GitHub authorization exists.
        :raise RuntimeError: if the configured organization is not found.
        """
        self.app.log.debug('Creating GitHub repositories.')
        # validate required config parameters
        if not self.app.config.get('github', 'auth_token') or not self.app.config.get('github', 'auth_id'):
            raise error.ConfigError("Missing config parameter 'github.auth_id' and/or 'github.auth_token'! "
                                    "Please run 'scrum-tools github authorize' first! ")
        # organization
        organization = self.app.config.get('github', 'organization')
        # teams setup
        team_admins = self.app.config.get('github', 'team_admins')
        team_users = self.app.config.get('github', 'team_users')
        team_pattern = self.app.config.get('github', 'team_pattern')
        # repos setup
        repo_admins = self.app.config.get('github', 'repo_admins')
        repo_users = self.app.config.get('github', 'repo_users')
        repo_pattern = self.app.config.get('github', 'repo_pattern')
        # get the users
        user_repository = data.UserRepository(self.app.config)
        # create github session
        gh = login(token=self.app.config.get('github', 'auth_token'))
        # get the organization
        org = gh.organization(organization)
        if not org:
            raise RuntimeError("Organization '%s' not found" % organization)
        # get all organization repos
        # Index existing teams and repos by name for quick lookup below.
        teams = dict((t.name, t) for t in org.iter_teams())
        repos = dict((r.name, r) for r in org.iter_repos())
        # create group repos
        for group in user_repository.groups():
            repo_group = repo_pattern % int(group)
            team_group = team_pattern % int(group)
            # The group repo is shared with the group team and the admins team.
            repo_teams = [v for (k, v) in teams.iteritems() if k in [team_group, team_admins]]
            self.__class__.__create_repo(org, repo_group, repo_teams, repos)
        # create admins repo
        repo_teams = [v for (k, v) in teams.iteritems() if k in [team_admins]]
        self.__class__.__create_repo(org, repo_admins, repo_teams, repos)
        # create users repo
        repo_teams = [v for (k, v) in teams.iteritems() if k in [team_admins, team_users]]
        self.__class__.__create_repo(org, repo_users, repo_teams, repos)
    @controller.expose(help="Deletes GitHub repositories.")
    def delete_repos(self):
        """Delete the per-group, admins and users repositories.

        Asks for interactive confirmation first, since repository deletion
        cannot be undone.

        :raise error.ConfigError: if no stored GitHub authorization exists.
        :raise RuntimeError: if the configured organization is not found.
        """
        self.app.log.debug('Deleting GitHub repositories.')
        # Destructive operation: require an explicit interactive confirmation.
        if not self.__class__.prompt_confirm(colored('This cannot be undone! Proceed? (yes/no): ', 'red')):
            cprint("Aborting delete command.", 'yellow', file=sys.stdout)
            return
        # validate required config parameters
        if not self.app.config.get('github', 'auth_token') or not self.app.config.get('github', 'auth_id'):
            raise error.ConfigError("Missing config parameter 'github.auth_id' and/or 'github.auth_token'! "
                                    "Please run 'scrum-tools github authorize' first! ")
        # organization
        organization = self.app.config.get('github', 'organization')
        # repos setup
        repo_admins = self.app.config.get('github', 'repo_admins')
        repo_users = self.app.config.get('github', 'repo_users')
        repo_pattern = self.app.config.get('github', 'repo_pattern')
        user_repository = data.UserRepository(self.app.config)
        gh = login(token=self.app.config.get('github', 'auth_token'))
        # get the organization
        org = gh.organization(organization)
        if not org:
            raise RuntimeError("Organization '%s' not found" % organization)
        # get all organization repos
        repos = dict((t.name, t) for t in org.iter_repos())
        # delete group repos
        for group in user_repository.groups():
            repo_name = repo_pattern % int(group)
            self.__class__.__delete_repo(repo_name, repos)
        # delete admins repo
        self.__class__.__delete_repo(repo_admins, repos)
        # delete users repo
        self.__class__.__delete_repo(repo_users, repos)
    @controller.expose(help="Creates GitHub teams.")
    def create_teams(self):
        """Create the per-group, admins and users teams and sync members.

        Group teams get 'push' on their group repo; the admins team gets
        'admin' on every repo; the users team gets 'pull' on the shared
        users repo. After creation, team membership is reconciled against
        the users file.

        :raise error.ConfigError: if no stored GitHub authorization exists.
        :raise RuntimeError: if the configured organization is not found.
        """
        self.app.log.debug('Creating GitHub teams.')
        # validate required config parameters
        if not self.app.config.get('github', 'auth_token') or not self.app.config.get('github', 'auth_id'):
            raise error.ConfigError("Missing config parameter 'github.auth_id' and/or 'github.auth_token'! "
                                    "Please run 'scrum-tools github authorize' first! ")
        # schema keys
        key_group = self.app.config.get('core', 'users_schema_key_group')
        key_github = self.app.config.get('core', 'users_schema_key_github')
        # organization
        organization = self.app.config.get('github', 'organization')
        # teams setup
        team_admins = self.app.config.get('github', 'team_admins')
        # Group number whose members are treated as admins.
        team_admins_group = self.app.config.get('github', 'team_admins_group')
        team_users = self.app.config.get('github', 'team_users')
        team_pattern = self.app.config.get('github', 'team_pattern')
        # repos setup
        repo_admins = self.app.config.get('github', 'repo_admins')
        repo_users = self.app.config.get('github', 'repo_users')
        repo_pattern = self.app.config.get('github', 'repo_pattern')
        # get the users
        user_repository = data.UserRepository(self.app.config)
        # create github session
        gh = login(token=self.app.config.get('github', 'auth_token'))
        # get the organization
        org = gh.organization(organization)
        if not org:
            raise RuntimeError("Organization '%s' not found" % organization)
        # get all organization teams
        teams = dict((t.name, t) for t in org.iter_teams())
        # create group teams
        for group in user_repository.groups():
            team_name = team_pattern % int(group)
            repo_names = ['%s/%s' % (organization, repo_pattern % int(group))]
            self.__class__.__create_team(org, team_name, repo_names, 'push', teams)
        # update group teams members
        for group in user_repository.groups():
            team = teams[team_pattern % int(group)]
            # Reconcile actual GitHub members against the users file.
            members_act = set(m.login for m in team.iter_members())
            members_exp = set(u[key_github] for u in user_repository.users(lambda x: x[key_group] == group))
            self.__class__.__update_team_members(team, members_act, members_exp)
        # create admins team
        # Admins get 'admin' permission on every managed repository.
        repo_names = ['%s/%s' % (organization, repo_admins)] + \
                     ['%s/%s' % (organization, repo_users)] + \
                     ['%s/%s' % (organization, repo_pattern % int(group)) for group in user_repository.groups()]
        self.__class__.__create_team(org, team_admins, repo_names, 'admin', teams)
        # update admins team members
        team = teams[team_admins]
        members_act = set(m.login for m in team.iter_members())
        members_exp = set(u[key_github] for u in user_repository.users(lambda x: x[key_group] == team_admins_group))
        self.__class__.__update_team_members(team, members_act, members_exp)
        # create users team
        repo_names = ['%s/%s' % (organization, repo_users)]
        self.__class__.__create_team(org, team_users, repo_names, 'pull', teams)
        # update users team members
        # Every user from the users file belongs to the read-only users team.
        team = teams[team_users]
        members_act = set(m.login for m in team.iter_members())
        members_exp = set(u[key_github] for u in user_repository.users())
        self.__class__.__update_team_members(team, members_act, members_exp)
    @controller.expose(help="Deletes GitHub teams.")
    def delete_teams(self):
        """Delete the per-group, admins and users teams.

        Asks for interactive confirmation first, since team deletion cannot
        be undone.

        :raise error.ConfigError: if no stored GitHub authorization exists.
        :raise RuntimeError: if the configured organization is not found.
        """
        # Destructive operation: require an explicit interactive confirmation.
        if not self.__class__.prompt_confirm(colored('This cannot be undone! Proceed? (yes/no): ', 'red')):
            cprint("Aborting delete command.", 'yellow', file=sys.stdout)
            return
        self.app.log.debug('Deleting GitHub teams.')
        # validate required config parameters
        if not self.app.config.get('github', 'auth_token') or not self.app.config.get('github', 'auth_id'):
            raise error.ConfigError("Missing config parameter 'github.auth_id' and/or 'github.auth_token'! "
                                    "Please run 'scrum-tools github authorize' first! ")
        # organization
        organization = self.app.config.get('github', 'organization')
        # teams setup
        team_admins = self.app.config.get('github', 'team_admins')
        team_users = self.app.config.get('github', 'team_users')
        team_pattern = self.app.config.get('github', 'team_pattern')
        user_repository = data.UserRepository(self.app.config)
        gh = login(token=self.app.config.get('github', 'auth_token'))
        # get the organization
        org = gh.organization(organization)
        if not org:
            raise RuntimeError("Organization '%s' not found" % organization)
        # get all organization teams
        teams = dict((t.name, t) for t in org.iter_teams())
        # delete group teams
        for group in user_repository.groups():
            team_name = team_pattern % int(group)
            self.__class__.__delete_team(team_name, teams)
        # delete admins team
        self.__class__.__delete_team(team_admins, teams)
        # delete users team
        self.__class__.__delete_team(team_users, teams)
    @staticmethod
    def __create_repo(org, repo_name, teams, repos):
        """Create a private repository in the organization and attach teams.

        Args:
            org: github3 Organization object to create the repo in.
            repo_name: name of the repository to create.
            teams: iterable of github3 Team objects to grant access.
            repos: dict of existing repositories keyed by name; updated
                in place when a repository is created.
        """
        if not repo_name in repos:
            # Python 2 print statement; the trailing comma keeps the
            # 'OK'/'Not OK' status on the same console line
            print colored("Creating repository '%s'..." % repo_name, 'green'),
            repo = org.create_repo(name=repo_name, private=True, has_wiki=False)
            if repo:
                repos[repo_name] = repo
                print colored('OK', 'green', attrs=['bold'])
            else:
                print colored('Not OK', 'red', attrs=['bold'])
        else:
            print colored("Skipping repository '%s' (already exists)." % repo_name, 'yellow')
        # grant every given team access to the (possibly pre-existing) repo
        for team in teams:
            print colored("Adding repo '%s/%s' to team '%s'..." % (org.login, repo_name, team.name), 'green'),
            if team.add_repo('%s/%s' % (org.login, repo_name)):
                print colored('OK', 'green', attrs=['bold'])
            else:
                print colored('Not OK', 'red', attrs=['bold'])
    @staticmethod
    def __delete_repo(repo_name, repos):
        """Delete a repository by name if it exists.

        Args:
            repo_name: name of the repository to delete.
            repos: dict of repositories keyed by name; the entry is
                removed in place on successful deletion.
        """
        if repo_name in repos:
            # trailing comma: status printed on the same line (Python 2)
            print colored("Deleting repository '%s'..." % repo_name, 'green'),
            if repos[repo_name].delete():
                del repos[repo_name]
                print colored('OK', 'green', attrs=['bold'])
            else:
                print colored('Not OK', 'red', attrs=['bold'])
        else:
            print colored("Skipping repository '%s' (does not exist)." % repo_name, 'yellow')
    @staticmethod
    def __create_team(org, team_name, repo_names, premission, teams):
        """Create a team with access to the given repositories.

        Args:
            org: github3 Organization object.
            team_name: name of the team to create.
            repo_names: list of 'org/repo' names the team may access.
            premission: permission level (e.g. 'pull', 'push', 'admin').
                NOTE(review): parameter name is a typo for 'permission';
                kept unchanged so keyword callers do not break.
            teams: dict of existing teams keyed by name; updated in
                place when a team is created.
        """
        if not team_name in teams:
            # trailing comma: status printed on the same line (Python 2)
            print colored("Creating team '%s'..." % team_name, 'green'),
            team = org.create_team(name=team_name, repo_names=repo_names, permission=premission)
            if team:
                teams[team_name] = team
                print colored('OK', 'green', attrs=['bold'])
            else:
                print colored('Not OK', 'red', attrs=['bold'])
        else:
            print colored("Skipping team '%s' (already exists)." % team_name, 'yellow')
    @staticmethod
    def __delete_team(team_name, teams):
        """Delete a team by name if it exists.

        Args:
            team_name: name of the team to delete.
            teams: dict of teams keyed by name; the entry is removed in
                place on successful deletion.
        """
        if team_name in teams:
            # trailing comma: status printed on the same line (Python 2)
            print colored("Deleting team '%s'..." % team_name, 'green'),
            if teams[team_name].delete():
                del teams[team_name]
                print colored('OK', 'green', attrs=['bold'])
            else:
                print colored('Not OK', 'red', attrs=['bold'])
        else:
            print colored("Skipping team '%s' (does not exist)." % team_name, 'yellow')
    @staticmethod
    def __update_team_members(team, members_act, members_exp):
        """Synchronize a team's membership with the expected member set.

        Args:
            team: github3 Team object to update.
            members_act: set of current member login names.
            members_exp: set of expected member login names.
        """
        print colored("Updating team members for team '%s'." % team.name, 'green')
        # add missing team members (expected but not present)
        for u in members_exp - members_act:
            print colored("Adding '%s' to team '%s'..." % (u, team.name), 'green'),
            if team.invite(u):
                print colored('OK', 'green', attrs=['bold'])
            else:
                print colored('Not OK', 'red', attrs=['bold'])
        # remove unexpected team members (present but not expected)
        for u in members_act - members_exp:
            print colored("Removing '%s' from team '%s'..." % (u, team.name), 'green'),
            if team.remove_member(u):
                print colored('OK', 'green', attrs=['bold'])
            else:
                print colored('Not OK', 'red', attrs=['bold'])
@staticmethod
def prompt_login():
import getpass
u = prompt("GitHub username [%s]: " % getpass.getuser())
if not u:
u = getpass.getuser()
password_prompt = lambda: (getpass.getpass("GitHub password: "), getpass.getpass('GitHub password (again): '))
p1, p2 = password_prompt()
while p1 != p2:
print('Passwords do not match. Try again')
p1, p2 = password_prompt()
return u, p1
@staticmethod
def prompt_two_factor_login():
code = ''
while not code:
code = prompt('Enter 2FA code: ')
return code
@staticmethod
def prompt_confirm(question='Do you really want to do this (yes/no)?', answer_true='yes'):
return prompt(question) == answer_true | 42.150812 | 121 | 0.608686 | 17,186 | 0.946001 | 0 | 0 | 16,003 | 0.880883 | 0 | 0 | 5,471 | 0.30115 |
2072a741b04f9c964e7ed9b4b5f47b7e8423121d | 3,628 | py | Python | cubam/MajorityModel.py | welinder/cubam | fe5ba700f1adbb489c69af311558d64370d73d36 | [
"BSD-3-Clause-Clear"
] | 20 | 2015-01-10T02:53:44.000Z | 2022-03-20T18:10:15.000Z | cubam/MajorityModel.py | afcarl/cubam | fe5ba700f1adbb489c69af311558d64370d73d36 | [
"BSD-3-Clause-Clear"
] | 1 | 2019-01-30T17:02:51.000Z | 2019-01-30T17:02:51.000Z | cubam/MajorityModel.py | afcarl/cubam | fe5ba700f1adbb489c69af311558d64370d73d36 | [
"BSD-3-Clause-Clear"
] | 12 | 2016-02-22T02:43:55.000Z | 2021-09-19T20:50:09.000Z | from BinaryModel import *
from numpy.random import rand
class MajorityModel(BinaryModel):
    """Majority-vote baseline model for binary crowd-sourced labels.

    Each image's parameter is the pair ``[fraction of positive votes,
    total number of votes]``; the predicted label for an image is simply
    whether the positive fraction exceeds one half, optionally with a
    small amount of tie-breaking noise (see ``get_labels``).
    """

    def __init__(self, filename=None):
        """Create the model, optionally loading labels from ``filename``."""
        self.mdlPrm = {
            'addNoise': False,  # add tie-breaking noise in get_labels()
        }
        self.wkrIds = {}
        self.imgIds = {}
        if filename:
            self.load_data(filename)
        else:
            # no data supplied: initialize priors (defined in BinaryModel)
            self._setup_prior()

    def load_data(self, filename, skipyaml=False):
        """Load labels from a text file.

        The first line holds ``<numImgs> <numWkrs> <numLbls>``; every
        following line is ``<imageId> <workerId> <label>`` where a label
        of 1 counts as a positive vote.

        Args:
            filename: path to the label file.
            skipyaml: unused; kept for interface compatibility.
        """
        # 'with' guarantees the file handle is closed (the previous
        # implementation never closed it)
        with open(filename) as filein:
            header = filein.readline().rstrip().split(' ')
            self.numImgs = int(header[0])
            self.numWkrs = int(header[1])
            self.numLbls = int(header[2])
            # per image: [number of +ve votes, total n votes]; the first
            # entry is turned into a fraction below
            self.imgPrm = [[0, 0] for _ in range(self.numImgs)]
            self.wkrLbls = dict((wkr, []) for wkr in range(self.numWkrs))
            self.imgLbls = dict((img, []) for img in range(self.numImgs))
            self.labels = []
            for line in filein:
                cols = [int(c) for c in line.rstrip().split(' ')]
                iId, wId = cols[0], cols[1]
                lij = int(cols[2] == 1)
                self.wkrLbls[wId].append([iId, lij])
                self.imgLbls[iId].append([wId, lij])
                self.labels.append((iId, wId, lij))
                self.imgPrm[iId][0] += lij
                self.imgPrm[iId][1] += 1
        # normalize vote counts into fractions
        # NOTE(review): an image with zero votes would raise
        # ZeroDivisionError here (same as before); presumably the file
        # format guarantees at least one vote per image -- confirm.
        for prm in self.imgPrm:
            prm[0] = float(prm[0]) / prm[1]

    def get_num_wkrs(self):
        """Return the number of workers."""
        return self.numWkrs

    def get_num_imgs(self):
        """Return the number of images."""
        return self.numImgs

    def get_num_lbls(self):
        """Return the total number of labels."""
        return self.numLbls

    def set_model_param(self, raw=(), prm=None):
        """Set model parameters.

        Args:
            raw: raw parameter vector (unused by this model).
            prm: mapping of model parameter names to new values.
        """
        if prm is not None:
            # items() replaces the Python-2-only iteritems()
            for (k, v) in prm.items():
                self.mdlPrm[k] = v

    def set_worker_param(self, raw):
        """No-op: workers have no parameters in the majority-vote model."""
        pass

    def set_image_param(self, raw):
        """Set per-image parameters from ``raw`` (shallow copy)."""
        self.imgPrm = [r for r in raw]

    def get_model_param(self):
        """This model has no global parameters."""
        return {}

    def get_worker_param_raw(self):
        """No raw worker parameters exist for this model."""
        return {}

    def get_image_param_raw(self):
        """Return a shallow copy of the per-image parameters."""
        return [p for p in self.imgPrm]

    def get_worker_param(self, id=None):
        """No worker parameters exist for this model."""
        return {}

    def get_image_param(self, id=None):
        """Return a shallow copy of the per-image parameters."""
        return [p for p in self.imgPrm]

    def get_labels(self):
        """Return the majority-vote label (0/1) for every image.

        When ``mdlPrm['addNoise']`` is set, a random perturbation smaller
        than one vote is added to break ties between equally voted images.
        """
        if self.mdlPrm['addNoise']:
            return [int((prm[0] + (rand() - .5) / prm[1]) > .5)
                    for prm in self.imgPrm]
        return [int(prm[0] > .5) for prm in self.imgPrm]

    # TODO: load and save parameters
    def optimize_worker_param(self):
        """No-op: majority vote has no worker parameters to optimize."""
        pass

    def optimize_image_param(self):
        """No-op: image parameters are fixed by the vote counts."""
        pass

    def objective(self, prm=None):
        """Not defined for the majority-vote model."""
        pass

    def image_objective(self, prm=None):
        """Not defined for the majority-vote model."""
        pass

    def image_objective_range(self, imgId, prm):
        """Not defined for the majority-vote model."""
        pass

    def worker_objective_range(self, wkrId, prm):
        """Not defined for the majority-vote model."""
        pass

    def gradient(self, prm=None):
        """No gradient: the model has no optimizable parameters."""
        return []

    def worker_gradient(self, prm=None):
        """No gradient: workers have no parameters."""
        return []

    def image_gradient(self, prm=None):
        """Not defined for the majority-vote model."""
        pass

    def get_num_wkr_lbls(self):
        """Return per-worker label counts, indexed by worker id."""
        return [len(self.wkrLbls[w]) for w in range(self.numWkrs)]

    def get_num_img_lbls(self):
        """Return per-image label counts, indexed by image id."""
        return [len(self.imgLbls[i]) for i in range(self.numImgs)]
| 27.484848 | 78 | 0.5543 | 3,570 | 0.984013 | 0 | 0 | 0 | 0 | 0 | 0 | 393 | 0.108324 |
2073f752394f61237cdcd24fae2aec0b516f1d64 | 2,016 | py | Python | visprotocol/server/test_multi_LED.py | ClandininLab/vis-protocol | d4438dccea3987b8f21648d439fe1c1349940024 | [
"MIT"
] | null | null | null | visprotocol/server/test_multi_LED.py | ClandininLab/vis-protocol | d4438dccea3987b8f21648d439fe1c1349940024 | [
"MIT"
] | null | null | null | visprotocol/server/test_multi_LED.py | ClandininLab/vis-protocol | d4438dccea3987b8f21648d439fe1c1349940024 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from flystim.screen import Screen, SubScreen
from flystim.draw import draw_screens
from flystim.stim_server import StimServer
from flystim.dlpc350 import make_dlpc350_objects
from math import pi
import matplotlib.pyplot as plt
def main():
    """Configure the two display screens for the rig and run the stimulus server.

    Builds a flystim Screen for the Bruker right-side display plus a
    non-fullscreen auxiliary mirror of the same subscreen, then starts a
    StimServer loop on the configured port.
    """
    # LCR USB commands handled thru lightcrafter package script first
    def getBrukerRight():
        # Define screen(s) for the rig. Units in meters
        # Fly is at (0, 0, 0), fly looking down +y axis. Top of the screen is at z=0
        scale_fact = 2.52
        x_right = scale_fact*7.99e-2
        # x_almost_center = +0.919e-2
        y_back = scale_fact*-0.8e-2
        # y_forward = +6.25e2
        # z_top = +2.87e2
        # z_bottom = -8.98e-2 #m
        z_bottom = scale_fact*-12.13e-2
        y_forward = scale_fact*7.17e-2
        # set screen width and height
        # pa/pb/pc are three corner points defining the screen plane
        pb = (x_right, y_back, z_bottom)
        pa = (0, y_forward, z_bottom)
        pc = (0, y_forward, 0)
        # viewport in normalized device coordinates: lower-left + extent
        viewport_ll = (-0.54, -0.46)
        viewport_height = 0.61 - (-0.46)
        viewport_width = 0.23 - (-0.54)
        return SubScreen(pa, pb, pc, viewport_ll, viewport_width, viewport_height)
    # NOTE(review): getAux is never called and references x_left / y_back /
    # z_bottom / y_forward, which are not defined in its scope -- calling it
    # would raise NameError. Dead code; presumably left over from another rig.
    def getAux():
        return SubScreen(pa=(x_left, y_back, z_bottom), pb=(0, y_forward, z_bottom), pc=(x_left, y_back, 0))
    # NOTE(review): this is the *right* Bruker screen but is labeled
    # name='Left' -- confirm whether the name is intentional.
    bruker_right_screen = Screen(subscreens=[getBrukerRight()], id=3, fullscreen=True, vsync=True, square_size=(0.11, 0.23), square_loc=(0.89, -1.00), name='Left', horizontal_flip=True)
    # auxiliary (windowed, un-flipped) copy of the same subscreen for monitoring
    aux_screen = Screen(subscreens=[getBrukerRight()], id=0, fullscreen=False, vsync=True, square_size=(0, 0), square_loc=(-1, -1), name='Aux', horizontal_flip=False)
    #screens = [bruker_left_screen, aux_screen]
    screens = [bruker_right_screen, aux_screen]
    port = 60629
    host = ''
    # serve stimuli until stopped; auto_stop=False keeps the server alive
    # between clients
    manager = StimServer(screens=screens, host=host, port=port, auto_stop=False)
    manager.black_corner_square()
    manager.set_idle_background(0)
    manager.loop()

if __name__ == '__main__':
    main()
| 34.758621 | 185 | 0.650298 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 419 | 0.207837 |
2074b6146e8831cccc4002f920981b1ef1a5685a | 2,511 | py | Python | UserInterfaces.py | StudentCV/TableSoccerCV | dcead6a3b53f959a2264a4f7372b3a9b6904b476 | [
"Apache-2.0"
] | 10 | 2016-06-17T10:30:27.000Z | 2021-04-10T19:46:41.000Z | UserInterfaces.py | StudentCV/TableSoccerCV | dcead6a3b53f959a2264a4f7372b3a9b6904b476 | [
"Apache-2.0"
] | null | null | null | UserInterfaces.py | StudentCV/TableSoccerCV | dcead6a3b53f959a2264a4f7372b3a9b6904b476 | [
"Apache-2.0"
] | 1 | 2019-03-28T22:16:06.000Z | 2019-03-28T22:16:06.000Z | #Copyright 2016 StudentCV
#Copyright and related rights are licensed under the
#Solderpad Hardware License, Version 0.51 (the “License”);
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at http://solderpad.org/licenses/SHL-0.51.
#Unless required by applicable law or agreed to in writing,
#software, hardware and materials distributed under this License
#is distributed on an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
#either express or implied. See the License for the specific language
#governing permissions and limitations under the License.
import matplotlib.pyplot as plt
import cv2
class PythonInterface:
    """Simple console/window user interface for the table-soccer analysis.

    Still images are shown with matplotlib; video frames are shown in an
    OpenCV window. All frames/images are expected in HSV color space.
    """

    # accumulated source time (seconds) since the last rendered video frame
    total_frame_time = 0

    # user commands (key/text pairs) that can be awaited
    start_session = {"key": 0, "text": "Start a session"}
    start_calibration = {"key": 1, "text": "Start calibration"}
    start_match = {"key": 2, "text": "Start the match"}

    def run(self):
        """Return True if the analysis shall be executed, False if not."""
        return True

    def show_image(self, image, draw=()):
        """Display a single HSV image with matplotlib.

        Args:
            image: HSV image (numpy array).
            draw: optional iterable of callables, each taking and
                returning an image; applied in order before display
                (e.g. overlay drawers).
        """
        # immutable default replaces the original mutable `draw=[]`
        if draw:
            for task in draw:
                image = task(image)
        plt.figure()
        plt.imshow(cv2.cvtColor(image, cv2.COLOR_HSV2RGB))
        plt.show()

    def show_video(self, frame, get_source_var, draw=()):
        """Display a video frame in an OpenCV window, throttled to ~30 fps.

        Frame durations are accumulated; a frame is only rendered once at
        least 1/30 s of source time has passed since the last render.

        NOTE(review): under Python 2 the literal `1/30` is integer
        division (== 0), which would render every frame; presumably this
        module targets Python 3 -- confirm.

        Args:
            frame: HSV image.
            get_source_var: callable returning source variables by name;
                'FrameTime' must yield the frame duration in seconds.
            draw: optional iterable of callables applied to the frame
                before display.
        """
        self.total_frame_time = self.total_frame_time + get_source_var('FrameTime')
        if self.total_frame_time < (1/30):
            # not enough source time elapsed yet; skip this frame
            return
        if draw:
            for task in draw:
                frame = task(frame)
        cv2.imshow('Soccer', cv2.cvtColor(frame, cv2.COLOR_HSV2BGR))
        cv2.waitKey(1)
        self.total_frame_time = 0

    def wait_for_user_command(self, command):
        """Block until the given command is issued by the user (no time limit).

        NOTE(review): the interactive prompt is commented out, so this
        currently returns True immediately.

        Args:
            command: one of the command dicts (e.g. ``start_match``).

        Returns:
            True once the command has been issued.
        """
        #self.message(command["text"]+"?")
        #input(command["text"]+"?")
        return True

    def message(self, message):
        """Print the message text to the console.

        Args:
            message: string to display.
        """
        print(message)
| 26.712766 | 82 | 0.589805 | 1,843 | 0.73164 | 0 | 0 | 0 | 0 | 0 | 0 | 1,361 | 0.540294 |
207541c4f4a46b92967908249bf629d6ac8f4fb1 | 6,335 | py | Python | data/external/repositories/115375/hail-seizure-master/train.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | null | null | null | data/external/repositories/115375/hail-seizure-master/train.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | 1 | 2015-12-10T16:46:02.000Z | 2018-05-21T23:01:55.000Z | data/external/repositories/115375/hail-seizure-master/train.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | 1 | 2019-12-04T08:23:33.000Z | 2019-12-04T08:23:33.000Z | #!/usr/bin/env python3
import python.utils as utils
import os
import joblib
import pickle
import pdb
def main(settingsfname, verbose=False, store_models=True,
         store_features=False, save_training_detailed=False,
         load_pickled=False, parallel=0):
    """Train per-subject seizure models as configured by a settings file.

    Loads feature data and metadata, builds the model pipeline, trains one
    model per subject (optionally with RFE or a custom model, sequentially
    or in parallel), and reports per-subject and combined AUC scores.

    Args:
        settingsfname: path to the JSON/YAML settings file.
        verbose: print progress information when True.
        store_models: persist trained models (passed through to utils).
        store_features: persist transformed features (RFE path only).
        save_training_detailed: if truthy, path to pickle detailed
            per-subject predictions to.
        load_pickled: if truthy, load precomputed features; a string is
            treated as a pickle path, otherwise a default name derived
            from the settings file is used.
        parallel: number of joblib workers; 0 means sequential.

    Returns:
        dict: AUC score per subject plus the combined score under 'all'.
    """
    settings = utils.get_settings(settingsfname)
    utils.print_verbose('=== Settings file ===', flag=verbose)
    utils.print_verbose(settingsfname, flag=verbose)
    utils.print_verbose('=== Settings loaded ===', flag=verbose)
    utils.print_verbose(settings, flag=verbose)
    utils.print_verbose('=======================', flag=verbose)
    subjects = settings['SUBJECTS']
    data = utils.get_data(settings, verbose=verbose)
    metadata = utils.get_metadata()
    # keep only the requested features that actually exist in the data
    features_that_parsed = [feature for feature in
                            settings['FEATURES'] if feature in list(data.keys())]
    settings['FEATURES'] = features_that_parsed
    if not settings['FEATURES']:
        raise EnvironmentError('No features could be loaded')
    utils.print_verbose("=====Feature HDF5s parsed=====", flag=verbose)
    model_pipe = utils.build_model_pipe(settings)
    utils.print_verbose("=== Model Used ===\n"
                        "{0}\n==================".format(model_pipe),
                        flag=verbose)
    # dictionary to store results
    subject_predictions = {}
    # dictionary to store features in
    transformed_features = {}
    # if we're loading pickled features then load them
    if load_pickled:
        if isinstance(load_pickled, str):
            # explicit pickle path given
            with open(load_pickled, "rb") as fh:
                Xtra = pickle.load(fh)
        else:
            # default dump name derived from the settings file name
            with open(settingsfname.split(".")[0]
                      + "_feature_dump.pickle", "rb") as fh:
                Xtra = pickle.load(fh)
    else:
        Xtra = None
    # dictionary for final scores
    auc_scores = {}
    if not parallel:
        # sequential path: train one subject at a time
        for subject in subjects:
            utils.print_verbose(
                "=====Training {0} Model=====".format(str(subject)),
                flag=verbose)
            if 'RFE' in settings:
                transformed_features, auc = utils.train_RFE(settings,
                                                            data,
                                                            metadata,
                                                            subject,
                                                            model_pipe,
                                                            transformed_features,
                                                            store_models,
                                                            store_features,
                                                            load_pickled,
                                                            settingsfname,
                                                            verbose,
                                                            extra_data=Xtra)
                subject_predictions = None
            elif 'CUSTOM' in settings:
                results, auc = utils.train_custom_model(settings,
                                                        data,
                                                        metadata,
                                                        subject,
                                                        model_pipe,
                                                        store_models,
                                                        load_pickled,
                                                        verbose,
                                                        extra_data=Xtra)
                subject_predictions[subject] = results
            else:
                results, auc = utils.train_model(settings,
                                                 data,
                                                 metadata,
                                                 subject,
                                                 model_pipe,
                                                 store_models,
                                                 load_pickled,
                                                 verbose,
                                                 extra_data=Xtra)
                subject_predictions[subject] = results
            auc_scores.update({subject: auc})
    if parallel:
        if 'RFE' in settings:
            raise NotImplementedError('Parallel RFE is not implemented')
        else:
            # fan the per-subject training out over joblib workers
            output = joblib.Parallel(n_jobs=parallel)(
                joblib.delayed(utils.train_model)(settings,
                                                  data,
                                                  metadata,
                                                  subject,
                                                  model_pipe,
                                                  store_models,
                                                  load_pickled,
                                                  verbose,
                                                  extra_data=Xtra,
                                                  parallel=parallel)
                for subject in subjects)
            # each worker returns (results, auc); merge them back together
            results = [x[0] for x in output]
            aucs = [x[1] for x in output]
            for result in results:
                subject_predictions.update(result)
            for auc in aucs:
                auc_scores.update(auc)
    if save_training_detailed:
        # NOTE(review): 'subject' here is the loop variable leaking out of
        # the sequential loop, so only the LAST subject's predictions are
        # saved (and in parallel mode 'subject' is undefined) -- presumably
        # the whole subject_predictions dict was intended; confirm.
        with open(save_training_detailed, "wb") as fh:
            pickle.dump(subject_predictions[subject], fh)
    combined_auc = utils.combined_auc_score(settings,
                                            auc_scores,
                                            subj_pred=subject_predictions)
    print(
        "predicted AUC score over all subjects: {0:.2f}".format(combined_auc))
    auc_scores.update({'all': combined_auc})
    utils.output_auc_scores(auc_scores, settings)
    return auc_scores
if __name__ == '__main__':
    # CLI entry point: get and parse command-line options, then train
    parser = utils.get_parser()
    args = parser.parse_args()
    main(args.settings,
         verbose=args.verbose,
         save_training_detailed=args.pickle_detailed,
         parallel=int(args.parallel))
| 38.865031 | 81 | 0.424467 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 594 | 0.093765 |
20778423e20ac6661734493d2303bc03ce7d5df0 | 982 | py | Python | tests/scraper/models.py | teolemon/django-dynamic-scraper | 2a46df8828fa8dcf4f74315abe99cc37b214b2e8 | [
"BSD-3-Clause"
] | null | null | null | tests/scraper/models.py | teolemon/django-dynamic-scraper | 2a46df8828fa8dcf4f74315abe99cc37b214b2e8 | [
"BSD-3-Clause"
] | null | null | null | tests/scraper/models.py | teolemon/django-dynamic-scraper | 2a46df8828fa8dcf4f74315abe99cc37b214b2e8 | [
"BSD-3-Clause"
] | null | null | null | from django.db import models
from dynamic_scraper.models import Scraper, SchedulerRuntime
from scrapy.contrib.djangoitem import DjangoItem
class EventWebsite(models.Model):
    """A website listing events, scraped by an attached dynamic scraper."""

    name = models.CharField(max_length=200)
    scraper = models.ForeignKey(Scraper, null=True, blank=True, on_delete=models.SET_NULL)
    url = models.URLField()
    scraper_runtime = models.ForeignKey(SchedulerRuntime, null=True, blank=True, on_delete=models.SET_NULL)

    def __unicode__(self):
        # displayed e.g. in the Django admin as "<name> (<pk>)"
        return "%s (%s)" % (self.name, self.id)
class Event(models.Model):
    """A single event scraped from an EventWebsite."""

    title = models.CharField(max_length=200)
    # NOTE(review): no on_delete argument -- valid only on Django < 2.0,
    # where it defaults to CASCADE; confirm the targeted Django version.
    event_website = models.ForeignKey(EventWebsite)
    description = models.TextField(blank=True)
    url = models.URLField()
    checker_runtime = models.ForeignKey(SchedulerRuntime, null=True, blank=True, on_delete=models.SET_NULL)

    def __unicode__(self):
        # displayed e.g. in the Django admin as "<title> (<pk>)"
        return "%s (%s)" % (self.title, self.id)
class EventItem(DjangoItem):
    # Scrapy DjangoItem bound to the Event model: the item's fields are
    # derived from Event's model fields
    django_model = Event
207a4de3d61bc090e22bce94c09268f291db401d | 395 | py | Python | users/models.py | lizooo/webpage | 4a203ad04991a4ae54d6bd1179054715b56095aa | [
"MIT"
] | 1 | 2021-12-16T15:56:35.000Z | 2021-12-16T15:56:35.000Z | users/models.py | Na11a/webpage | 29ba3ecee7c122a7ce92c6053077f00056e6ce28 | [
"MIT"
] | 6 | 2020-04-25T17:43:43.000Z | 2021-11-04T20:02:46.000Z | users/models.py | Na11a/webpage | 29ba3ecee7c122a7ce92c6053077f00056e6ce28 | [
"MIT"
] | 10 | 2020-10-05T12:55:54.000Z | 2021-11-21T12:03:30.000Z | from django.db import models
# Create your models here.
from django.db import models
from datetime import datetime
class User(models.Model):
name = models.CharField(max_length=100)
surname = models.CharField(max_length=100)
email = models.EmailField(unique=True)
password = models.CharField(max_length=128)
created = models.DateTimeField('Created', default=datetime.now)
| 26.333333 | 67 | 0.756962 | 275 | 0.696203 | 0 | 0 | 0 | 0 | 0 | 0 | 35 | 0.088608 |
207a528acd1c6078894046fa653d5ad571c45a65 | 1,038 | py | Python | doctable/textmodels/parsetreedoc.py | devincornell/sqlitedocuments | 16923bb3b91af5104140e49045efdc612afbc310 | [
"MIT"
] | 1 | 2019-06-19T20:27:55.000Z | 2019-06-19T20:27:55.000Z | doctable/textmodels/parsetreedoc.py | devincornell/sqlitedocuments | 16923bb3b91af5104140e49045efdc612afbc310 | [
"MIT"
] | 21 | 2019-04-12T01:08:20.000Z | 2020-11-09T18:28:41.000Z | doctable/textmodels/parsetreedoc.py | devincornell/sqlitedocuments | 16923bb3b91af5104140e49045efdc612afbc310 | [
"MIT"
] | null | null | null |
from typing import Any
from .basedoc import BaseDoc
from .parsetree import ParseTree
class ParseTreeDoc(list):
    """A document made up of a sequence of parse trees.

    Behaves like a plain ``list`` whose elements are ``ParseTree``
    objects, with helpers for converting to and from dict form and for
    construction from a spaCy ``Doc``.
    """
    @property
    def tokens(self):
        """Yield every token of every parse tree, in document order."""
        for tree in self:
            for token in tree:
                yield token

    def as_dict(self):
        """Return the document as a list of dict-formatted parse trees."""
        return [tree.as_dict() for tree in self]

    @classmethod
    def from_dict(cls, tree_data: list, *args, **kwargs):
        """Build a new ParseTreeDoc from dict trees produced by as_dict().

        Args:
            tree_data: list of dict trees created by ``as_dict()``.
        """
        rebuilt = (ParseTree.from_dict(entry, *args, **kwargs)
                   for entry in tree_data)
        return cls(rebuilt)

    @classmethod
    def from_spacy(cls, doc: Any, *args, **kwargs):
        """Build a new ParseTreeDoc from a spaCy Doc, one tree per sentence."""
        parsed = (ParseTree.from_spacy(sent, *args, **kwargs)
                  for sent in doc.sents)
        return cls(parsed)
| 28.833333 | 85 | 0.633911 | 943 | 0.908478 | 0 | 0 | 674 | 0.649326 | 0 | 0 | 410 | 0.39499 |